Snap build fix (#757)

This commit is contained in:
Raivis Dejus 2024-05-26 15:41:02 +03:00 committed by GitHub
commit 5ba8eaa1f4
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 24 additions and 60 deletions

View file

@ -20,6 +20,7 @@ jobs:
matrix:
include:
- os: macos-13
- os: macos-latest
- os: windows-latest
- os: ubuntu-20.04
- os: ubuntu-22.04

View file

@ -15,13 +15,9 @@ from PyQt6.QtCore import QObject, pyqtSignal, QRunnable
from platformdirs import user_cache_dir
from tqdm.auto import tqdm
whisper = None
faster_whisper = None
huggingface_hub = None
if sys.platform != "linux":
import faster_whisper
import whisper
import huggingface_hub
import faster_whisper
import whisper
import huggingface_hub
# Catch exception from whisper.dll not getting loaded.
# TODO: Remove flag and try-except when issue with loading
@ -77,17 +73,6 @@ class ModelType(enum.Enum):
# See: https://github.com/chidiwilliams/buzz/issues/274,
# https://github.com/chidiwilliams/buzz/issues/197
(self == ModelType.WHISPER_CPP and not LOADED_WHISPER_CPP_BINARY)
# Disable Whisper and Faster Whisper options
# on Linux due to execstack errors on Snap
or (
sys.platform == "linux"
and self
in (
ModelType.WHISPER,
ModelType.FASTER_WHISPER,
ModelType.HUGGING_FACE,
)
)
):
return False
return True
@ -99,6 +84,7 @@ class ModelType(enum.Enum):
ModelType.FASTER_WHISPER,
)
HUGGING_FACE_MODEL_ALLOW_PATTERNS = [
"added_tokens.json",
"config.json",

View file

@ -15,8 +15,7 @@ from buzz.transcriber.transcriber import TranscriptionOptions
from buzz.transcriber.whisper_cpp import WhisperCpp, whisper_cpp_params
from buzz.transformers_whisper import TransformersWhisper
if sys.platform != "linux":
import whisper
import whisper
class RecordingTranscriber(QObject):

View file

@ -17,11 +17,10 @@ from buzz.model_loader import ModelType
from buzz.transcriber.file_transcriber import FileTranscriber
from buzz.transcriber.transcriber import FileTranscriptionTask, Segment
if sys.platform != "linux":
import faster_whisper
import whisper
import stable_whisper
from stable_whisper import WhisperResult
import faster_whisper
import whisper
import stable_whisper
from stable_whisper import WhisperResult
PROGRESS_REGEX = re.compile(r"\d+(\.\d+)?%")

View file

@ -4,10 +4,8 @@ from typing import Optional, Union
import numpy as np
from tqdm import tqdm
WhisperProcessor = WhisperForConditionalGeneration = None
if sys.platform != "linux":
import whisper
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import whisper
from transformers import WhisperProcessor, WhisperForConditionalGeneration
def load_model(model_name_or_path: str):

2
poetry.lock generated
View file

@ -3394,4 +3394,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.9.13,<3.11"
content-hash = "6bf14757a6f7575cba194c4f8b820aad6337a114dc87a9f551aec2d521b0dc9c"
content-hash = "0b674818a94169f61efde66d2230f4e1869d34223589b755d4ad5f6aca99653f"

View file

@ -23,12 +23,10 @@ dataclasses-json = "^0.6.4"
numpy = "^1.21.2"
requests = "^2.31.0"
yt-dlp = "2024.3.10"
# Only install on non-Linux to prevent execstack errors
stable-ts = { version = "2.15.9", markers = "sys_platform != 'linux'" }
faster-whisper = { version = "^1.0.1", markers = "sys_platform != 'linux'" }
openai-whisper = { version = "v20231117", markers = "sys_platform != 'linux'" }
transformers = { version = "^4.39.1", markers = "sys_platform != 'linux'" }
stable-ts = "2.15.9"
faster-whisper = "^1.0.1"
openai-whisper = "v20231117"
transformers = "^4.39.1"
[tool.poetry.group.dev.dependencies]
autopep8 = "^1.7.0"

View file

@ -38,7 +38,6 @@ parts:
- libqt5gui5
- libgdk-pixbuf2.0-0
- libqt5svg5 # for loading icon themes which are svg
- locales-all
- libglib2.0-0
override-prime: |
craftctl default
@ -103,6 +102,11 @@ parts:
override-build: |
craftctl default
pip install .
# Use custom ctranslate2 that has no execstack issues
pip install https://github.com/raivisdejus/CTranslate2-no-execstack/releases/download/v4.2.1/ctranslate2-4.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
# Switch to CPU torch
pip uninstall -y torch torchaudio nvidia-cublas-cu12 nvidia-cuda-cupti-cu12 nvidia-cuda-nvrtc-cu12 nvidia-cuda-runtime-cu12 nvidia-cudnn-cu12 nvidia-cufft-cu12 nvidia-curand-cu12 nvidia-cusolver-cu12 nvidia-cusparse-cu12 nvidia-nccl-cu12 nvidia-nvjitlink-cu12 nvidia-nvtx-cu12
python -m pip install torch torchaudio --extra-index-url https://download.pytorch.org/whl/cpu
python build.py
mkdir $CRAFT_PART_INSTALL/buzz
cp $CRAFT_PART_BUILD/buzz/whisper_cpp.py $CRAFT_PART_INSTALL/buzz/

View file

@ -26,9 +26,6 @@ class TestCLI:
"--txt",
test_audio_path,
],
marks=pytest.mark.skipif(
sys.platform == "linux", reason="Skip on Linux"
),
)
],
indirect=True,

View file

@ -26,8 +26,6 @@ from buzz.transcriber.whisper_file_transcriber import WhisperFileTranscriber
from tests.audio import test_audio_path
from tests.model_loader import get_model_path
UNSUPPORTED_ON_LINUX_REASON = "Whisper not supported on Linux"
class TestWhisperFileTranscriber:
@pytest.mark.parametrize(
@ -179,7 +177,6 @@ class TestWhisperFileTranscriber:
),
],
)
@pytest.mark.skipif(sys.platform == "linux", reason=UNSUPPORTED_ON_LINUX_REASON)
def test_transcribe_from_file(
self,
qtbot: QtBot,
@ -227,7 +224,6 @@ class TestWhisperFileTranscriber:
assert len(segments[i].text) > 0
logging.debug(f"{segments[i].start} {segments[i].end} {segments[i].text}")
@pytest.mark.skipif(sys.platform == "linux", reason=UNSUPPORTED_ON_LINUX_REASON)
def test_transcribe_from_url(self, qtbot):
url = (
"https://github.com/chidiwilliams/buzz/raw/main/testdata/whisper-french.mp3"
@ -268,9 +264,6 @@ class TestWhisperFileTranscriber:
assert len(segments[i].text) > 0
logging.debug(f"{segments[i].start} {segments[i].end} {segments[i].text}")
@pytest.mark.skipif(
sys.platform == "linux", reason="Avoid execstack errors on Snap"
)
def test_transcribe_from_folder_watch_source(self, qtbot):
file_path = tempfile.mktemp(suffix=".mp3")
shutil.copy(test_audio_path, file_path)

View file

@ -1,6 +1,4 @@
import platform
import sys
import pytest
from buzz.transformers_whisper import load_model
@ -8,8 +6,8 @@ from tests.audio import test_audio_path
@pytest.mark.skipif(
sys.platform == "linux" or platform.system() == "Darwin",
reason="Not supported on Linux",
platform.system() == "Darwin",
reason="Not supported on Darwin",
)
class TestTransformersWhisper:
def test_should_transcribe(self):

View file

@ -17,15 +17,6 @@ class TestModelTypeComboBox:
"Faster Whisper",
"OpenAI Whisper API",
],
marks=pytest.mark.skipif(
sys.platform == "linux", reason="Skip on Linux"
),
),
pytest.param(
["Whisper.cpp", "OpenAI Whisper API"],
marks=pytest.mark.skipif(
sys.platform != "linux", reason="Skip on non-Linux"
),
),
],
)