Fix for faster whisper offline (#1074)

This commit is contained in:
Raivis Dejus 2025-02-08 10:44:03 +02:00 committed by GitHub
commit e40ae5134e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 7 additions and 10 deletions

View file

@@ -580,6 +580,7 @@ msgstr "关于"
msgid "Preferences..."
msgstr "偏好设置..."
#: buzz/widgets/menu_bar.py:51 buzz/widgets/menu_bar.py:61
msgid "Help"
msgstr "帮助"

View file

@@ -395,23 +395,19 @@ def download_faster_whisper_model(
if size == WhisperModelSize.CUSTOM:
repo_id = custom_repo_id
elif size == WhisperModelSize.LARGEV3:
repo_id = "Systran/faster-whisper-large-v3"
# Maybe switch to 'mobiuslabsgmbh/faster-whisper-large-v3-turbo', seems to be used in
# faster-whisper code https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L29
# If so changes needed also in whisper_file_transcriber.py
# Replicating models from faster-whisper code https://github.com/SYSTRAN/faster-whisper/blob/master/faster_whisper/utils.py#L29
# Changes to turbo model also in whisper_file_transcriber.py
elif size == WhisperModelSize.LARGEV3TURBO:
repo_id = "deepdml/faster-whisper-large-v3-turbo-ct2"
repo_id = "mobiuslabsgmbh/faster-whisper-large-v3-turbo"
else:
repo_id = "guillaumekln/faster-whisper-%s" % size
repo_id = "Systran/faster-whisper-%s" % size
allow_patterns = [
"model.bin", # largest by size first
"pytorch_model.bin", # possible alternative model filename
"config.json",
"tokenizer.json",
"vocabulary.txt",
"vocabulary.json",
"vocabulary.*",
]
if local_files_only:

View file

@@ -141,7 +141,7 @@ class WhisperFileTranscriber(FileTranscriber):
if task.transcription_options.model.whisper_model_size == WhisperModelSize.CUSTOM:
model_size_or_path = task.transcription_options.model.hugging_face_model_id
elif task.transcription_options.model.whisper_model_size == WhisperModelSize.LARGEV3TURBO:
model_size_or_path = "deepdml/faster-whisper-large-v3-turbo-ct2"
model_size_or_path = "mobiuslabsgmbh/faster-whisper-large-v3-turbo"
else:
model_size_or_path = task.transcription_options.model.whisper_model_size.to_faster_whisper_model_size()