1026 translation improvements (#1380)

This commit is contained in:
Raivis Dejus 2026-02-08 15:13:21 +02:00 committed by GitHub
commit 795da67f20
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
27 changed files with 1319 additions and 149 deletions

View file

@ -1,5 +1,5 @@
# Change also in pyproject.toml and buzz/__version__.py
version := 1.4.3
version := 1.4.4
mac_app_path := ./dist/Buzz.app
mac_zip_path := ./dist/Buzz-${version}-mac.zip

View file

@ -1 +1 @@
VERSION = "1.4.3"
VERSION = "1.4.4"

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: buzz\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2025-10-17 07:59+0200\n"
"Last-Translator: Éric Duarte <contacto@ericdq.com>\n"
"Language-Team: Catalan <jmas@softcatala.org>\n"
@ -375,6 +375,7 @@ msgid "Model:"
msgstr "Model:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
"L'ús per primera vegada d'un model pot trigar diversos minuts a carregar-se."
@ -676,11 +677,11 @@ msgstr "Selecciona la carpeta d'exportació"
msgid "Select Background Color"
msgstr "Selecciona un fitxer d'àudio"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "S'ha produït un error en iniciar un enregistrament nou:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1036,14 +1037,15 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "No s'ha pogut desar la clau OpenAI API a l'anell de claus"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
"El servidor Whisper no s'ha pogut iniciar. Consulteu els registres per "
"obtenir més informació."
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1417,15 +1419,15 @@ msgstr "Sundanès"
msgid "Cantonese"
msgstr "Cantonès"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "S'ha produït un error de connexió"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr "Començant Whisper.cpp..."
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Cancel·la la transcripció"
@ -1510,6 +1512,12 @@ msgstr "Afegeix a sobre"
msgid "Append and correct"
msgstr "Afegeix i corregeix"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Configuració de la traducció"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -2,7 +2,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: \n"
"Last-Translator: Ole Guldberg2 <xalt7x.service@gmail.com>\n"
"Language-Team: \n"
@ -374,6 +374,7 @@ msgid "Model:"
msgstr "Model:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr "Første gang kan brug af en model tage flere minutter at indlæse."
@ -673,11 +674,11 @@ msgstr "Vælg eksport-mappe"
msgid "Select Background Color"
msgstr "Vælg audio-fil"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Der skete en fejl ved opstart af en ny optagelse:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1030,12 +1031,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "Kan ikke gemme OpenAI API-nøgle i nøgleringen"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1407,15 +1409,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Der er opstået en forbindelsesfejl"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Afbryd transkription"
@ -1501,6 +1503,12 @@ msgstr "Tilføj herover"
msgid "Append and correct"
msgstr "Tilføj og ret"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Oversættelsesindstillinger"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -6,7 +6,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2025-03-05 14:41+0100\n"
"Last-Translator: \n"
"Language-Team: \n"
@ -374,6 +374,7 @@ msgid "Model:"
msgstr "Modell:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
"Bei der ersten Verwendung eines Modells kann das Laden mehrere Minuten "
@ -674,11 +675,11 @@ msgstr "Exportordner auswählen"
msgid "Select Background Color"
msgstr "Audiodatei auswählen"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Beim Starten einer neuen Aufnahme ist ein Fehler aufgetreten:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1032,12 +1033,13 @@ msgstr ""
"Der OpenAI-API-Schlüssel kann nicht im Schlüsselbund gespeichert werden"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1409,15 +1411,15 @@ msgstr "Sundanesisch"
msgid "Cantonese"
msgstr "Kantonesisch"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Ein Verbindungsfehler ist aufgetreten"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Transkription abbrechen"
@ -1503,6 +1505,12 @@ msgstr "Oben anhängen"
msgid "Append and correct"
msgstr "Anhängen und korrigieren"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Übersetzungseinstellungen"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@ -364,6 +364,7 @@ msgid "Model:"
msgstr ""
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
@ -653,11 +654,11 @@ msgstr ""
msgid "Select Background Color"
msgstr ""
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr ""
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1002,12 +1003,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1378,15 +1380,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
msgid "Starting transcription..."
msgstr ""
@ -1470,6 +1472,11 @@ msgstr ""
msgid "Append and correct"
msgstr ""
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
msgid "Translation error, see logs!"
msgstr ""
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2025-09-08 12:43+0200\n"
"Last-Translator: Éric Duarte <contacto@ericdq.com>\n"
"Language-Team: \n"
@ -387,6 +387,7 @@ msgid "Model:"
msgstr "Modelo:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
"El uso por primera vez de un modelo puede tardar varios minutos en cargarse."
@ -712,12 +713,12 @@ msgid "Select Background Color"
msgstr "Seleccionar archivo de audio"
# automatic translation
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Se produjo un error al iniciar una grabación nueva:"
# automatic translation
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1089,14 +1090,15 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "No se puede guardar la clave de la API de OpenAI en el llavero"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
"El servidor Whisper no se pudo iniciar. Consulta los registros para obtener "
"más detalles."
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1471,16 +1473,16 @@ msgstr "Sundanés"
msgid "Cantonese"
msgstr "Cantonés"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Se ha producido un error de conexión"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr "Iniciando Whisper.cpp..."
# automatic translation
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Cancelar transcripción"
@ -1571,6 +1573,12 @@ msgstr "Añadir arriba"
msgid "Append and correct"
msgstr "Añadir y corregir"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Ajustes de traducción"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -6,7 +6,7 @@ msgid ""
msgstr ""
"Project-Id-Version: buzz\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2026-01-25 21:42+0200\n"
"Language-Team: (Italiano) Albano Battistella <albanobattistella@gmail.com>\n"
"Language: it_IT\n"
@ -378,6 +378,7 @@ msgid "Model:"
msgstr "Modello:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
"Il caricamento di un modello al primo utilizzo potrebbe richiedere diversi "
@ -674,11 +675,11 @@ msgstr "Seleziona il colore del testo"
msgid "Select Background Color"
msgstr "Seleziona il colore di sfondo"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Si è verificato un errore durante l'avvio della nuova registrazione:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1040,13 +1041,14 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "Impossibile salvare la chiave API OpenAI nel portachiavi"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
"Impossibile avviare il server Whisper. Controllare i log per i dettagli."
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1420,15 +1422,15 @@ msgstr "Sundanese"
msgid "Cantonese"
msgstr "Cantonese"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Si è verificato un errore di connessione"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr "Avvio di Whisper.cpp..."
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
msgid "Starting transcription..."
msgstr "Inizio trascrizione..."
@ -1512,6 +1514,12 @@ msgstr "Aggiungere sopra"
msgid "Append and correct"
msgstr "Aggiungere e correggere"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Impostazioni di traduzione"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -2,7 +2,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: \n"
"Last-Translator: nunawa <71294849+nunawa@users.noreply.github.com>\n"
"Language-Team: \n"
@ -370,6 +370,7 @@ msgid "Model:"
msgstr "モデル:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
@ -669,11 +670,11 @@ msgstr "出力フォルダを選択"
msgid "Select Background Color"
msgstr "音声ファイルを選択"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "新規録音開始時にエラーが発生しました:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1025,12 +1026,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "OpenAI API キーをkeyringに保存できません"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1402,15 +1404,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "接続エラーが発生しました"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "文字起こしをキャンセルする"
@ -1496,6 +1498,12 @@ msgstr ""
msgid "Append and correct"
msgstr ""
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "翻訳設定"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"PO-Revision-Date: 2026-02-06 20:32+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2026-02-08 13:58+0200\n"
"Last-Translator: \n"
"Language-Team: \n"
"Language: lv_LV\n"
@ -380,6 +380,7 @@ msgid "Model:"
msgstr "Modelis:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr "Pirmā modeļa ielādes reize var aizņemt pat vairākas minūtes."
@ -674,11 +675,11 @@ msgstr "Izvēlieties teksta krāsu"
msgid "Select Background Color"
msgstr "Izvēlieties fona krāsu"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Sākot jaunu ierakstu notikusi kļūda:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1031,14 +1032,15 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "Neizdevās saglabāt OpenAI API atslēgu atslēgu saišķī"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
"Whisper serverim neizdevās ieslēgties. Lūdzu pārbaudiet lietotnes žurnāla "
"ierakstus."
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1412,15 +1414,15 @@ msgstr "Sundāņu"
msgid "Cantonese"
msgstr "Kantonas"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Notika savienojuma kļūda"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr "Palaiž Whisper.cpp..."
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
msgid "Starting transcription..."
msgstr "Sāk atpazīšanu..."
@ -1504,6 +1506,11 @@ msgstr "Jaunie teikumi augšā"
msgid "Append and correct"
msgstr "Papildināt un labot esošo"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
msgid "Translation error, see logs!"
msgstr "Kļūda tulkojot, skatiet sistēmas žurnālu!"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2025-03-20 18:30+0100\n"
"Last-Translator: Heimen Stoffels <vistausss@fastmail.com>\n"
"Language-Team: none\n"
@ -376,6 +376,7 @@ msgid "Model:"
msgstr "Model:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
"Let op: de eerste keer kan het enkele minuten duren voordat het model "
@ -676,11 +677,11 @@ msgstr "Kies een exportmap"
msgid "Select Background Color"
msgstr "Kies een audiobestand"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Er is een fout opgetreden tijdens het starten van de opname:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1031,12 +1032,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "De OpenAI-api-sleutel kan niet worden bewaard in de sleutelbos"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1408,15 +1410,15 @@ msgstr "Soedanees"
msgid "Cantonese"
msgstr "Kantonees"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Er is een verbindingsfout opgetreden"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Transcriptie wissen"
@ -1502,6 +1504,12 @@ msgstr "Bovenaan toevoegen"
msgid "Append and correct"
msgstr "Toevoegen en corrigeren"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Vertaalinstellingen"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2024-03-17 20:50+0200\n"
"Last-Translator: \n"
"Language-Team: \n"
@ -377,6 +377,7 @@ msgid "Model:"
msgstr "Model:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
@ -681,11 +682,11 @@ msgstr "Wybierz plik audio"
msgid "Select Background Color"
msgstr "Wybierz plik audio"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Wystąpił błąd podczas rozpoczęcia nowego nagrania:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1043,12 +1044,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1421,15 +1423,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Anuluj transkrypcję"
@ -1520,6 +1522,11 @@ msgstr ""
msgid "Append and correct"
msgstr ""
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
msgid "Translation error, see logs!"
msgstr ""
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Buzz\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2025-11-01 17:43-0300\n"
"Last-Translator: Paulo Schopf <pschopf@gmail.com>\n"
"Language-Team: none\n"
@ -374,6 +374,7 @@ msgid "Model:"
msgstr "Modelo:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr "O primeiro uso de um modelo pode levar vários minutos para carregar."
@ -674,11 +675,11 @@ msgstr "Selecionar Pasta de Exportação"
msgid "Select Background Color"
msgstr "Selecionar arquivo de áudio"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "Ocorreu um erro ao iniciar uma nova gravação:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1034,12 +1035,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "Não foi possível salvar a chave da API OpenAI no cofre de chaves"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr "Falha ao iniciar o servidor Whisper. Verifique os logs."
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1413,15 +1415,15 @@ msgstr "Sundanês"
msgid "Cantonese"
msgstr "Cantonês"
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Ocorreu um erro de conexão"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr "Iniciando Whisper.cpp..."
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Iniciando transcrição..."
@ -1506,6 +1508,12 @@ msgstr "Acrescentar acima"
msgid "Append and correct"
msgstr "Acrescentar e corrigir"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Configurações de tradução"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -2,7 +2,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: \n"
"Last-Translator: Yevhen Popok <xalt7x.service@gmail.com>\n"
"Language-Team: \n"
@ -372,6 +372,7 @@ msgid "Model:"
msgstr "Модель:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
@ -671,11 +672,11 @@ msgstr "Виберіть теку для експорту"
msgid "Select Background Color"
msgstr "Вибрати аудіофайл"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "При старті нового запису виникла помилка:"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1027,12 +1028,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "Не вдається додати до звʼязки ключів API-ключ OpenAI"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1404,15 +1406,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "Виникла помилка зʼєднання"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "Скасувати транскрипцію"
@ -1498,6 +1500,12 @@ msgstr ""
msgid "Append and correct"
msgstr ""
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "Налаштування перекладу"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2023-05-01 15:45+0800\n"
"Last-Translator: \n"
"Language-Team: lamb \n"
@ -380,6 +380,7 @@ msgid "Model:"
msgstr "模型:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr "首次使用模型可能需要几分钟的时间才能加载"
@ -684,11 +685,11 @@ msgstr "选择输出文件夹"
msgid "Select Background Color"
msgstr "选择音频文件"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "开始新录制时出错"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1043,12 +1044,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr "无法将OpenAI API密钥保存到密钥串"
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1421,15 +1423,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr "连接发生错误"
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "取消识别"
@ -1520,6 +1522,12 @@ msgstr "增加上方"
msgid "Append and correct"
msgstr "增加并纠正"
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
#, fuzzy
msgid "Translation error, see logs!"
msgstr "翻译设置"
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2026-02-06 20:31+0200\n"
"POT-Creation-Date: 2026-02-08 13:57+0200\n"
"PO-Revision-Date: 2023-05-01 15:45+0800\n"
"Last-Translator: \n"
"Language-Team: Lamb\n"
@ -375,6 +375,7 @@ msgid "Model:"
msgstr "模型:"
#: buzz/widgets/transcriber/transcription_options_group_box.py:113
#: buzz/transcriber/recording_transcriber.py:374
msgid "First time use of a model may take up to several minutest to load."
msgstr ""
@ -677,11 +678,11 @@ msgstr "選擇聲音檔案"
msgid "Select Background Color"
msgstr "選擇聲音檔案"
#: buzz/widgets/recording_transcriber_widget.py:836
#: buzz/widgets/recording_transcriber_widget.py:854
msgid "An error occurred while starting a new recording:"
msgstr "開始新錄製出錯"
#: buzz/widgets/recording_transcriber_widget.py:840
#: buzz/widgets/recording_transcriber_widget.py:858
msgid ""
"Please check your audio devices or check the application logs for more "
"information."
@ -1035,12 +1036,13 @@ msgid "Unable to save OpenAI API key to keyring"
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:57
#: buzz/transcriber/recording_transcriber.py:430
#: buzz/transcriber/recording_transcriber.py:100
#: buzz/transcriber/recording_transcriber.py:445
msgid "Whisper server failed to start. Check logs for details."
msgstr ""
#: buzz/transcriber/local_whisper_cpp_server_transcriber.py:60
#: buzz/transcriber/recording_transcriber.py:434
#: buzz/transcriber/recording_transcriber.py:449
msgid ""
"Whisper server failed to start due to insufficient memory. Please try again "
"with a smaller model. To force CPU mode use BUZZ_FORCE_CPU=TRUE environment "
@ -1413,15 +1415,15 @@ msgstr ""
msgid "Cantonese"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:247 buzz/model_loader.py:793
#: buzz/transcriber/recording_transcriber.py:254 buzz/model_loader.py:793
msgid "A connection error occurred"
msgstr ""
#: buzz/transcriber/recording_transcriber.py:361
#: buzz/transcriber/recording_transcriber.py:371
msgid "Starting Whisper.cpp..."
msgstr ""
#: buzz/transcriber/recording_transcriber.py:421
#: buzz/transcriber/recording_transcriber.py:436
#, fuzzy
msgid "Starting transcription..."
msgstr "取消錄製"
@ -1512,6 +1514,11 @@ msgstr ""
msgid "Append and correct"
msgstr ""
#: buzz/translator.py:79 buzz/translator.py:112 buzz/translator.py:124
#: buzz/translator.py:140
msgid "Translation error, see logs!"
msgstr ""
#: buzz/file_transcriber_queue_worker.py:154
msgid ""
"Speech extraction failed! Check your internet connection — a model may need "

View file

@ -1,17 +1,22 @@
import os
import re
import logging
import queue
from typing import Optional
from typing import Optional, List, Tuple
from openai import OpenAI, max_retries
from PyQt6.QtCore import QObject, pyqtSignal
from buzz.locale import _
from buzz.settings.settings import Settings
from buzz.store.keyring_store import get_password, Key
from buzz.transcriber.transcriber import TranscriptionOptions
from buzz.widgets.transcriber.advanced_settings_dialog import AdvancedSettingsDialog
BATCH_SIZE = 10
class Translator(QObject):
translation = pyqtSignal(str, int)
finished = pyqtSignal()
@ -51,6 +56,91 @@ class Translator(QObject):
max_retries=0
)
def _translate_single(self, transcript: str, transcript_id: int) -> Tuple[str, int]:
    """Translate a single transcript via the API. Returns (translation, transcript_id)."""
    response = None
    try:
        response = self.openai_client.chat.completions.create(
            model=self.transcription_options.llm_model,
            messages=[
                {"role": "system", "content": self.transcription_options.llm_prompt},
                {"role": "user", "content": transcript},
            ],
            timeout=60.0,
        )
    except Exception as exc:
        # Network / API failure — fall through to the error branch below.
        logging.error(f"Translation error! Server response: {exc}")

    # Guard clause: anything short of a well-formed first choice is an error.
    if not (response and response.choices and response.choices[0].message):
        logging.error(f"Translation error! Server response: {response}")
        return _("Translation error, see logs!"), transcript_id

    logging.debug(f"Received translation response: {response}")
    return response.choices[0].message.content, transcript_id
def _translate_batch(self, items: List[Tuple[str, int]]) -> List[Tuple[str, int]]:
    """Translate multiple transcripts in a single API call.

    Args:
        items: (transcript, transcript_id) pairs to translate together.

    Returns:
        List of (translation, transcript_id) in the same order as the input.
        Items that could not be translated carry a localized error message.
    """
    # Number each transcript so the model can return them in a parseable format.
    # NOTE: the throwaway loop targets are deliberately NOT named `_` — this
    # module imports the gettext alias `_` (from buzz.locale), and shadowing it
    # would make the `_("Translation error, see logs!")` calls below invoke a
    # plain string, raising TypeError on the error paths.
    numbered_parts = []
    for i, (transcript, _unused_id) in enumerate(items, 1):
        numbered_parts.append(f"[{i}] {transcript}")
    combined = "\n".join(numbered_parts)

    batch_prompt = (
        f"{self.transcription_options.llm_prompt}\n\n"
        f"You will receive {len(items)} numbered texts. "
        f"Process each one separately according to the instruction above "
        f"and return them in the exact same numbered format, e.g.:\n"
        f"[1] processed text\n[2] processed text"
    )

    try:
        completion = self.openai_client.chat.completions.create(
            model=self.transcription_options.llm_model,
            messages=[
                {"role": "system", "content": batch_prompt},
                {"role": "user", "content": combined}
            ],
            timeout=60.0,
        )
    except Exception as e:
        completion = None
        logging.error(f"Batch translation error! Server response: {e}")

    if not (completion and completion.choices and completion.choices[0].message):
        logging.error(f"Batch translation error! Server response: {completion}")
        # Whole batch failed — return the error placeholder for every item.
        return [(_("Translation error, see logs!"), tid) for _transcript, tid in items]

    response_text = completion.choices[0].message.content
    logging.debug(f"Received batch translation response: {response_text}")

    translations = self._parse_batch_response(response_text, len(items))

    # Pair parsed translations back with their ids; pad any missing tail
    # entries with the error placeholder so output length matches input.
    results = []
    for i, (_transcript, transcript_id) in enumerate(items):
        if i < len(translations):
            results.append((translations[i], transcript_id))
        else:
            results.append((_("Translation error, see logs!"), transcript_id))
    return results
@staticmethod
def _parse_batch_response(response: str, expected_count: int) -> List[str]:
    """Parse a numbered batch response like '[1] text\\n[2] text' into a list of strings."""
    # re.split with a capturing group returns an interleaved list:
    # [prefix, num1, text1, num2, text2, ...]
    pieces = re.split(r'\[(\d+)\]\s*', response)
    by_number = {}
    index = 1
    while index < len(pieces) - 1:
        by_number[int(pieces[index])] = pieces[index + 1].strip()
        index += 2
    # Fill any gaps in 1..expected_count with the localized error placeholder.
    return [
        by_number.get(n, _("Translation error, see logs!"))
        for n in range(1, expected_count + 1)
    ]
def start(self):
logging.debug("Starting translation queue")
@ -62,30 +152,32 @@ class Translator(QObject):
logging.debug("Translation queue received stop signal")
break
transcript, transcript_id = item
# Collect a batch: start with the first item, then drain more
batch = [item]
stop_after_batch = False
while len(batch) < BATCH_SIZE:
try:
next_item = self.queue.get_nowait()
if next_item is None:
stop_after_batch = True
break
batch.append(next_item)
except queue.Empty:
break
try:
completion = self.openai_client.chat.completions.create(
model=self.transcription_options.llm_model,
messages=[
{"role": "system", "content": self.transcription_options.llm_prompt},
{"role": "user", "content": transcript}
],
timeout=30.0,
)
except Exception as e:
completion = None
logging.error(f"Translation error! Server response: {e}")
if completion and completion.choices and completion.choices[0].message:
logging.debug(f"Received translation response: {completion}")
next_translation = completion.choices[0].message.content
if len(batch) == 1:
transcript, transcript_id = batch[0]
translation, tid = self._translate_single(transcript, transcript_id)
self.translation.emit(translation, tid)
else:
logging.error(f"Translation error! Server response: {completion}")
next_translation = "Translation error, see logs!"
logging.debug(f"Translating batch of {len(batch)} in single request")
results = self._translate_batch(batch)
for translation, tid in results:
self.translation.emit(translation, tid)
self.translation.emit(next_translation, transcript_id)
if stop_after_batch:
logging.debug("Translation queue received stop signal")
break
logging.debug("Translation queue stopped")
self.finished.emit()

View file

@ -1,7 +1,7 @@
[project]
name = "buzz-captions"
# Change also in Makefile and buzz/__version__.py
version = "1.4.3"
version = "1.4.4"
description = ""
authors = [{ name = "Chidi Williams", email = "williamschidi1@gmail.com" }]
requires-python = ">=3.12,<3.13"

View file

@ -1,7 +1,18 @@
import os
import pytest
from unittest.mock import patch
from buzz.model_loader import ModelDownloader,TranscriptionModel, ModelType, WhisperModelSize
from buzz.model_loader import (
ModelDownloader,
TranscriptionModel,
ModelType,
WhisperModelSize,
map_language_to_mms,
is_mms_model,
get_expected_whisper_model_size,
get_whisper_file_path,
WHISPER_MODEL_SIZES,
)
class TestModelLoader:
@ -23,3 +34,174 @@ class TestModelLoader:
assert model_path is not None, "Model path is None"
assert os.path.isdir(model_path), "Model path is not a directory"
assert len(os.listdir(model_path)) > 0, "Model directory is empty"
class TestMapLanguageToMms:
    """Tests for map_language_to_mms(): mapping ISO 639-1 codes to MMS (ISO 639-3) codes."""

    def test_empty_returns_english(self):
        # Empty language falls back to English.
        assert map_language_to_mms("") == "eng"

    def test_two_letter_known_code(self):
        assert map_language_to_mms("en") == "eng"
        assert map_language_to_mms("fr") == "fra"
        assert map_language_to_mms("lv") == "lav"

    def test_three_letter_code_returned_as_is(self):
        # Already-three-letter codes are passed through unchanged.
        assert map_language_to_mms("eng") == "eng"
        assert map_language_to_mms("fra") == "fra"

    def test_unknown_two_letter_code_returned_as_is(self):
        # Unknown codes are not rejected; they pass through untouched.
        assert map_language_to_mms("xx") == "xx"

    @pytest.mark.parametrize(
        "code,expected",
        [
            ("de", "deu"),
            ("es", "spa"),
            ("ja", "jpn"),
            ("zh", "cmn"),
            ("ar", "ara"),
        ],
    )
    def test_various_language_codes(self, code, expected):
        assert map_language_to_mms(code) == expected
class TestIsMmsModel:
    """Tests for is_mms_model(): substring detection of 'mms' in a model id."""

    def test_empty_string(self):
        assert is_mms_model("") is False

    def test_mms_in_model_id(self):
        assert is_mms_model("facebook/mms-1b-all") is True

    def test_mms_case_insensitive(self):
        # Detection ignores case.
        assert is_mms_model("facebook/MMS-1b-all") is True

    def test_non_mms_model(self):
        assert is_mms_model("openai/whisper-tiny") is False
class TestWhisperModelSize:
    """Tests for WhisperModelSize conversions to backend-specific size names."""

    def test_to_faster_whisper_model_size_large(self):
        # LARGE maps to the versioned "large-v1" name used by Faster Whisper.
        assert WhisperModelSize.LARGE.to_faster_whisper_model_size() == "large-v1"

    def test_to_faster_whisper_model_size_tiny(self):
        assert WhisperModelSize.TINY.to_faster_whisper_model_size() == "tiny"

    def test_to_faster_whisper_model_size_largev3(self):
        assert WhisperModelSize.LARGEV3.to_faster_whisper_model_size() == "large-v3"

    def test_to_whisper_cpp_model_size_large(self):
        assert WhisperModelSize.LARGE.to_whisper_cpp_model_size() == "large-v1"

    def test_to_whisper_cpp_model_size_tiny(self):
        assert WhisperModelSize.TINY.to_whisper_cpp_model_size() == "tiny"

    def test_str(self):
        # Human-readable names used in the UI.
        assert str(WhisperModelSize.TINY) == "Tiny"
        assert str(WhisperModelSize.LARGE) == "Large"
        assert str(WhisperModelSize.LARGEV3TURBO) == "Large-v3-turbo"
        assert str(WhisperModelSize.CUSTOM) == "Custom"
class TestModelType:
    """Tests for ModelType capability and availability flags."""

    def test_supports_initial_prompt(self):
        # Hugging Face pipelines are the only backend without initial-prompt support.
        assert ModelType.WHISPER.supports_initial_prompt is True
        assert ModelType.WHISPER_CPP.supports_initial_prompt is True
        assert ModelType.OPEN_AI_WHISPER_API.supports_initial_prompt is True
        assert ModelType.FASTER_WHISPER.supports_initial_prompt is True
        assert ModelType.HUGGING_FACE.supports_initial_prompt is False

    @pytest.mark.parametrize(
        "platform_system,platform_machine,expected_faster_whisper",
        [
            ("Linux", "x86_64", True),
            ("Windows", "AMD64", True),
            ("Darwin", "arm64", True),
            ("Darwin", "x86_64", False),  # Faster Whisper not available on macOS x86_64
        ],
    )
    def test_is_available(self, platform_system, platform_machine, expected_faster_whisper):
        # Patch platform detection so availability can be asserted per-OS/arch.
        with patch("platform.system", return_value=platform_system), \
             patch("platform.machine", return_value=platform_machine):
            # These should always be available
            assert ModelType.WHISPER.is_available() is True
            assert ModelType.HUGGING_FACE.is_available() is True
            assert ModelType.OPEN_AI_WHISPER_API.is_available() is True
            assert ModelType.WHISPER_CPP.is_available() is True
            # Faster Whisper depends on platform
            assert ModelType.FASTER_WHISPER.is_available() == expected_faster_whisper

    def test_is_manually_downloadable(self):
        # Hub-backed and API-backed models cannot be fetched as a single file.
        assert ModelType.WHISPER.is_manually_downloadable() is True
        assert ModelType.WHISPER_CPP.is_manually_downloadable() is True
        assert ModelType.FASTER_WHISPER.is_manually_downloadable() is True
        assert ModelType.HUGGING_FACE.is_manually_downloadable() is False
        assert ModelType.OPEN_AI_WHISPER_API.is_manually_downloadable() is False
class TestTranscriptionModel:
    """Tests for TranscriptionModel display names, defaults, and local paths."""

    def test_str_whisper(self):
        model = TranscriptionModel(
            model_type=ModelType.WHISPER, whisper_model_size=WhisperModelSize.TINY
        )
        assert str(model) == "Whisper (Tiny)"

    def test_str_whisper_cpp(self):
        model = TranscriptionModel(
            model_type=ModelType.WHISPER_CPP, whisper_model_size=WhisperModelSize.BASE
        )
        assert str(model) == "Whisper.cpp (Base)"

    def test_str_hugging_face(self):
        # Hugging Face models display their repo id instead of a size.
        model = TranscriptionModel(
            model_type=ModelType.HUGGING_FACE,
            hugging_face_model_id="openai/whisper-tiny",
        )
        assert str(model) == "Hugging Face (openai/whisper-tiny)"

    def test_str_faster_whisper(self):
        model = TranscriptionModel(
            model_type=ModelType.FASTER_WHISPER,
            whisper_model_size=WhisperModelSize.SMALL,
        )
        assert str(model) == "Faster Whisper (Small)"

    def test_str_openai_api(self):
        # API-backed model has no size component in its name.
        model = TranscriptionModel(model_type=ModelType.OPEN_AI_WHISPER_API)
        assert str(model) == "OpenAI Whisper API"

    def test_default(self):
        # The default model must be a valid type that is available on this platform.
        model = TranscriptionModel.default()
        assert model.model_type in list(ModelType)
        assert model.model_type.is_available() is True

    def test_get_local_model_path_openai_api(self):
        # API models have no local file; an empty path is returned.
        model = TranscriptionModel(model_type=ModelType.OPEN_AI_WHISPER_API)
        assert model.get_local_model_path() == ""
class TestGetExpectedWhisperModelSize:
    """Tests for get_expected_whisper_model_size(): expected download sizes in bytes."""

    def test_known_sizes(self):
        assert get_expected_whisper_model_size(WhisperModelSize.TINY) == 72 * 1024 * 1024
        assert get_expected_whisper_model_size(WhisperModelSize.LARGE) == 2870 * 1024 * 1024

    def test_unknown_size_returns_none(self):
        # Sizes not in the table (custom / external models) yield None.
        assert get_expected_whisper_model_size(WhisperModelSize.CUSTOM) is None
        assert get_expected_whisper_model_size(WhisperModelSize.LUMII) is None

    def test_all_defined_sizes_have_values(self):
        # Sanity check: every tabulated size is a positive byte count.
        for size in WHISPER_MODEL_SIZES:
            assert WHISPER_MODEL_SIZES[size] > 0
class TestGetWhisperFilePath:
    """Tests for get_whisper_file_path(): on-disk location for Whisper model files."""

    def test_custom_size(self):
        # Custom models resolve to a "custom" path with no fixed extension.
        path = get_whisper_file_path(WhisperModelSize.CUSTOM)
        assert path.endswith("custom")
        assert "whisper" in path

    def test_tiny_size(self):
        # Standard sizes resolve to a ".pt" checkpoint file.
        path = get_whisper_file_path(WhisperModelSize.TINY)
        assert "whisper" in path
        assert path.endswith(".pt")

View file

@ -1,9 +1,10 @@
import pytest
import unittest.mock
import uuid
from PyQt6.QtCore import QCoreApplication, QThread
from buzz.file_transcriber_queue_worker import FileTranscriberQueueWorker
from buzz.model_loader import ModelType, TranscriptionModel, WhisperModelSize
from buzz.transcriber.transcriber import FileTranscriptionTask, TranscriptionOptions, FileTranscriptionOptions
from buzz.transcriber.transcriber import FileTranscriptionTask, TranscriptionOptions, FileTranscriptionOptions, Segment
from buzz.transcriber.whisper_file_transcriber import WhisperFileTranscriber
from tests.audio import test_multibyte_utf8_audio_path
import time
@ -31,6 +32,106 @@ def worker(qapp):
thread.wait()
@pytest.fixture
def simple_worker(qapp):
    """A non-threaded worker for unit tests that only test individual methods."""
    # No QThread is started here; the worker's run loop never executes,
    # so tests can call its methods synchronously.
    worker = FileTranscriberQueueWorker()
    yield worker
class TestFileTranscriberQueueWorker:
    """Unit tests for FileTranscriberQueueWorker methods, using the non-threaded
    simple_worker fixture so no run loop executes in the background."""

    def test_cancel_task_adds_to_canceled_set(self, simple_worker):
        task_id = uuid.uuid4()
        simple_worker.cancel_task(task_id)
        assert task_id in simple_worker.canceled_tasks

    def test_add_task_removes_from_canceled(self, simple_worker):
        # Re-adding a previously canceled task must clear its canceled status.
        options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP, whisper_model_size=WhisperModelSize.TINY),
            extract_speech=False
        )
        task = FileTranscriptionTask(
            file_path=str(test_multibyte_utf8_audio_path),
            transcription_options=options,
            file_transcription_options=FileTranscriptionOptions(),
            model_path="mock_path"
        )
        # First cancel it
        simple_worker.cancel_task(task.uid)
        assert task.uid in simple_worker.canceled_tasks
        # Prevent trigger_run from starting the run loop
        simple_worker.is_running = True
        # Then add it back
        simple_worker.add_task(task)
        assert task.uid not in simple_worker.canceled_tasks

    def test_on_task_error_with_cancellation(self, simple_worker):
        # A cancellation-flavored error message marks the task CANCELED, not FAILED.
        options = TranscriptionOptions()
        task = FileTranscriptionTask(
            file_path=str(test_multibyte_utf8_audio_path),
            transcription_options=options,
            file_transcription_options=FileTranscriptionOptions(),
            model_path="mock_path"
        )
        simple_worker.current_task = task
        error_spy = unittest.mock.Mock()
        simple_worker.task_error.connect(error_spy)
        simple_worker.on_task_error("Transcription was canceled")
        error_spy.assert_called_once()
        assert task.status == FileTranscriptionTask.Status.CANCELED
        assert "canceled" in task.error.lower()

    def test_on_task_error_with_regular_error(self, simple_worker):
        # Any other error marks the task FAILED and preserves the message.
        options = TranscriptionOptions()
        task = FileTranscriptionTask(
            file_path=str(test_multibyte_utf8_audio_path),
            transcription_options=options,
            file_transcription_options=FileTranscriptionOptions(),
            model_path="mock_path"
        )
        simple_worker.current_task = task
        error_spy = unittest.mock.Mock()
        simple_worker.task_error.connect(error_spy)
        simple_worker.on_task_error("Some error occurred")
        error_spy.assert_called_once()
        assert task.status == FileTranscriptionTask.Status.FAILED
        assert task.error == "Some error occurred"

    def test_on_task_progress_conversion(self, simple_worker):
        # (current, total) tuples are converted to a 0..1 fraction on emit.
        options = TranscriptionOptions()
        task = FileTranscriptionTask(
            file_path=str(test_multibyte_utf8_audio_path),
            transcription_options=options,
            file_transcription_options=FileTranscriptionOptions(),
            model_path="mock_path"
        )
        simple_worker.current_task = task
        progress_spy = unittest.mock.Mock()
        simple_worker.task_progress.connect(progress_spy)
        simple_worker.on_task_progress((50, 100))
        progress_spy.assert_called_once()
        args = progress_spy.call_args[0]
        assert args[0] == task
        assert args[1] == 0.5

    def test_stop_puts_sentinel_in_queue(self, simple_worker):
        initial_size = simple_worker.tasks_queue.qsize()
        simple_worker.stop()
        # Sentinel (None) should be added to queue
        assert simple_worker.tasks_queue.qsize() == initial_size + 1
def test_transcription_with_whisper_cpp_tiny_no_speech_extraction(worker):
options = TranscriptionOptions(
model=TranscriptionModel(model_type=ModelType.WHISPER_CPP, whisper_model_size=WhisperModelSize.TINY),

View file

@ -5,16 +5,78 @@ import pytest
from buzz.transcriber.openai_whisper_api_file_transcriber import (
OpenAIWhisperAPIFileTranscriber,
append_segment,
)
from buzz.transcriber.transcriber import (
FileTranscriptionTask,
TranscriptionOptions,
FileTranscriptionOptions,
Segment,
)
from openai.types.audio import Transcription, Translation
class TestAppendSegment:
    """Tests for append_segment(): decoding raw UTF-8 bytes into a Segment.

    Timestamps are given in centiseconds and stored in milliseconds."""

    def test_valid_utf8(self):
        result = []
        success = append_segment(result, b"Hello world", 100, 200)
        assert success is True
        assert len(result) == 1
        assert result[0].start == 1000  # 100 centiseconds to ms
        assert result[0].end == 2000  # 200 centiseconds to ms
        assert result[0].text == "Hello world"

    def test_empty_bytes(self):
        # Empty input is a successful no-op: nothing is appended.
        result = []
        success = append_segment(result, b"", 100, 200)
        assert success is True
        assert len(result) == 0

    def test_invalid_utf8(self):
        result = []
        # Invalid UTF-8 sequence
        success = append_segment(result, b"\xff\xfe", 100, 200)
        assert success is False
        assert len(result) == 0

    def test_multibyte_utf8(self):
        # Multi-byte characters (Cyrillic) must survive the decode round-trip.
        result = []
        success = append_segment(result, "Привет".encode("utf-8"), 50, 150)
        assert success is True
        assert len(result) == 1
        assert result[0].text == "Привет"
class TestGetValue:
    """Tests for OpenAIWhisperAPIFileTranscriber.get_value(): uniform access to
    dict keys and object attributes, with an optional default."""

    def test_get_value_from_dict(self):
        obj = {"key": "value", "number": 42}
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "key") == "value"
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "number") == 42

    def test_get_value_from_object(self):
        class TestObj:
            key = "value"
            number = 42

        obj = TestObj()
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "key") == "value"
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "number") == 42

    def test_get_value_missing_key_dict(self):
        # Missing dict keys yield None, or the caller-supplied default.
        obj = {"key": "value"}
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "missing") is None
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "missing", "default") == "default"

    def test_get_value_missing_attribute_object(self):
        # Missing attributes behave the same as missing dict keys.
        class TestObj:
            key = "value"

        obj = TestObj()
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "missing") is None
        assert OpenAIWhisperAPIFileTranscriber.get_value(obj, "missing", "default") == "default"
class TestOpenAIWhisperAPIFileTranscriber:
@pytest.fixture
def mock_openai_client(self):

View file

@ -1,7 +1,8 @@
import os
import sys
import time
from unittest.mock import Mock, patch
import numpy as np
from unittest.mock import Mock, patch, MagicMock
from PyQt6.QtCore import QThread
@ -10,10 +11,72 @@ from buzz.assets import APP_BASE_DIR
from buzz.model_loader import TranscriptionModel, ModelType, WhisperModelSize
from buzz.transcriber.recording_transcriber import RecordingTranscriber
from buzz.transcriber.transcriber import TranscriptionOptions, Task
from buzz.settings.recording_transcriber_mode import RecordingTranscriberMode
from tests.mock_sounddevice import MockSoundDevice
from tests.model_loader import get_model_path
class TestAmplitude:
    """Tests for RecordingTranscriber.amplitude(): per the cases below it
    averages the absolute values of the array's maximum and minimum."""

    def test_symmetric_array(self):
        arr = np.array([1.0, -1.0, 2.0, -2.0])
        amplitude = RecordingTranscriber.amplitude(arr)
        assert amplitude == 2.0

    def test_asymmetric_array(self):
        arr = np.array([1.0, 2.0, 3.0, -1.0])
        amplitude = RecordingTranscriber.amplitude(arr)
        # (abs(3.0) + abs(-1.0)) / 2 = (3.0 + 1.0) / 2 = 2.0
        assert amplitude == 2.0

    def test_all_zeros(self):
        arr = np.array([0.0, 0.0, 0.0])
        amplitude = RecordingTranscriber.amplitude(arr)
        assert amplitude == 0.0

    def test_all_positive(self):
        arr = np.array([1.0, 2.0, 3.0, 4.0])
        amplitude = RecordingTranscriber.amplitude(arr)
        # (abs(4.0) + abs(1.0)) / 2 = (4.0 + 1.0) / 2 = 2.5
        assert amplitude == 2.5

    def test_all_negative(self):
        arr = np.array([-1.0, -2.0, -3.0, -4.0])
        amplitude = RecordingTranscriber.amplitude(arr)
        # (abs(-1.0) + abs(-4.0)) / 2 = (1.0 + 4.0) / 2 = 2.5
        assert amplitude == 2.5
class TestGetDeviceSampleRate:
    """Tests for RecordingTranscriber.get_device_sample_rate(): prefers
    Whisper's 16 kHz, falling back to the device default when unsupported."""

    def test_returns_default_16khz_when_supported(self):
        # check_input_settings not raising means 16 kHz is supported.
        with patch("sounddevice.check_input_settings"):
            rate = RecordingTranscriber.get_device_sample_rate(None)
            assert rate == 16000

    def test_falls_back_to_device_default(self):
        import sounddevice
        from sounddevice import PortAudioError

        def raise_error(*args, **kwargs):
            raise PortAudioError("Device doesn't support 16000")

        # 16 kHz rejected -> the device's own default_samplerate is used.
        device_info = {"default_samplerate": 44100}
        with patch("sounddevice.check_input_settings", side_effect=raise_error), \
             patch("sounddevice.query_devices", return_value=device_info):
            rate = RecordingTranscriber.get_device_sample_rate(0)
            assert rate == 44100

    def test_returns_default_when_query_fails(self):
        from sounddevice import PortAudioError

        def raise_error(*args, **kwargs):
            raise PortAudioError("Device doesn't support 16000")

        # No device info available -> fall back to 16 kHz anyway.
        with patch("sounddevice.check_input_settings", side_effect=raise_error), \
             patch("sounddevice.query_devices", return_value=None):
            rate = RecordingTranscriber.get_device_sample_rate(0)
            assert rate == 16000
class TestRecordingTranscriber:
def test_should_transcribe(self, qtbot):
@ -63,4 +126,400 @@ class TestRecordingTranscriber:
thread.quit()
thread.wait()
time.sleep(3)
# Ensure process is cleaned up
if transcriber.process and transcriber.process.poll() is None:
transcriber.process.terminate()
try:
transcriber.process.wait(timeout=2)
except:
pass
# Process pending events to ensure cleanup
from PyQt6.QtCore import QCoreApplication
QCoreApplication.processEvents()
time.sleep(0.1)
class TestRecordingTranscriberInit:
    """Tests for RecordingTranscriber.__init__ defaults and mode-dependent tuning."""

    def test_init_default_mode(self):
        transcription_options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language="en",
            task=Task.TRANSCRIBE,
        )
        with patch("sounddevice.check_input_settings"):
            transcriber = RecordingTranscriber(
                transcription_options=transcription_options,
                input_device_index=0,
                sample_rate=16000,
                model_path="/fake/path",
                sounddevice=MockSoundDevice(),
            )
            assert transcriber.transcription_options == transcription_options
            assert transcriber.input_device_index == 0
            assert transcriber.sample_rate == 16000
            assert transcriber.model_path == "/fake/path"
            # Default mode: 5-second batches, 0.15 s of audio kept between batches.
            assert transcriber.n_batch_samples == 5 * 16000
            assert transcriber.keep_sample_seconds == 0.15
            assert transcriber.is_running is False
            assert transcriber.openai_client is None

    def test_init_append_and_correct_mode(self):
        transcription_options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language="en",
            task=Task.TRANSCRIBE,
        )
        with patch("sounddevice.check_input_settings"), \
             patch("buzz.transcriber.recording_transcriber.Settings") as mock_settings_class:
            # Mock settings to return APPEND_AND_CORRECT mode (index 2 in the enum)
            mock_settings_instance = MagicMock()
            mock_settings_class.return_value = mock_settings_instance
            # Return 2 for APPEND_AND_CORRECT mode (it's the third item in the enum)
            mock_settings_instance.value.return_value = 2

            transcriber = RecordingTranscriber(
                transcription_options=transcription_options,
                input_device_index=0,
                sample_rate=16000,
                model_path="/fake/path",
                sounddevice=MockSoundDevice(),
            )
            # APPEND_AND_CORRECT mode should use smaller batch size and longer keep duration
            assert transcriber.n_batch_samples == 3 * 16000
            assert transcriber.keep_sample_seconds == 1.5

    def test_init_uses_default_sample_rate_when_none(self):
        transcription_options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language="en",
            task=Task.TRANSCRIBE,
        )
        with patch("sounddevice.check_input_settings"):
            transcriber = RecordingTranscriber(
                transcription_options=transcription_options,
                input_device_index=0,
                sample_rate=None,
                model_path="/fake/path",
                sounddevice=MockSoundDevice(),
            )
            # Should use default whisper sample rate
            assert transcriber.sample_rate == 16000
class TestStreamCallback:
    """Behaviour of RecordingTranscriber.stream_callback."""

    @staticmethod
    def _new_transcriber():
        # Builds a transcriber with device probing patched out and a mock
        # sounddevice backend injected, so no audio hardware is touched.
        options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language="en",
            task=Task.TRANSCRIBE,
        )
        with patch("sounddevice.check_input_settings"):
            return RecordingTranscriber(
                transcription_options=options,
                input_device_index=0,
                sample_rate=16000,
                model_path="/fake/path",
                sounddevice=MockSoundDevice(),
            )

    def test_stream_callback_adds_to_queue(self):
        """Incoming frames are appended to the internal sample queue."""
        recorder = self._new_transcriber()
        frames = np.array([[0.1], [0.2], [0.3], [0.4]], dtype=np.float32)

        before = recorder.queue.size
        recorder.stream_callback(frames, 4, None, None)

        # Four samples in, queue grows by four.
        assert recorder.queue.size == before + 4

    def test_stream_callback_emits_amplitude_changed(self):
        """Each callback emits exactly one positive amplitude reading."""
        recorder = self._new_transcriber()
        received = []
        recorder.amplitude_changed.connect(received.append)

        frames = np.array([[0.1], [0.2], [0.3], [0.4]], dtype=np.float32)
        recorder.stream_callback(frames, 4, None, None)

        assert len(received) == 1
        assert received[0] > 0

    def test_stream_callback_drops_data_when_queue_full(self):
        """Frames are discarded once the queue has reached max_queue_size."""
        recorder = self._new_transcriber()
        # Pre-fill the queue to capacity so the next write must be dropped.
        recorder.queue = np.ones(recorder.max_queue_size, dtype=np.float32)
        before = recorder.queue.size

        recorder.stream_callback(np.array([[0.1], [0.2]], dtype=np.float32), 2, None, None)

        # Queue size is unchanged: the new data was dropped, not appended.
        assert recorder.queue.size == before
class TestStopRecording:
    """Shutdown behaviour of RecordingTranscriber.stop_recording."""

    @staticmethod
    def _new_transcriber():
        # Device probing is patched so no real microphone is required.
        options = TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language="en",
            task=Task.TRANSCRIBE,
        )
        with patch("sounddevice.check_input_settings"):
            return RecordingTranscriber(
                transcription_options=options,
                input_device_index=0,
                sample_rate=16000,
                model_path="/fake/path",
                sounddevice=MockSoundDevice(),
            )

    def test_stop_recording_sets_is_running_false(self):
        """stop_recording() clears the running flag."""
        recorder = self._new_transcriber()
        recorder.is_running = True

        recorder.stop_recording()

        assert recorder.is_running is False

    def test_stop_recording_terminates_process(self):
        """A live server process is terminated and waited on with a 5s timeout."""
        recorder = self._new_transcriber()
        server = MagicMock()
        server.poll.return_value = None  # poll() None => process still running
        recorder.process = server

        recorder.stop_recording()

        server.terminate.assert_called_once()
        server.wait.assert_called_once_with(timeout=5)

    def test_stop_recording_skips_terminated_process(self):
        """A process that already exited is left alone entirely."""
        recorder = self._new_transcriber()
        server = MagicMock()
        server.poll.return_value = 0  # exit code present => already dead
        recorder.process = server

        recorder.stop_recording()

        server.terminate.assert_not_called()
        server.wait.assert_not_called()
class TestStartLocalWhisperServer:
    """Spawning and monitoring of the local whisper.cpp server process."""

    @staticmethod
    def _options(language="en"):
        # whisper.cpp transcription options; language is the only varying knob.
        return TranscriptionOptions(
            model=TranscriptionModel(model_type=ModelType.WHISPER_CPP),
            language=language,
            task=Task.TRANSCRIBE,
        )

    @staticmethod
    def _new_transcriber(options):
        # Must be called while sounddevice.check_input_settings is patched.
        return RecordingTranscriber(
            transcription_options=options,
            input_device_index=0,
            sample_rate=16000,
            model_path="/fake/path",
            sounddevice=MockSoundDevice(),
        )

    def test_start_local_whisper_server_creates_openai_client(self):
        """A healthy server process results in an OpenAI client being built."""
        with patch("sounddevice.check_input_settings"), \
                patch("subprocess.Popen") as popen_mock, \
                patch("time.sleep"):
            server = MagicMock()
            server.poll.return_value = None  # still running => healthy
            popen_mock.return_value = server

            recorder = self._new_transcriber(self._options())
            try:
                recorder.is_running = True
                recorder.start_local_whisper_server()

                assert recorder.openai_client is not None
                assert recorder.process is not None
            finally:
                # Reset state so no QThread warnings linger after the test.
                recorder.is_running = False
                recorder.process = None

    def test_start_local_whisper_server_with_language(self):
        """An explicit language is forwarded on the server command line."""
        with patch("sounddevice.check_input_settings"), \
                patch("subprocess.Popen") as popen_mock, \
                patch("time.sleep"):
            server = MagicMock()
            server.poll.return_value = None
            popen_mock.return_value = server

            recorder = self._new_transcriber(self._options(language="fr"))
            try:
                recorder.is_running = True
                recorder.start_local_whisper_server()

                # Inspect the argv passed to Popen.
                launched_cmd = popen_mock.call_args[0][0]
                assert "--language" in launched_cmd
                assert "fr" in launched_cmd
            finally:
                recorder.is_running = False
                recorder.process = None

    def test_start_local_whisper_server_auto_language(self):
        """No language configured maps to '--language auto'."""
        with patch("sounddevice.check_input_settings"), \
                patch("subprocess.Popen") as popen_mock, \
                patch("time.sleep"):
            server = MagicMock()
            server.poll.return_value = None
            popen_mock.return_value = server

            recorder = self._new_transcriber(self._options(language=None))
            try:
                recorder.is_running = True
                recorder.start_local_whisper_server()

                launched_cmd = popen_mock.call_args[0][0]
                assert "--language" in launched_cmd
                assert "auto" in launched_cmd
            finally:
                recorder.is_running = False
                recorder.process = None

    def test_start_local_whisper_server_handles_failure(self):
        """A server that dies at startup leaves no client and reports via signal."""
        with patch("sounddevice.check_input_settings"), \
                patch("subprocess.Popen") as popen_mock, \
                patch("time.sleep"):
            server = MagicMock()
            server.poll.return_value = 1  # non-None poll => process exited
            server.stderr.read.return_value = b"Error loading model"
            popen_mock.return_value = server

            recorder = self._new_transcriber(self._options())
            emitted = []
            recorder.transcription.connect(emitted.append)
            try:
                recorder.is_running = True
                recorder.start_local_whisper_server()

                # No client is created when the server failed to come up.
                assert recorder.openai_client is None
                # At least the startup announcement was emitted first.
                assert len(emitted) >= 1
                assert "Whisper" in emitted[0]
            finally:
                recorder.is_running = False
                recorder.process = None

View file

@ -1,9 +1,69 @@
import os
import sys
import platform
from unittest.mock import patch
import pytest
from buzz.transformers_whisper import TransformersTranscriber
from buzz.transformers_whisper import TransformersTranscriber, is_intel_mac, is_peft_model
class TestIsIntelMac:
    """is_intel_mac() is True only for darwin running on an x86_64 machine."""

    @pytest.mark.parametrize(
        "sys_platform,machine,expected",
        [
            ("linux", "x86_64", False),
            ("win32", "x86_64", False),
            ("darwin", "arm64", False),
            ("darwin", "x86_64", True),
            ("darwin", "i386", False),
        ],
    )
    def test_is_intel_mac(self, sys_platform, machine, expected):
        # Patch both the OS platform and the CPU architecture at the module
        # under test so no real host information leaks into the check.
        platform_patch = patch("buzz.transformers_whisper.sys.platform", sys_platform)
        machine_patch = patch(
            "buzz.transformers_whisper.platform.machine", return_value=machine
        )
        with platform_patch, machine_patch:
            assert is_intel_mac() == expected
class TestIsPeftModel:
    """Detection of PEFT adapters by model-id suffix.

    Both '-peft' and '-PEFT' suffixes are expected to match; plain model
    ids and the empty string are not PEFT models.
    """

    @pytest.mark.parametrize(
        "model_id,expected",
        [
            ("openai/whisper-tiny-peft", True),
            ("user/model-PEFT", True),
            ("openai/whisper-tiny", False),
            ("facebook/mms-1b-all", False),
            ("", False),
        ],
    )
    def test_peft_detection(self, model_id, expected):
        assert is_peft_model(model_id) == expected
class TestGetPeftRepoId:
    """Resolution of a PEFT hub repo id from local cache snapshot paths."""

    def test_repo_id_returned_as_is(self):
        """A plain hub repo id (not an existing path) passes through unchanged."""
        transcriber = TransformersTranscriber("user/whisper-tiny-peft")
        with patch("os.path.exists", return_value=False):
            assert transcriber._get_peft_repo_id() == "user/whisper-tiny-peft"

    def test_linux_cache_path(self):
        """A POSIX HF-cache snapshot path maps back to 'user/whisper-peft'."""
        cache_path = (
            "/home/user/.cache/Buzz/models/models--user--whisper-peft/snapshots/abc123"
        )
        transcriber = TransformersTranscriber(cache_path)
        with patch("os.path.exists", return_value=True), \
                patch("buzz.transformers_whisper.os.sep", "/"):
            assert transcriber._get_peft_repo_id() == "user/whisper-peft"

    def test_windows_cache_path(self):
        """A Windows HF-cache snapshot path maps back the same way."""
        cache_path = (
            r"C:\Users\user\.cache\Buzz\models\models--user--whisper-peft\snapshots\abc123"
        )
        transcriber = TransformersTranscriber(cache_path)
        with patch("os.path.exists", return_value=True), \
                patch("buzz.transformers_whisper.os.sep", "\\"):
            assert transcriber._get_peft_repo_id() == "user/whisper-peft"

    def test_fallback_returns_model_id(self):
        """An existing path without the cache layout falls back to the raw id."""
        transcriber = TransformersTranscriber("some-local-model")
        with patch("os.path.exists", return_value=True):
            assert transcriber._get_peft_repo_id() == "some-local-model"
class TestGetMmsRepoId:

View file

@ -21,11 +21,59 @@ from buzz.transcriber.transcriber import (
FileTranscriptionOptions,
Segment,
)
from buzz.transcriber.whisper_file_transcriber import WhisperFileTranscriber
from buzz.transcriber.whisper_file_transcriber import (
WhisperFileTranscriber,
check_file_has_audio_stream,
PROGRESS_REGEX,
)
from tests.audio import test_audio_path
from tests.model_loader import get_model_path
class TestCheckFileHasAudioStream:
    """Input validation performed by check_file_has_audio_stream."""

    def test_valid_audio_file(self):
        """The bundled audio fixture passes without raising."""
        check_file_has_audio_stream(test_audio_path)

    def test_missing_file(self):
        """A nonexistent path raises ValueError mentioning 'File not found'."""
        with pytest.raises(ValueError, match="File not found"):
            check_file_has_audio_stream("/nonexistent/path/to/file.mp3")

    def test_invalid_media_file(self):
        """Garbage bytes behind an .mp3 suffix raise 'Invalid media file'."""
        # delete=False so the closed file can be reopened by the media
        # prober (required on Windows, where open files cannot be shared).
        handle = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        try:
            handle.write(b"This is not a valid media file")
            handle.close()
            with pytest.raises(ValueError, match="Invalid media file"):
                check_file_has_audio_stream(handle.name)
        finally:
            os.unlink(handle.name)
class TestProgressRegex:
    """Matching behaviour of the transcription progress-line regex."""

    def test_integer_percentage(self):
        """Whole-number percentages like '50%' are matched in full."""
        found = PROGRESS_REGEX.search("Progress: 50%")
        assert found is not None
        assert found.group() == "50%"

    def test_decimal_percentage(self):
        """Fractional percentages like '75.5%' are also matched."""
        found = PROGRESS_REGEX.search("Progress: 75.5%")
        assert found is not None
        assert found.group() == "75.5%"

    def test_no_match(self):
        """Lines without a percent token yield no match."""
        assert PROGRESS_REGEX.search("No percentage here") is None

    def test_extract_percentage_value(self):
        """The numeric value can be recovered by stripping the '%' sign."""
        found = PROGRESS_REGEX.search("Transcription progress: 85%")
        assert found is not None
        assert int(found.group().strip("%")) == 85
class TestWhisperFileTranscriber:
@pytest.mark.parametrize(
"file_path,output_format,expected_file_path",

View file

@ -8,6 +8,56 @@ from PyQt6.QtCore import QThread
from buzz.translator import Translator
from buzz.transcriber.transcriber import TranscriptionOptions
from buzz.widgets.transcriber.advanced_settings_dialog import AdvancedSettingsDialog
from buzz.locale import _
class TestParseBatchResponse:
    """Parsing of numbered '[n] text' batch translation responses."""

    def test_simple_batch(self):
        """Two well-formed entries come back in order."""
        parsed = Translator._parse_batch_response("[1] Hello\n[2] World", 2)
        assert len(parsed) == 2
        assert parsed[0] == "Hello"
        assert parsed[1] == "World"

    def test_missing_entries_fallback(self):
        """A skipped index is filled with the localized error placeholder."""
        parsed = Translator._parse_batch_response("[1] Hello\n[3] World", 3)
        assert len(parsed) == 3
        assert parsed[0] == "Hello"
        assert parsed[1] == _("Translation error, see logs!")
        assert parsed[2] == "World"

    def test_multiline_entries(self):
        """Entry text may span lines until the next [n] marker appears."""
        raw = "[1] This is a long\nmultiline translation\n[2] Short"
        parsed = Translator._parse_batch_response(raw, 2)
        assert len(parsed) == 2
        assert "multiline" in parsed[0]
        assert parsed[1] == "Short"

    def test_single_item_batch(self):
        """A batch of one parses cleanly."""
        parsed = Translator._parse_batch_response("[1] Single translation", 1)
        assert len(parsed) == 1
        assert parsed[0] == "Single translation"

    def test_empty_response(self):
        """An empty response yields one error placeholder per expected item."""
        parsed = Translator._parse_batch_response("", 2)
        assert len(parsed) == 2
        assert parsed[0] == _("Translation error, see logs!")
        assert parsed[1] == _("Translation error, see logs!")

    def test_whitespace_handling(self):
        """Trailing whitespace around each entry is stripped."""
        parsed = Translator._parse_batch_response(
            "[1] Hello with spaces \n[2] World ", 2
        )
        assert parsed[0] == "Hello with spaces"
        assert parsed[1] == "World"

    def test_out_of_order_entries(self):
        """Entries are placed by their [n] index, not by input order."""
        parsed = Translator._parse_batch_response("[2] Second\n[1] First", 2)
        assert parsed[0] == "First"
        assert parsed[1] == "Second"
class TestTranslator:
@ -25,6 +75,7 @@ class TestTranslator:
side_effect.call_count = 0
mock_queue.get.side_effect = side_effect
mock_queue.get_nowait.side_effect = Empty
mock_chat = Mock()
mock_openai.return_value.chat = mock_chat
mock_chat.completions.create.return_value = Mock(
@ -110,6 +161,10 @@ class TestTranslator:
self.translation_thread.quit()
# Wait for the thread to actually finish before cleanup
self.translation_thread.wait()
# Process pending events to ensure deleteLater() is handled
from PyQt6.QtCore import QCoreApplication
QCoreApplication.processEvents()
time.sleep(0.1) # Give time for cleanup
# Note: translator and translation_thread will be automatically deleted
# via the deleteLater() connections set up earlier

View file

@ -778,9 +778,13 @@ class TestTranscriptionViewerWidgetAdditional:
widget.close()
# TODO - it is sending actual requests, should mock
def test_run_translation(self, qtbot: QtBot, transcription, transcription_service, shortcuts):
@patch('buzz.translator.OpenAI')
def test_run_translation(self, mock_openai, qtbot: QtBot, transcription, transcription_service, shortcuts):
"""Test run_translation method"""
mock_openai.return_value.chat.completions.create.return_value = MagicMock(
choices=[MagicMock(message=MagicMock(content="Translated text"))]
)
widget = TranscriptionViewerWidget(
transcription, transcription_service, shortcuts
)

3
uv.lock generated
View file

@ -274,7 +274,7 @@ wheels = [
[[package]]
name = "buzz-captions"
version = "1.4.3"
version = "1.4.4"
source = { editable = "." }
dependencies = [
{ name = "accelerate" },
@ -1132,7 +1132,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f8/0a/a3871375c7b9727edaeeea994bfff7c63ff7804c9829c19309ba2e058807/greenlet-3.3.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:b01548f6e0b9e9784a2c99c5651e5dc89ffcbe870bc5fb2e5ef864e9cc6b5dcb", size = 276379, upload-time = "2025-12-04T14:23:30.498Z" },
{ url = "https://files.pythonhosted.org/packages/43/ab/7ebfe34dce8b87be0d11dae91acbf76f7b8246bf9d6b319c741f99fa59c6/greenlet-3.3.0-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:349345b770dc88f81506c6861d22a6ccd422207829d2c854ae2af8025af303e3", size = 597294, upload-time = "2025-12-04T14:50:06.847Z" },
{ url = "https://files.pythonhosted.org/packages/a4/39/f1c8da50024feecd0793dbd5e08f526809b8ab5609224a2da40aad3a7641/greenlet-3.3.0-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e8e18ed6995e9e2c0b4ed264d2cf89260ab3ac7e13555b8032b25a74c6d18655", size = 607742, upload-time = "2025-12-04T14:57:42.349Z" },
{ url = "https://files.pythonhosted.org/packages/77/cb/43692bcd5f7a0da6ec0ec6d58ee7cddb606d055ce94a62ac9b1aa481e969/greenlet-3.3.0-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:c024b1e5696626890038e34f76140ed1daf858e37496d33f2af57f06189e70d7", size = 622297, upload-time = "2025-12-04T15:07:13.552Z" },
{ url = "https://files.pythonhosted.org/packages/75/b0/6bde0b1011a60782108c01de5913c588cf51a839174538d266de15e4bf4d/greenlet-3.3.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:047ab3df20ede6a57c35c14bf5200fcf04039d50f908270d3f9a7a82064f543b", size = 609885, upload-time = "2025-12-04T14:26:02.368Z" },
{ url = "https://files.pythonhosted.org/packages/49/0e/49b46ac39f931f59f987b7cd9f34bfec8ef81d2a1e6e00682f55be5de9f4/greenlet-3.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d9ad37fc657b1102ec880e637cccf20191581f75c64087a549e66c57e1ceb53", size = 1567424, upload-time = "2025-12-04T15:04:23.757Z" },
{ url = "https://files.pythonhosted.org/packages/05/f5/49a9ac2dff7f10091935def9165c90236d8f175afb27cbed38fb1d61ab6b/greenlet-3.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:83cd0e36932e0e7f36a64b732a6f60c2fc2df28c351bae79fbaf4f8092fe7614", size = 1636017, upload-time = "2025-12-04T14:27:29.688Z" },