From 21ebbb37120331fec0845c5ff4003681a47b0bed Mon Sep 17 00:00:00 2001 From: raivisdejus Date: Sun, 22 Feb 2026 15:47:42 +0000 Subject: [PATCH] deploy: 4c9b249c50c0865609c64671b600a6e18af61f1e --- 404.html | 4 ++-- assets/js/2c266bbb.2d6b08c4.js | 1 + assets/js/2c266bbb.41936c9e.js | 1 - assets/js/runtime~main.02a8de98.js | 1 + assets/js/runtime~main.27d323e6.js | 1 - docs.html | 4 ++-- docs/cli.html | 4 ++-- docs/faq.html | 4 ++-- docs/installation.html | 4 ++-- docs/preferences.html | 4 ++-- docs/usage/edit_and_resize.html | 4 ++-- docs/usage/file_import.html | 4 ++-- docs/usage/live_recording.html | 7 ++++--- docs/usage/speaker_identification.html | 4 ++-- docs/usage/transcription_viewer.html | 4 ++-- docs/usage/translations.html | 4 ++-- index.html | 4 ++-- 17 files changed, 30 insertions(+), 29 deletions(-) create mode 100644 assets/js/2c266bbb.2d6b08c4.js delete mode 100644 assets/js/2c266bbb.41936c9e.js create mode 100644 assets/js/runtime~main.02a8de98.js delete mode 100644 assets/js/runtime~main.27d323e6.js diff --git a/404.html b/404.html index 818adf90..1e1633d1 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | Buzz - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/2c266bbb.2d6b08c4.js b/assets/js/2c266bbb.2d6b08c4.js new file mode 100644 index 00000000..eeb10ef6 --- /dev/null +++ b/assets/js/2c266bbb.2d6b08c4.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[799],{3905:(e,t,n)=>{n.d(t,{Zo:()=>s,kt:()=>g});var a=n(7294);function r(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);t&&(a=a.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,a)}return n}function i(e){for(var t=1;t=0||(r[n]=e[n]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var p=a.createContext({}),u=function(e){var t=a.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},s=function(e){var t=u(e.components);return a.createElement(p.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return a.createElement(a.Fragment,{},t)}},m=a.forwardRef((function(e,t){var n=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),c=u(n),m=r,g=c["".concat(p,".").concat(m)]||c[m]||d[m]||o;return n?a.createElement(g,i(i({ref:t},s),{},{components:n})):a.createElement(g,i({ref:t},s))}));function g(e,t){var n=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=n.length,i=new Array(o);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[c]="string"==typeof e?e:r,i[1]=l;for(var u=2;u{n.r(t),n.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>u});var a=n(7462),r=(n(7294),n(3905));const o={title:"Live Recording"},i=void 
0,l={unversionedId:"usage/live_recording",id:"usage/live_recording",title:"Live Recording",description:"To start a live recording:",source:"@site/docs/usage/2_live_recording.md",sourceDirName:"usage",slug:"/usage/live_recording",permalink:"/buzz/docs/usage/live_recording",draft:!1,tags:[],version:"current",sidebarPosition:2,frontMatter:{title:"Live Recording"},sidebar:"tutorialSidebar",previous:{title:"File Import",permalink:"/buzz/docs/usage/file_import"},next:{title:"Translations",permalink:"/buzz/docs/usage/translations"}},p={},u=[{value:"Advanced preferences",id:"advanced-preferences",level:4},{value:"Presentation Window",id:"presentation-window",level:4},{value:"Record audio playing from computer (macOS)",id:"record-audio-playing-from-computer-macos",level:3},{value:"Record audio playing from computer (Windows)",id:"record-audio-playing-from-computer-windows",level:3},{value:"Record audio playing from computer (Linux)",id:"record-audio-playing-from-computer-linux",level:3}],s={toc:u},c="wrapper";function d(e){let{components:t,...n}=e;return(0,r.kt)(c,(0,a.Z)({},s,n,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"To start a live recording:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Select a recording task, language, quality, and microphone."),(0,r.kt)("li",{parentName:"ul"},"Click Record.")),(0,r.kt)("blockquote",null,(0,r.kt)("p",{parentName:"blockquote"},(0,r.kt)("strong",{parentName:"p"},"Note:")," Transcribing audio using the default Whisper model is resource-intensive. Consider using the Whisper.cpp.\nIt supports GPU acceleration, if the model fits in GPU memory. 
Use smaller models for real-time performance.")),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"Field"),(0,r.kt)("th",{parentName:"tr",align:null},"Options"),(0,r.kt)("th",{parentName:"tr",align:null},"Default"),(0,r.kt)("th",{parentName:"tr",align:null},"Description"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Task"),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe", "Translate to English"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe" converts the input audio into text in the selected language, while "Translate to English" converts it into text in English.')),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Language"),(0,r.kt)("td",{parentName:"tr",align:null},"See ",(0,r.kt)("a",{parentName:"td",href:"https://github.com/openai/whisper#available-models-and-languages"},"Whisper's documentation")," for the full list of supported languages"),(0,r.kt)("td",{parentName:"tr",align:null},'"Detect Language"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Detect Language" will try to detect the spoken language in the audio based on the first few seconds. 
However, selecting a language is recommended (if known) as it will improve transcription quality in many cases.')),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Microphone"),(0,r.kt)("td",{parentName:"tr",align:null},"[Available system microphones]"),(0,r.kt)("td",{parentName:"tr",align:null},"[Default system microphone]"),(0,r.kt)("td",{parentName:"tr",align:null},"Microphone for recording input audio.")))),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://www.loom.com/share/564b753eb4d44b55b985b8abd26b55f7",title:"Live Recording on Buzz"},(0,r.kt)("img",{parentName:"a",src:"https://cdn.loom.com/sessions/thumbnails/564b753eb4d44b55b985b8abd26b55f7-with-play.gif",alt:"Live Recording on Buzz"}))),(0,r.kt)("h4",{id:"advanced-preferences"},"Advanced preferences"),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Silence threshold")," Set threshold to for transcriptions to be processed. If average volume level is under this setting the sentence will not be transcribed. Available since 1.4.4.\n",(0,r.kt)("strong",{parentName:"p"},"Line separator")," Marking to add to the transcription and translation lines. Default value is two new lines (",(0,r.kt)("inlineCode",{parentName:"p"},"\\n\\n"),") that result in an empty space between translation or transcription lines. To have no empty line use ",(0,r.kt)("inlineCode",{parentName:"p"},"\\n"),". Available since 1.4.4."),(0,r.kt)("h4",{id:"presentation-window"},"Presentation Window"),(0,r.kt)("p",null,"Since 1.4.2 Buzz has an easy to use presentation window you can use to show live transcriptions during events and presentations. 
To open it start the recording and new options for the ",(0,r.kt)("inlineCode",{parentName:"p"},"Presentation window")," will appear."),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-macos"},"Record audio playing from computer (macOS)"),(0,r.kt)("p",null,"To record audio playing from an application on your computer, you may install an audio loopback driver (a program that\nlets you create virtual audio devices). The rest of this guide will\nuse ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole")," on Mac, but you can use other alternatives for your\noperating system (\nsee ",(0,r.kt)("a",{parentName:"p",href:"https://nerds.de/en/loopbeaudio.html"},"LoopBeAudio"),", ",(0,r.kt)("a",{parentName:"p",href:"https://rogueamoeba.com/loopback/"},"LoopBack"),",\nand ",(0,r.kt)("a",{parentName:"p",href:"https://vac.muzychenko.net/en/"},"Virtual Audio Cable"),")."),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Install ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/ExistentialAudio/BlackHole#option-2-install-via-homebrew"},"BlackHole via Homebrew")),(0,r.kt)("pre",{parentName:"li"},(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"brew install blackhole-2ch\n"))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Open Audio MIDI Setup from Spotlight or from ",(0,r.kt)("inlineCode",{parentName:"p"},"/Applications/Utilities/Audio Midi Setup.app"),"."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/spotlight.png",alt:"Open Audio MIDI Setup from Spotlight"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Click the '+' icon at the lower left corner and select 'Create Multi-Output Device'."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/createmulti-output.png",alt:"Create multi-output 
device"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Add your default speaker and BlackHole to the multi-output device."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/multi-output.png",alt:"Screenshot of multi-output device"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Select this multi-output device as your speaker (application or system-wide) to play audio into BlackHole.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Open Buzz, select BlackHole as your microphone, and record as before to see transcriptions from the audio playing\nthrough BlackHole."))),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-windows"},"Record audio playing from computer (Windows)"),(0,r.kt)("p",null,"To transcribe system audio you need to configure virtual audio device and connect output from the applications you whant to transcribe to this virtual speaker. After that you can select it as source in the Buzz."),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Install ",(0,r.kt)("a",{parentName:"p",href:"https://vb-audio.com/Cable/"},"VB CABLE")," as virtual audio device. ")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},'Configure using Windows Sound settings. Right-click on the speaker icon in the system tray and select "Open Sound settings". In the "Choose your output device" dropdown select "CABLE Input" to send all system sound to the virtual device or use "Advanced sound options" to select application that will output their sound to this device.'))),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-linux"},"Record audio playing from computer (Linux)"),(0,r.kt)("p",null,"As described on ",(0,r.kt)("a",{parentName:"p",href:"https://wiki.ubuntu.com/record_system_sound"},"Ubuntu Wiki")," on any Linux with pulse audio you can redirect application audio to a virtual speaker. 
After that you can select it as source in Buzz."),(0,r.kt)("p",null,"Overall steps:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Launch application that will produce the sound you want to transcribe and start the playback. For example start a video in a media player. "),(0,r.kt)("li",{parentName:"ol"},"Launch Buzz and open Live recording screen, so you see the settings."),(0,r.kt)("li",{parentName:"ol"},"Configure sound routing from the application you want to transcribe sound from to Buzz in ",(0,r.kt)("inlineCode",{parentName:"li"},"Recording tab")," of the PulseAudio Volume Control (",(0,r.kt)("inlineCode",{parentName:"li"},"pavucontrol"),").")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2c266bbb.41936c9e.js b/assets/js/2c266bbb.41936c9e.js deleted file mode 100644 index d5fda2be..00000000 --- a/assets/js/2c266bbb.41936c9e.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[799],{3905:(e,t,a)=>{a.d(t,{Zo:()=>s,kt:()=>g});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),u=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},s=function(e){var t=u(e.components);return n.createElement(p.Provider,{value:t},e.children)},c="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var 
a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),c=u(a),m=r,g=c["".concat(p,".").concat(m)]||c[m]||d[m]||o;return a?n.createElement(g,i(i({ref:t},s),{},{components:a})):n.createElement(g,i({ref:t},s))}));function g(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=m;var l={};for(var p in t)hasOwnProperty.call(t,p)&&(l[p]=t[p]);l.originalType=e,l[c]="string"==typeof e?e:r,i[1]=l;for(var u=2;u{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>l,toc:()=>u});var n=a(7462),r=(a(7294),a(3905));const o={title:"Live Recording"},i=void 0,l={unversionedId:"usage/live_recording",id:"usage/live_recording",title:"Live Recording",description:"To start a live recording:",source:"@site/docs/usage/2_live_recording.md",sourceDirName:"usage",slug:"/usage/live_recording",permalink:"/buzz/docs/usage/live_recording",draft:!1,tags:[],version:"current",sidebarPosition:2,frontMatter:{title:"Live Recording"},sidebar:"tutorialSidebar",previous:{title:"File Import",permalink:"/buzz/docs/usage/file_import"},next:{title:"Translations",permalink:"/buzz/docs/usage/translations"}},p={},u=[{value:"Record audio playing from computer (macOS)",id:"record-audio-playing-from-computer-macos",level:3},{value:"Record audio playing from computer (Windows)",id:"record-audio-playing-from-computer-windows",level:3},{value:"Record audio playing from computer (Linux)",id:"record-audio-playing-from-computer-linux",level:3}],s={toc:u},c="wrapper";function d(e){let{components:t,...a}=e;return(0,r.kt)(c,(0,n.Z)({},s,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"To start a live recording:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Select a recording task, language, quality, and microphone."),(0,r.kt)("li",{parentName:"ul"},"Click 
Record.")),(0,r.kt)("blockquote",null,(0,r.kt)("p",{parentName:"blockquote"},(0,r.kt)("strong",{parentName:"p"},"Note:")," Transcribing audio using the default Whisper model is resource-intensive. Consider using the Whisper.cpp.\nSince 1.3.0 it supports GPU acceleration, if the model fits in GPU memory. Use smaller models for real-time performance.")),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"Field"),(0,r.kt)("th",{parentName:"tr",align:null},"Options"),(0,r.kt)("th",{parentName:"tr",align:null},"Default"),(0,r.kt)("th",{parentName:"tr",align:null},"Description"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Task"),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe", "Translate to English"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Transcribe" converts the input audio into text in the selected language, while "Translate to English" converts it into text in English.')),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Language"),(0,r.kt)("td",{parentName:"tr",align:null},"See ",(0,r.kt)("a",{parentName:"td",href:"https://github.com/openai/whisper#available-models-and-languages"},"Whisper's documentation")," for the full list of supported languages"),(0,r.kt)("td",{parentName:"tr",align:null},'"Detect Language"'),(0,r.kt)("td",{parentName:"tr",align:null},'"Detect Language" will try to detect the spoken language in the audio based on the first few seconds. 
However, selecting a language is recommended (if known) as it will improve transcription quality in many cases.')),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Microphone"),(0,r.kt)("td",{parentName:"tr",align:null},"[Available system microphones]"),(0,r.kt)("td",{parentName:"tr",align:null},"[Default system microphone]"),(0,r.kt)("td",{parentName:"tr",align:null},"Microphone for recording input audio.")))),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"https://www.loom.com/share/564b753eb4d44b55b985b8abd26b55f7",title:"Live Recording on Buzz"},(0,r.kt)("img",{parentName:"a",src:"https://cdn.loom.com/sessions/thumbnails/564b753eb4d44b55b985b8abd26b55f7-with-play.gif",alt:"Live Recording on Buzz"}))),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Presentation Window")," Since 1.4.2 Buzz has an easy to use presentation window you can use to show live transcriptions during events and presentations. To open it start the recording and new options for the ",(0,r.kt)("inlineCode",{parentName:"p"},"Presentation window")," will appear."),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-macos"},"Record audio playing from computer (macOS)"),(0,r.kt)("p",null,"To record audio playing from an application on your computer, you may install an audio loopback driver (a program that\nlets you create virtual audio devices). 
The rest of this guide will\nuse ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/ExistentialAudio/BlackHole"},"BlackHole")," on Mac, but you can use other alternatives for your\noperating system (\nsee ",(0,r.kt)("a",{parentName:"p",href:"https://nerds.de/en/loopbeaudio.html"},"LoopBeAudio"),", ",(0,r.kt)("a",{parentName:"p",href:"https://rogueamoeba.com/loopback/"},"LoopBack"),",\nand ",(0,r.kt)("a",{parentName:"p",href:"https://vac.muzychenko.net/en/"},"Virtual Audio Cable"),")."),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Install ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/ExistentialAudio/BlackHole#option-2-install-via-homebrew"},"BlackHole via Homebrew")),(0,r.kt)("pre",{parentName:"li"},(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"brew install blackhole-2ch\n"))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Open Audio MIDI Setup from Spotlight or from ",(0,r.kt)("inlineCode",{parentName:"p"},"/Applications/Utilities/Audio Midi Setup.app"),"."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/spotlight.png",alt:"Open Audio MIDI Setup from Spotlight"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Click the '+' icon at the lower left corner and select 'Create Multi-Output Device'."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/createmulti-output.png",alt:"Create multi-output device"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Add your default speaker and BlackHole to the multi-output device."),(0,r.kt)("p",{parentName:"li"},(0,r.kt)("img",{parentName:"p",src:"https://existential.audio/howto/img/multi-output.png",alt:"Screenshot of multi-output device"}))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Select this multi-output device as your speaker (application or system-wide) to play audio into 
BlackHole.")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Open Buzz, select BlackHole as your microphone, and record as before to see transcriptions from the audio playing\nthrough BlackHole."))),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-windows"},"Record audio playing from computer (Windows)"),(0,r.kt)("p",null,"To transcribe system audio you need to configure virtual audio device and connect output from the applications you whant to transcribe to this virtual speaker. After that you can select it as source in the Buzz."),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},"Install ",(0,r.kt)("a",{parentName:"p",href:"https://vb-audio.com/Cable/"},"VB CABLE")," as virtual audio device. ")),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},'Configure using Windows Sound settings. Right-click on the speaker icon in the system tray and select "Open Sound settings". In the "Choose your output device" dropdown select "CABLE Input" to send all system sound to the virtual device or use "Advanced sound options" to select application that will output their sound to this device.'))),(0,r.kt)("h3",{id:"record-audio-playing-from-computer-linux"},"Record audio playing from computer (Linux)"),(0,r.kt)("p",null,"As described on ",(0,r.kt)("a",{parentName:"p",href:"https://wiki.ubuntu.com/record_system_sound"},"Ubuntu Wiki")," on any Linux with pulse audio you can redirect application audio to a virtual speaker. After that you can select it as source in Buzz."),(0,r.kt)("p",null,"Overall steps:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Launch application that will produce the sound you want to transcribe and start the playback. For example start a video in a media player. 
"),(0,r.kt)("li",{parentName:"ol"},"Launch Buzz and open Live recording screen, so you see the settings."),(0,r.kt)("li",{parentName:"ol"},"Configure sound routing from the application you want to transcribe sound from to Buzz in ",(0,r.kt)("inlineCode",{parentName:"li"},"Recording tab")," of the PulseAudio Volume Control (",(0,r.kt)("inlineCode",{parentName:"li"},"pavucontrol"),").")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.02a8de98.js b/assets/js/runtime~main.02a8de98.js new file mode 100644 index 00000000..81d486d8 --- /dev/null +++ b/assets/js/runtime~main.02a8de98.js @@ -0,0 +1 @@ +(()=>{"use strict";var e,t,r,o,a,n={},c={};function d(e){var t=c[e];if(void 0!==t)return t.exports;var r=c[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,d),r.loaded=!0,r.exports}d.m=n,d.c=c,e=[],d.O=(t,r,o,a)=>{if(!r){var n=1/0;for(b=0;b=a)&&Object.keys(d.O).every((e=>d.O[e](r[f])))?r.splice(f--,1):(c=!1,a0&&e[b-1][2]>a;b--)e[b]=e[b-1];e[b]=[r,o,a]},d.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return d.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,d.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);d.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var c=2&o&&e;"object"==typeof c&&!~t.indexOf(c);c=r(c))Object.getOwnPropertyNames(c).forEach((t=>n[t]=()=>e[t]));return n.default=()=>e,d.d(a,n),a},d.d=(e,t)=>{for(var r in 
t)d.o(t,r)&&!d.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},d.f={},d.e=e=>Promise.all(Object.keys(d.f).reduce(((t,r)=>(d.f[r](e,t),t)),[])),d.u=e=>"assets/js/"+({53:"935f2afb",150:"6ec58200",217:"3b8c55ea",237:"1df93b7f",382:"1102fda7",468:"1a20bc57",514:"1be78505",559:"36b603e5",598:"dbcee777",697:"409b9ecc",799:"2c266bbb",836:"0480b142",860:"3e407b54",884:"343de823",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{53:"15830391",150:"8170897d",217:"cd236734",237:"71d7b441",382:"cd2bd517",468:"af8afb84",514:"131974b6",559:"6df0f804",598:"cca98005",697:"7778f743",799:"2d6b08c4",836:"6861ca9e",860:"987a7018",884:"d33c8b39",918:"0aaaf3b3",958:"24fcb0eb",971:"2f3d6e44",972:"d5cc17b1"}[e]+".js",d.miniCssF=e=>{},d.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),d.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",d.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var c,f;if(void 0!==r)for(var i=document.getElementsByTagName("script"),b=0;b{c.onerror=c.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],c.parentNode&&c.parentNode.removeChild(c),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:c}),12e4);c.onerror=l.bind(null,c.onerror),c.onload=l.bind(null,c.onload),f&&document.head.appendChild(c)}},d.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},d.p="/buzz/",d.gca=function(e){return e={17896441:"918","935f2afb":"53","6ec58200":"150","3b8c55ea":"217","1df93b7f":"237","1102fda7":"382","1a20bc57":"468","1be78505":"514","36b603e5":"559",dbcee777:"598","409b9ecc":"697","2c266bbb":"799","0480b142":"836","3e407b54":"860","343de823":"884","6dbc2e00":"958",c377a04b:"971"}[e]||e,d.p+d.u(e)},(()=>{var e={303:0,532:0};d.f.j=(t,r)=>{var o=d.o(e,t)?e[t]:void 
0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=d.p+d.u(t),c=new Error;d.l(n,(r=>{if(d.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;c.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",c.name="ChunkLoadError",c.type=a,c.request=n,o[1](c)}}),"chunk-"+t,t)}},d.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],c=r[1],f=r[2],i=0;if(n.some((t=>0!==e[t]))){for(o in c)d.o(c,o)&&(d.m[o]=c[o]);if(f)var b=f(d)}for(t&&t(r);i{"use strict";var e,t,r,o,a,n={},c={};function f(e){var t=c[e];if(void 0!==t)return t.exports;var r=c[e]={id:e,loaded:!1,exports:{}};return n[e].call(r.exports,r,r.exports,f),r.loaded=!0,r.exports}f.m=n,f.c=c,e=[],f.O=(t,r,o,a)=>{if(!r){var n=1/0;for(b=0;b=a)&&Object.keys(f.O).every((e=>f.O[e](r[i])))?r.splice(i--,1):(c=!1,a0&&e[b-1][2]>a;b--)e[b]=e[b-1];e[b]=[r,o,a]},f.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return f.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,f.t=function(e,o){if(1&o&&(e=this(e)),8&o)return e;if("object"==typeof e&&e){if(4&o&&e.__esModule)return e;if(16&o&&"function"==typeof e.then)return e}var a=Object.create(null);f.r(a);var n={};t=t||[null,r({}),r([]),r(r)];for(var c=2&o&&e;"object"==typeof c&&!~t.indexOf(c);c=r(c))Object.getOwnPropertyNames(c).forEach((t=>n[t]=()=>e[t]));return n.default=()=>e,f.d(a,n),a},f.d=(e,t)=>{for(var r in 
t)f.o(t,r)&&!f.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},f.f={},f.e=e=>Promise.all(Object.keys(f.f).reduce(((t,r)=>(f.f[r](e,t),t)),[])),f.u=e=>"assets/js/"+({53:"935f2afb",150:"6ec58200",217:"3b8c55ea",237:"1df93b7f",382:"1102fda7",468:"1a20bc57",514:"1be78505",559:"36b603e5",598:"dbcee777",697:"409b9ecc",799:"2c266bbb",836:"0480b142",860:"3e407b54",884:"343de823",918:"17896441",958:"6dbc2e00",971:"c377a04b"}[e]||e)+"."+{53:"15830391",150:"8170897d",217:"cd236734",237:"71d7b441",382:"cd2bd517",468:"af8afb84",514:"131974b6",559:"6df0f804",598:"cca98005",697:"7778f743",799:"41936c9e",836:"6861ca9e",860:"987a7018",884:"d33c8b39",918:"0aaaf3b3",958:"24fcb0eb",971:"2f3d6e44",972:"d5cc17b1"}[e]+".js",f.miniCssF=e=>{},f.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),f.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o={},a="docs:",f.l=(e,t,r,n)=>{if(o[e])o[e].push(t);else{var c,i;if(void 0!==r)for(var d=document.getElementsByTagName("script"),b=0;b{c.onerror=c.onload=null,clearTimeout(s);var a=o[e];if(delete o[e],c.parentNode&&c.parentNode.removeChild(c),a&&a.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:c}),12e4);c.onerror=l.bind(null,c.onerror),c.onload=l.bind(null,c.onload),i&&document.head.appendChild(c)}},f.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},f.p="/buzz/",f.gca=function(e){return e={17896441:"918","935f2afb":"53","6ec58200":"150","3b8c55ea":"217","1df93b7f":"237","1102fda7":"382","1a20bc57":"468","1be78505":"514","36b603e5":"559",dbcee777:"598","409b9ecc":"697","2c266bbb":"799","0480b142":"836","3e407b54":"860","343de823":"884","6dbc2e00":"958",c377a04b:"971"}[e]||e,f.p+f.u(e)},(()=>{var e={303:0,532:0};f.f.j=(t,r)=>{var o=f.o(e,t)?e[t]:void 
0;if(0!==o)if(o)r.push(o[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var a=new Promise(((r,a)=>o=e[t]=[r,a]));r.push(o[2]=a);var n=f.p+f.u(t),c=new Error;f.l(n,(r=>{if(f.o(e,t)&&(0!==(o=e[t])&&(e[t]=void 0),o)){var a=r&&("load"===r.type?"missing":r.type),n=r&&r.target&&r.target.src;c.message="Loading chunk "+t+" failed.\n("+a+": "+n+")",c.name="ChunkLoadError",c.type=a,c.request=n,o[1](c)}}),"chunk-"+t,t)}},f.O.j=t=>0===e[t];var t=(t,r)=>{var o,a,n=r[0],c=r[1],i=r[2],d=0;if(n.some((t=>0!==e[t]))){for(o in c)f.o(c,o)&&(f.m[o]=c[o]);if(i)var b=i(f)}for(t&&t(r);d Introduction | Buzz - + @@ -20,7 +20,7 @@ real-time, Whisper.cpp (with Vulkan GPU acceleration), Faster Whisper, Whisper-compatible Hugging Face models, and the OpenAI Whisper API
  • Command-Line Interface
  • Speech separation before transcription for better accuracy on noisy audio
  • Speaker identification in transcribed media
  • Available on Mac, Windows, and Linux
  • - + \ No newline at end of file diff --git a/docs/cli.html b/docs/cli.html index 7d3bb6ee..589301c6 100644 --- a/docs/cli.html +++ b/docs/cli.html @@ -4,13 +4,13 @@ CLI | Buzz - +

    CLI

    Commands

    add

    Start a new transcription task.

    Usage: buzz add [options] [file url file...]

    Options:
    -t, --task <task> The task to perform. Allowed: translate,
    transcribe. Default: transcribe.
    -m, --model-type <model-type> Model type. Allowed: whisper, whispercpp,
    huggingface, fasterwhisper, openaiapi. Default:
    whisper.
    -s, --model-size <model-size> Model size. Use only when --model-type is
    whisper, whispercpp, or fasterwhisper. Allowed:
    tiny, base, small, medium, large. Default:
    tiny.
    --hfid <id> Hugging Face model ID. Use only when
    --model-type is huggingface. Example:
    "openai/whisper-tiny"
    -l, --language <code> Language code. Allowed: af (Afrikaans), am
    (Amharic), ar (Arabic), as (Assamese), az
    (Azerbaijani), ba (Bashkir), be (Belarusian),
    bg (Bulgarian), bn (Bengali), bo (Tibetan), br
    (Breton), bs (Bosnian), ca (Catalan), cs
    (Czech), cy (Welsh), da (Danish), de (German),
    el (Greek), en (English), es (Spanish), et
    (Estonian), eu (Basque), fa (Persian), fi
    (Finnish), fo (Faroese), fr (French), gl
    (Galician), gu (Gujarati), ha (Hausa), haw
    (Hawaiian), he (Hebrew), hi (Hindi), hr
    (Croatian), ht (Haitian Creole), hu
    (Hungarian), hy (Armenian), id (Indonesian), is
    (Icelandic), it (Italian), ja (Japanese), jw
    (Javanese), ka (Georgian), kk (Kazakh), km
    (Khmer), kn (Kannada), ko (Korean), la (Latin),
    lb (Luxembourgish), ln (Lingala), lo (Lao), lt
    (Lithuanian), lv (Latvian), mg (Malagasy), mi
    (Maori), mk (Macedonian), ml (Malayalam), mn
    (Mongolian), mr (Marathi), ms (Malay), mt
    (Maltese), my (Myanmar), ne (Nepali), nl
    (Dutch), nn (Nynorsk), no (Norwegian), oc
    (Occitan), pa (Punjabi), pl (Polish), ps
    (Pashto), pt (Portuguese), ro (Romanian), ru
    (Russian), sa (Sanskrit), sd (Sindhi), si
    (Sinhala), sk (Slovak), sl (Slovenian), sn
    (Shona), so (Somali), sq (Albanian), sr
    (Serbian), su (Sundanese), sv (Swedish), sw
    (Swahili), ta (Tamil), te (Telugu), tg (Tajik),
    th (Thai), tk (Turkmen), tl (Tagalog), tr
    (Turkish), tt (Tatar), uk (Ukrainian), ur
    (Urdu), uz (Uzbek), vi (Vietnamese), yi
    (Yiddish), yo (Yoruba), zh (Chinese). Leave
    empty to detect language.
    -p, --prompt <prompt> Initial prompt.
    -w, --word-timestamps Generate word-level timestamps. (available since 1.2.0)
    -e, --extract-speech Extract speech from audio before transcribing. (available since 1.3.0)
    --openai-token <token> OpenAI access token. Use only when
    --model-type is openaiapi. Defaults to your
    previously saved access token, if one exists.
    --srt Output result in an SRT file.
    --vtt Output result in a VTT file.
    --txt Output result in a TXT file.
    --hide-gui Hide the main application window. (available since 1.2.0)
    -h, --help Displays help on commandline options.
    --help-all Displays help including Qt specific options.
    -v, --version Displays version information.

    Arguments:
    files or urls Input file paths or urls. Url import available since 1.2.0.

    Examples:

    # Translate two MP3 files from French to English using OpenAI Whisper API
    buzz add --task translate --language fr --model-type openaiapi /Users/user/Downloads/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp3 /Users/user/Downloads/koaf9083k1lkpsfdi0.mp3

    # Transcribe an MP4 using Whisper.cpp "small" model and immediately export to SRT and VTT files
    buzz add --task transcribe --model-type whispercpp --model-size small --prompt "My initial prompt" --srt --vtt /Users/user/Downloads/buzz/1b3b03e4-8db5-ea2c-ace5-b71ff32e3304.mp4
    - + \ No newline at end of file diff --git a/docs/faq.html b/docs/faq.html index 7bb09981..a20f844c 100644 --- a/docs/faq.html +++ b/docs/faq.html @@ -4,7 +4,7 @@ FAQ | Buzz - + @@ -12,7 +12,7 @@

    FAQ

    1. Where are the models stored?

    The models are stored:

    • Linux: ~/.cache/Buzz
    • Mac OS: ~/Library/Caches/Buzz
    • Windows: %USERPROFILE%\AppData\Local\Buzz\Buzz\Cache

    Paste the location in your file manager to access the models or go to Help -> Preferences -> Models and click on Show file location button after downloading some model.

    2. What can I try if the transcription runs too slowly?

    Speech recognition requires a large amount of computation, so one option is to try using a lower Whisper model size or using a Whisper.cpp model to run speech recognition on your computer. If you have access to a computer with a GPU that has at least 6GB of VRAM you can try using the Faster Whisper model.

    Buzz also supports using OpenAI API to do speech recognition on a remote server. To use this feature you need to set OpenAI API key in Preferences. See Preferences section for more details.

    3. How to record system audio?

    To transcribe system audio you need to configure a virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as a source in Buzz. See Usage section for more details.

    Relevant tools:

    4. What model should I use?

    Model size to use will depend on your hardware and use case. Smaller models will work faster but will have more inaccuracies. Larger models will be more accurate but will require more powerful hardware or longer time to transcribe.

    When choosing among large models consider the following. "Large" is the first released older model, "Large-V2" is a later updated model with better accuracy, for some languages considered the most robust and stable. "Large-V3" is the latest model with the best accuracy in many cases, but sometimes can hallucinate or invent words that were never in the audio. "Turbo" model tries to get a good balance between speed and accuracy. The only sure way to know what model best suits your needs is to test them all in your language.

    In addition to choosing an appropriate model size you also can choose whisper type.

    • Whisper is initial OpenAI implementation, it is accurate but slow and requires a lot of RAM.
    • Faster Whisper is an optimized implementation, it is orders of magnitude faster than regular Whisper and requires less RAM. Use this option if you have an Nvidia GPU with at least 6GB of VRAM.
    • Whisper.cpp is an optimized C++ implementation, it is quite fast and efficient and will use any brand of GPU. Whisper.cpp is capable of running real time transcription even on a modern laptop with integrated GPU. It can also run on CPU only. Use this option if you do not have an Nvidia GPU.
    • HuggingFace option is a Transformers implementation and is good in that it supports a wide range of custom models that may be optimized for a particular language. This option also supports the MMS family of models from Meta AI that supports over 1000 of the world's languages as well as PEFT adjustments to Whisper models.

    5. How to get GPU acceleration for faster transcription?

    On Linux GPU acceleration is supported out of the box on Nvidia GPUs. If you still get any issues install CUDA 12, cuBLAS and cuDNN.

    On Windows GPU support is included in the installation .exe. CUDA 12 required, computers with older CUDA versions will use CPU. See this note on enabling CUDA GPU support.

    6. How to fix Unanticipated host error[PaErrorCode-9999]?

    Check if there are any system settings preventing apps from accessing the microphone.

    On Windows, see if Buzz has permission to use the microphone in Settings -> Privacy -> Microphone.

    See method 1 in this video https://www.youtube.com/watch?v=eRcCYgOuSYQ

    For method 2 there is no need to uninstall the antivirus, but see if you can temporarily disable it or if there are settings that may prevent Buzz from accessing the microphone.

    7. Can I use Buzz on a computer without internet?

    Yes, Buzz can be used without internet connection if you download the necessary models on some other computer that has the internet and manually move them to the offline computer. The easiest way to find where the models are stored is to go to Help -> Preferences -> Models. Then download some model, and push "Show file location" button. This will open the folder where the models are stored. Copy the models folder to the same location on the offline computer. F.e. for Linux it is .cache/Buzz/models in your home directory.

    8. Buzz crashes, what to do?

    If a model download was incomplete or corrupted, Buzz may crash. Try to delete the downloaded model files in Help -> Preferences -> Models and re-download them.

    If that does not help, check the log file for errors and report the issue so we can fix it. If possible attach the log file to the issue. Since Version 1.3.4, to get to the logs folder go to Help -> About Buzz and click on Show logs button.

    9. Where can I get latest development version?

    Latest development version will have the latest bug fixes and most recent features. If you feel a bit adventurous it is recommended to try the latest development version as they need some testing before they get released to everybody.

    • Linux users can get the latest version with this command sudo snap install buzz --edge

    • For other platforms do the following:

      1. Go to the build section
      2. Click on the link to the latest build, the most recent successful build entry in the list
      3. Scroll down to the artifacts section in the build page
      4. Download the installation file. Please note that you need to be logged in to GitHub to see the download links. Latest build example

    10. Why is my system theme not applied to Buzz installed from Flatpak?

    For dark themes on Gnome environments you may need to install gnome-themes-extra package and set the following preferences:

    gsettings set org.gnome.desktop.interface gtk-theme Adwaita-dark
    gsettings set org.gnome.desktop.interface color-scheme prefer-dark

    If your system theme is not applied to Buzz installed from Flatpak Linux app store, ensure the desired theme is in ~/.themes folder.

    You may need to copy the system themes to this folder cp -r /usr/share/themes/ ~/.themes/ and give Flatpaks access to this folder flatpak override --user --filesystem=~/.themes.

    On Fedora run the following to install the necessary packages sudo dnf install gnome-themes-extra qadwaitadecorations-qt{5,6} qt{5,6}-qtwayland

    - + \ No newline at end of file diff --git a/docs/installation.html b/docs/installation.html index a5775e4f..d6797b7f 100644 --- a/docs/installation.html +++ b/docs/installation.html @@ -4,14 +4,14 @@ Installation | Buzz - +

    Installation

    To install Buzz, download the latest version for your operating system. Buzz is available on Mac (Intel and Apple silicon), Windows, and Linux.

    macOS

    Download the .dmg from the SourceForge.

    Windows

    Get the installation files from the SourceForge.

    App is not signed, you will get a warning when you install it. Select More info -> Run anyway.

    Linux

    Buzz is available as a Flatpak or a Snap.

    To install flatpak, run:

    flatpak install flathub io.github.chidiwilliams.Buzz

    Download on Flathub

    To install snap, run:

    sudo apt-get install libportaudio2 libcanberra-gtk-module libcanberra-gtk3-module
    sudo snap install buzz
    sudo snap connect buzz:password-manager-service

    Get it from the Snap Store

    PyPI

    pip install buzz-captions
    python -m buzz

    On Linux install system dependencies you may be missing

    sudo apt-get install --no-install-recommends libyaml-dev libtbb-dev libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-shape0 libxcb-cursor0 libportaudio2 gettext libpulse0 ffmpeg

    On versions prior to Ubuntu 24.04 install sudo apt-get install --no-install-recommends libegl1-mesa

    - + \ No newline at end of file diff --git a/docs/preferences.html b/docs/preferences.html index 22755790..ba2a4279 100644 --- a/docs/preferences.html +++ b/docs/preferences.html @@ -4,7 +4,7 @@ Preferences | Buzz - + @@ -23,7 +23,7 @@ larger model. For example q_5 version. Whisper.cpp base models in d Increasing number of threads even more will lead in slower transcription time as results from parallel threads has to be combined to produce the final answer.

    BUZZ_TRANSLATION_API_BASE_URL - Base URL of OpenAI compatible API to use for translation.

    BUZZ_TRANSLATION_API_KEY - Api key of OpenAI compatible API to use for translation.

    BUZZ_MODEL_ROOT - Root directory to store model files. You may also want to set HF_HOME to the same folder as some libraries used in Buzz download their models independently. Defaults to user_cache_dir.

    BUZZ_FAVORITE_LANGUAGES - Comma separated list of supported language codes to show on top of the language list.

    BUZZ_DOWNLOAD_COOKIEFILE - Location of a cookiefile to use for downloading private videos or as workaround for anti-bot protection.

    BUZZ_FORCE_CPU - Will force Buzz to use CPU and not GPU, useful for setups with older GPU if that is slower than GPU or GPU has issues. Example usage BUZZ_FORCE_CPU=true. Available since 1.2.1

    BUZZ_REDUCE_GPU_MEMORY - Will use 8bit quantization for Huggingface and Faster Whisper transcriptions to reduce required GPU memory. Example usage BUZZ_REDUCE_GPU_MEMORY=true. Available since 1.4.0

    BUZZ_MERGE_REGROUP_RULE - Custom regroup merge rule to use when combining transcripts with word-level timings. More information on available options in stable-ts repo. Available since 1.3.0

    BUZZ_DISABLE_TELEMETRY - Buzz collects basic OS name and architecture usage statistics to better focus development efforts. This variable lets disable collection of these statistics. Example usage BUZZ_DISABLE_TELEMETRY=true. Available since 1.3.0

    BUZZ_UPLOAD_URL - Live recording transcripts and translations can be uploaded to a server for display on the web. Set this variable to the desired upload url. You can use buzz-transcription-server as a server. Buzz will upload the following json via POST requests - {"kind": "transcript", "text": "Sample transcript"} or {"kind": "translation", "text": "Sample translation"}. Example usage BUZZ_UPLOAD_URL=http://localhost:5000/upload. Available since 1.3.0

    Example of data collected by telemetry:

    Buzz: 1.3.0, locale: ('lv_LV', 'UTF-8'), system: Linux, release: 6.14.0-27-generic, machine: x86_64, version: #27~24.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue Jul 22 17:38:49 UTC 2,

    BUZZ_PARAGRAPH_SPLIT_TIME - Time in milliseconds of silence to split paragraphs in transcript and add two newlines when exporting the transcripts as text. Default is 2000 or 2 seconds. Available since 1.3.0

    - + \ No newline at end of file diff --git a/docs/usage/edit_and_resize.html b/docs/usage/edit_and_resize.html index f20c95e5..0b79e1d0 100644 --- a/docs/usage/edit_and_resize.html +++ b/docs/usage/edit_and_resize.html @@ -4,13 +4,13 @@ Edit and Resize | Buzz - +

    Edit and Resize

    Resize options

    When transcript of some audio or video file is generated you can edit it and export to different subtitle formats or plain text. Double-click the transcript in the list of transcripts to see additional options for editing and exporting.

    Transcription view screen has option to resize the transcripts. Click on the "Resize" button to see available options. Transcripts that have been generated with word-level timings setting enabled can be combined into subtitles specifying different options, like maximum length of a subtitle and if subtitles should be split on punctuation. Transcripts that have been generated without word-level timings setting enabled can only be recombined specifying the desired max length of a subtitle.

    If audio file is still present on the system word-level timing merge will also analyze the audio for silences to improve subtitle accuracy. Subtitle generation from transcripts with word-level timings is available since version 1.3.0.

    The resize tool also has an option to extend end time of segments if you want the subtitles to be on the screen for longer. You can specify the amount of time in seconds to extend each subtitle segment. Buzz will add this amount of time to the end of each subtitle segment making sure that the end of a segment does not go over start of the next segment. This feature is available since 1.4.3.

    - + \ No newline at end of file diff --git a/docs/usage/file_import.html b/docs/usage/file_import.html index 4721587d..60af79e5 100644 --- a/docs/usage/file_import.html +++ b/docs/usage/file_import.html @@ -4,14 +4,14 @@ File Import | Buzz - +

    File Import

    To import a file:

    • Click Import Media File on the File menu (or the '+' icon on the toolbar, or Command/Ctrl + O).
    • Choose an audio or video file.
    • Select a task, language, and the model settings.
    • Click Run.
    • When the transcription status shows 'Completed', double-click on the row (or select the row and click the '⤢' icon) to open the transcription.

    Available options:

    To reduce misspellings you can pass some commonly misspelled words in an Initial prompt that is available under Advanced... button. See this guide on prompting.

    FieldOptionsDefaultDescription
    Export As"TXT", "SRT", "VTT""TXT"Export file format
    Word-Level TimingsOff / OnOffIf checked, the transcription will generate a separate subtitle line for each word in the audio. Combine words into subtitles afterwards with the resize option.
    Extract speechOff / OnOffIf checked, speech will be extracted to a separate audio track to improve accuracy. Available since 1.3.0.

    (See the Live Recording section for more information about the task, language, and quality settings.)

    Media File Import on Buzz

    💡 Tip: It is recommended to always select language to transcribe to as automatic language detection may result in unexpected results.

    - + \ No newline at end of file diff --git a/docs/usage/live_recording.html b/docs/usage/live_recording.html index 46619094..cb212dc9 100644 --- a/docs/usage/live_recording.html +++ b/docs/usage/live_recording.html @@ -4,20 +4,21 @@ Live Recording | Buzz - +

    Live Recording

    To start a live recording:

    • Select a recording task, language, quality, and microphone.
    • Click Record.

    Note: Transcribing audio using the default Whisper model is resource-intensive. Consider using the Whisper.cpp. -Since 1.3.0 it supports GPU acceleration, if the model fits in GPU memory. Use smaller models for real-time performance.

    FieldOptionsDefaultDescription
    Task"Transcribe", "Translate to English""Transcribe""Transcribe" converts the input audio into text in the selected language, while "Translate to English" converts it into text in English.
    LanguageSee Whisper's documentation for the full list of supported languages"Detect Language""Detect Language" will try to detect the spoken language in the audio based on the first few seconds. However, selecting a language is recommended (if known) as it will improve transcription quality in many cases.
    Microphone[Available system microphones][Default system microphone]Microphone for recording input audio.

    Live Recording on Buzz

    Presentation Window Since 1.4.2 Buzz has an easy to use presentation window you can use to show live transcriptions during events and presentations. To open it start the recording and new options for the Presentation window will appear.

    Record audio playing from computer (macOS)

    To record audio playing from an application on your computer, you may install an audio loopback driver (a program that +It supports GPU acceleration, if the model fits in GPU memory. Use smaller models for real-time performance.

    FieldOptionsDefaultDescription
    Task"Transcribe", "Translate to English""Transcribe""Transcribe" converts the input audio into text in the selected language, while "Translate to English" converts it into text in English.
    LanguageSee Whisper's documentation for the full list of supported languages"Detect Language""Detect Language" will try to detect the spoken language in the audio based on the first few seconds. However, selecting a language is recommended (if known) as it will improve transcription quality in many cases.
    Microphone[Available system microphones][Default system microphone]Microphone for recording input audio.

    Live Recording on Buzz

    Advanced preferences

    Silence threshold Set threshold for transcriptions to be processed. If average volume level is under this setting the sentence will not be transcribed. Available since 1.4.4. +Line separator Marking to add to the transcription and translation lines. Default value is two new lines (\n\n) that result in an empty space between translation or transcription lines. To have no empty line use \n. Available since 1.4.4.

    Presentation Window

    Since 1.4.2 Buzz has an easy to use presentation window you can use to show live transcriptions during events and presentations. To open it start the recording and new options for the Presentation window will appear.

    Record audio playing from computer (macOS)

    To record audio playing from an application on your computer, you may install an audio loopback driver (a program that lets you create virtual audio devices). The rest of this guide will use BlackHole on Mac, but you can use other alternatives for your operating system ( see LoopBeAudio, LoopBack, and Virtual Audio Cable).

    1. Install BlackHole via Homebrew

      brew install blackhole-2ch
    2. Open Audio MIDI Setup from Spotlight or from /Applications/Utilities/Audio Midi Setup.app.

      Open Audio MIDI Setup from Spotlight

    3. Click the '+' icon at the lower left corner and select 'Create Multi-Output Device'.

      Create multi-output device

    4. Add your default speaker and BlackHole to the multi-output device.

      Screenshot of multi-output device

    5. Select this multi-output device as your speaker (application or system-wide) to play audio into BlackHole.

    6. Open Buzz, select BlackHole as your microphone, and record as before to see transcriptions from the audio playing through BlackHole.

    Record audio playing from computer (Windows)

    To transcribe system audio you need to configure a virtual audio device and connect output from the applications you want to transcribe to this virtual speaker. After that you can select it as a source in Buzz.

    1. Install VB CABLE as virtual audio device.

    2. Configure using Windows Sound settings. Right-click on the speaker icon in the system tray and select "Open Sound settings". In the "Choose your output device" dropdown select "CABLE Input" to send all system sound to the virtual device or use "Advanced sound options" to select application that will output their sound to this device.

    Record audio playing from computer (Linux)

    As described on Ubuntu Wiki on any Linux with pulse audio you can redirect application audio to a virtual speaker. After that you can select it as source in Buzz.

    Overall steps:

    1. Launch application that will produce the sound you want to transcribe and start the playback. For example start a video in a media player.
    2. Launch Buzz and open Live recording screen, so you see the settings.
    3. Configure sound routing from the application you want to transcribe sound from to Buzz in Recording tab of the PulseAudio Volume Control (pavucontrol).
    - + \ No newline at end of file diff --git a/docs/usage/speaker_identification.html b/docs/usage/speaker_identification.html index 37a6547d..d258b122 100644 --- a/docs/usage/speaker_identification.html +++ b/docs/usage/speaker_identification.html @@ -4,13 +4,13 @@ Speaker identification | Buzz - +

    Speaker identification

    When transcript of some audio or video file is generated you can identify speakers in the transcript. Double-click the transcript in the list of transcripts to see additional options for editing and exporting.

    Transcription view screen has option to identify speakers. Click on the "Identify speakers" button to see available options.

    If audio file is still present on the system speaker identification will mark each speaker's sentences with appropriate label. You can preview 10 seconds of some random sentence of the identified speaker and rename the automatically identified label to the speaker's real name. If "Merge speaker sentences" checkbox is selected when you save the speaker labels, all consecutive sentences of the same speaker will be merged into one segment. Speaker identification is available since version 1.4.0 on all platforms except Intel macOS.

    - + \ No newline at end of file diff --git a/docs/usage/transcription_viewer.html b/docs/usage/transcription_viewer.html index 762cf2c8..4a8bf4da 100644 --- a/docs/usage/transcription_viewer.html +++ b/docs/usage/transcription_viewer.html @@ -4,13 +4,13 @@ Transcription Viewer | Buzz - +

    Transcription Viewer

    The Buzz transcription viewer provides a powerful interface for reviewing, editing, and navigating through your transcriptions. This guide covers all the features available in the transcription viewer.

    Overview

    The transcription viewer is organized into several key sections:

    • Top Toolbar: Contains view mode, export, translate, resize, and search
    • Search Bar: Find and navigate through transcript text
    • Transcription Segments: Table view of all transcription segments with timestamps
    • Playback Controls: Audio playback settings and speed controls (since version 1.3.0)
    • Audio Player: Standard media player with progress bar
    • Current Segment Display: Shows the currently selected or playing segment

    Top Toolbar

    View Mode Button

    • Function: Switch between different viewing modes
    • Options:
      • Timestamps: Shows segments in a table format with start/end times
      • Text: Shows combined text without timestamps
      • Translation: Shows translated text (if available)

    Export Button

    • Function: Export transcription in various formats
    • Formats: SRT, VTT, TXT, JSON, and more
    • Usage: Click to open export menu and select desired format

    Translate Button

    • Function: Translate transcription to different languages
    • Usage: Click to open translation settings and start translation

    Resize Button

    • Function: Adjust transcription segment boundaries
    • Usage: Click to open resize dialog for fine-tuning timestamps
    • More information: See Edit and Resize section

    Playback Controls Button

    (since version 1.3.0)

    • Function: Show/hide playback control panel
    • Shortcut: Ctrl+Alt+P (Windows/Linux) or Cmd+Alt+P (macOS)
    • Behavior: Toggle button that shows/hides the playback controls below

    Find Button

    (since version 1.3.0)

    • Function: Show/hide search functionality
    • Shortcut: Ctrl+F (Windows/Linux) or Cmd+F (macOS)
    • Behavior: Toggle button that shows/hides the search bar

    Scroll to Current Button

    (since version 1.3.0)

    • Function: Automatically scroll to the currently playing text
    • Shortcut: Ctrl+G (Windows/Linux) or Cmd+G (macOS)
    • Usage: Click to jump to the current audio position in the transcript

    Search Functionality

    (since version 1.3.0)

    The search bar appears below the toolbar when activated and provides:

    • Search Input: Type text to find in the transcription (wider input field for better usability)
    • Navigation: Up/down arrows to move between matches
    • Status: Shows current match position and total matches (e.g., "3 of 15 matches")
    • Clear: Remove search text and results (larger button for better accessibility)
    • Results: Displays found text with context
    • Consistent Button Sizing: All navigation buttons have uniform height for better visual consistency

    Search Shortcuts

    • Ctrl+F / Cmd+F: Toggle search bar on/off
    • Enter: Find next match
    • Shift+Enter: Find previous match
    • Escape: Close search bar

    Search Features

    • Real-time Search: Results update as you type
    • Case-insensitive: Finds matches regardless of capitalization
    • Word Boundaries: Respects word boundaries for accurate matching
    • Cross-view Search: Works in all view modes (Timestamps, Text, Translation)

    Playback Controls

    (since version 1.3.0)

    Loop Segment

    • Function: Automatically loop playback of selected segments
    • Usage: Check the "Loop Segment" checkbox
    • Behavior: When enabled, clicking on a transcript segment will set a loop range
    • Visual Feedback: Loop range is highlighted in the audio player

    Follow Audio

    • Function: Automatically scroll to current audio position
    • Usage: Check the "Follow Audio" checkbox
    • Behavior: Transcript automatically follows the audio playback
    • Benefits: Easy to follow along with long audio files

    Speed Controls

    • Function: Adjust audio playback speed
    • Range: 0.5x to 2.0x speed
    • Controls:
      • Speed Dropdown: Select from preset speeds or enter custom value
      • Decrease Button (-): Reduce speed by 0.05x increments
      • Increase Button (+): Increase speed by 0.05x increments
    • Persistence: Speed setting is saved between sessions
    • Button Sizing: Speed control buttons match the size of search navigation buttons for visual consistency

    Keyboard Shortcuts

    (since version 1.3.0)

    Audio Playback

    • Ctrl+P / Cmd+P: Play/Pause audio
    • Ctrl+Shift+P / Cmd+Shift+P: Replay current segment from start

    Timestamp Adjustment

    • Ctrl+← / Cmd+←: Decrease segment start time by 0.5s
    • Ctrl+→ / Cmd+→: Increase segment start time by 0.5s
    • Ctrl+Shift+← / Cmd+Shift+←: Decrease segment end time by 0.5s
    • Ctrl+Shift+→ / Cmd+Shift+→: Increase segment end time by 0.5s
    • Ctrl+F / Cmd+F: Toggle search bar
    • Ctrl+Alt+P / Cmd+Alt+P: Toggle playback controls
    • Ctrl+G / Cmd+G: Scroll to current position
    • Ctrl+O / Cmd+O: Open file import dialog
    • Enter: Find next match
    • Shift+Enter: Find previous match
    • Escape: Close search bar
    - + \ No newline at end of file diff --git a/docs/usage/translations.html b/docs/usage/translations.html index 24f1fc1e..ff42b8ef 100644 --- a/docs/usage/translations.html +++ b/docs/usage/translations.html @@ -4,13 +4,13 @@ Translations | Buzz - +

    Translations

    Default Translation task uses Whisper model ability to translate to English, however Large-V3-Turbo is not compatible with this standard. Since version 1.0.0 Buzz supports additional AI translations to any other language.

    To use the translation feature you will need to configure OpenAI API key and translation settings. Set OpenAI API key in Preferences. Buzz also supports custom locally running translation AIs that support OpenAI API. For more information on locally running AIs see ollama or LM Studio. For information on available custom APIs see this discussion thread.

    To configure translation for Live recordings enable it in the Advanced settings dialog of the Live Recording settings. Enter AI model to use and prompt with instructions for the AI on how to translate. Translation option is also available for files that already have speech recognised. Use Translate button on transcription viewer toolbar.

    For AI to know how to translate enter translation instructions in the "Instructions for AI" section. In your instructions you should describe to what language you want it to translate the text to. Also, you may need to add additional instructions to not add any notes or comments as AIs tend to add them. Example instructions to translate English subtitles to Spanish:

    You are a professional translator, skilled in translating English to Spanish. You will only translate each sentence sent to you into Spanish and not add any notes or comments.

    If you enable "Enable live recording transcription export" in Preferences, Live text transcripts will be exported to a text file as they get generated and translated. This file can be used to further integrate Live transcripts with other applications like OBS Studio.

    Approximate cost of translation for 1 hour long audio with ChatGPT gpt-4o model is around $0.50.

    - + \ No newline at end of file diff --git a/index.html b/index.html index 5a12e530..46467409 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ Buzz - +
    - + \ No newline at end of file