• Y
  • List All
  • Feedback
    • This Project
    • All Projects
Profile Account settings Log out
  • Favorite
  • Project
  • All
Loading...
  • Log in
  • Sign up
yjyoon / whisper_server_speaches star
  • Project home
  • Code
  • Issue
  • Pull request
  • Review
  • Milestone
  • Board
  • Files
  • Commit
  • Branches
whisper_server_speaches / faster_whisper_server / gradio_app.py
Download as .zip file
File name
Commit message
Commit date
.github/workflows
update pre-commit deps, replace custom pyright hook
2024-07-03
examples
chore: update docker tag to latest
2024-06-03
faster_whisper_server
chore: fix ruff errors
2024-07-03
tests
chore: fix ruff errors
2024-07-03
.dockerignore
chore: ignore .env
2024-05-27
.envrc
init
2024-05-20
.gitattributes
docs: add live-transcription demo
2024-05-28
.gitignore
chore: update .gitignore
2024-07-03
.pre-commit-config.yaml
update pre-commit deps, replace custom pyright hook
2024-07-03
Dockerfile.cpu
fix task enum vals, fix env var parsing, improve gradio, use uv in dockerfile
2024-06-23
Dockerfile.cuda
fix task enum vals, fix env var parsing, improve gradio, use uv in dockerfile
2024-06-23
LICENSE
init
2024-05-20
README.md
Update README.md
2024-06-26
Taskfile.yaml
switch to using uv
2024-07-03
audio.wav
docs: update README.md
2024-05-27
compose.yaml
chore: update docker tag to latest
2024-06-03
flake.lock
init
2024-05-20
flake.nix
switch to using uv
2024-07-03
lsyncd.conf
chore: add lsyncd config
2024-06-03
pyproject.toml
chore: fix ruff errors
2024-07-03
requirements-all.txt
switch to using uv
2024-07-03
requirements-dev.txt
switch to using uv
2024-07-03
requirements.txt
switch to using uv
2024-07-03
File name
Commit message
Commit date
__init__.py
chore: rename to 'faster-whisper-server'
2024-05-27
asr.py
chore: fix ruff errors
2024-07-03
audio.py
chore: fix ruff errors
2024-07-03
config.py
chore: fix ruff errors
2024-07-03
core.py
chore: fix ruff errors
2024-07-03
gradio_app.py
chore: fix ruff errors
2024-07-03
logger.py
chore: fix ruff errors
2024-07-03
main.py
chore: fix ruff errors
2024-07-03
server_models.py
chore: fix ruff errors
2024-07-03
transcriber.py
chore: fix ruff errors
2024-07-03
utils.py
chore: rename to 'faster-whisper-server'
2024-05-27
Fedir Zadniprovskyi 2024-07-03 88f0467 chore: fix ruff errors UNIX
Raw Open in browser Change history
"""Gradio playground UI for the faster-whisper-server HTTP API.

Builds a small web demo that posts uploaded audio to the server's
OpenAI-compatible transcription/translation endpoints, optionally
streaming partial results over SSE.
"""

from collections.abc import Generator
import os

import gradio as gr
import httpx
from httpx_sse import connect_sse

from faster_whisper_server.config import Config, Task

TRANSCRIPTION_ENDPOINT = "/v1/audio/transcriptions"
TRANSLATION_ENDPOINT = "/v1/audio/translations"


def create_gradio_demo(config: Config) -> gr.Blocks:
    """Create the Gradio demo wired against the locally running server.

    Args:
        config: Server configuration; ``config.whisper.model`` is used as the
            initially selected model.

    Returns:
        A ``gr.Blocks`` (Interface) instance ready to be mounted or launched.
    """
    # The client targets the same host/port uvicorn binds to, so the demo
    # talks to the co-located server instance.
    host = os.getenv("UVICORN_HOST", "0.0.0.0")
    port = int(os.getenv("UVICORN_PORT", "8000"))
    # NOTE: worth looking into generated clients
    # timeout=None: transcription of long audio can take arbitrarily long.
    http_client = httpx.Client(base_url=f"http://{host}:{port}", timeout=None)

    def handler(file_path: str, model: str, task: Task, temperature: float, stream: bool) -> Generator[str, None, None]:
        """Dispatch to streaming or one-shot transcription.

        Yields the accumulated transcription text so the Gradio output box
        grows as SSE chunks arrive; yields exactly once in non-streaming mode.
        """
        if stream:
            previous_transcription = ""
            for transcription in transcribe_audio_streaming(file_path, task, temperature, model):
                previous_transcription += transcription
                yield previous_transcription
        else:
            yield transcribe_audio(file_path, task, temperature, model)

    def transcribe_audio(file_path: str, task: Task, temperature: float, model: str) -> str:
        """POST the audio file once and return the full transcription text.

        Raises:
            ValueError: If ``task`` is not a supported ``Task`` member.
            httpx.HTTPStatusError: On a non-2xx server response.
        """
        if task == Task.TRANSCRIBE:
            endpoint = TRANSCRIPTION_ENDPOINT
        elif task == Task.TRANSLATE:
            endpoint = TRANSLATION_ENDPOINT
        else:
            # Previously an unrecognized task left `endpoint` unbound,
            # producing an opaque NameError; fail explicitly instead.
            raise ValueError(f"Unsupported task: {task}")
        with open(file_path, "rb") as file:
            response = http_client.post(
                endpoint,
                files={"file": file},
                data={
                    "model": model,
                    "response_format": "text",
                    "temperature": temperature,
                },
            )
        response.raise_for_status()
        return response.text

    def transcribe_audio_streaming(
        file_path: str, task: Task, temperature: float, model: str
    ) -> Generator[str, None, None]:
        """Stream transcription chunks from the server via server-sent events.

        Yields each SSE ``data`` payload (a text fragment) as it arrives.
        """
        with open(file_path, "rb") as file:
            kwargs = {
                "files": {"file": file},
                "data": {
                    "response_format": "text",
                    "temperature": temperature,
                    "model": model,
                    "stream": True,
                },
            }
            endpoint = TRANSCRIPTION_ENDPOINT if task == Task.TRANSCRIBE else TRANSLATION_ENDPOINT
            with connect_sse(http_client, "POST", endpoint, **kwargs) as event_source:
                for event in event_source.iter_sse():
                    yield event.data

    def update_model_dropdown() -> gr.Dropdown:
        """Fetch available models from the server and rebuild the dropdown.

        "Systran"-prefixed models are listed first as the recommended set.
        """
        res = http_client.get("/v1/models")
        # NOTE(review): assumes /v1/models returns a JSON list of objects with
        # an "id" key (not wrapped in {"data": [...]}) — verify against server.
        res_data = res.json()
        models: list[str] = [model["id"] for model in res_data]
        assert config.whisper.model in models
        recommended_models = {model for model in models if model.startswith("Systran")}
        other_models = [model for model in models if model not in recommended_models]
        models = list(recommended_models) + other_models
        return gr.Dropdown(
            # no idea why it's complaining
            choices=models,  # pyright: ignore[reportArgumentType]
            label="Model",
            value=config.whisper.model,
        )

    # Initial dropdown only knows the configured model; the real list is
    # loaded from the server on demo load (see demo.load below).
    model_dropdown = gr.Dropdown(
        choices=[config.whisper.model],
        label="Model",
        value=config.whisper.model,
    )
    task_dropdown = gr.Dropdown(
        choices=[task.value for task in Task],
        label="Task",
        value=Task.TRANSCRIBE,
    )
    temperature_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label="Temperature", value=0.0)
    stream_checkbox = gr.Checkbox(label="Stream", value=True)
    with gr.Interface(
        title="Whisper Playground",
        description="""Consider supporting the project by starring the <a href="https://github.com/fedirz/faster-whisper-server">repository on GitHub</a>.""",  # noqa: E501
        inputs=[
            gr.Audio(type="filepath"),
            model_dropdown,
            task_dropdown,
            temperature_slider,
            stream_checkbox,
        ],
        fn=handler,
        outputs="text",
    ) as demo:
        demo.load(update_model_dropdown, inputs=None, outputs=model_dropdown)
    return demo

          
        
    
    
Copyright Yona authors & © NAVER Corp. & NAVER LABS Supported by NAVER CLOUD PLATFORM

or
Sign in with github login with Google Sign in with Google
Reset password | Sign up