yjyoon / whisper_server_speaches
whisper_server_speaches / src / speaches / gradio_app.py
Repository root:

| File name | Commit message | Commit date |
| --- | --- | --- |
| .github/workflows | fix: tests | 01-13 |
| configuration | feat: add instrumentation | 2024-12-17 |
| docs | docs: update | 01-13 |
| examples | rename to `speaches` | 01-12 |
| scripts | chore: misc changes | 2024-10-03 |
| src/speaches | fix: tests | 01-13 |
| tests | fix: tests | 01-13 |
| .dockerignore | fix: .dockerignore | 01-12 |
| .envrc | init | 2024-05-20 |
| .gitattributes | chore(deps): update pre-commit hook astral-sh/ruff-pre-commit to v0.7.2 | 2024-11-02 |
| .gitignore | chore: update .gitignore | 2024-07-03 |
| .pre-commit-config.yaml | chore(deps): update pre-commit hook python-jsonschema/check-jsonschema to v0.31.0 | 01-12 |
| Dockerfile | chore(deps): update ghcr.io/astral-sh/uv docker tag to v0.5.18 | 01-12 |
| LICENSE | init | 2024-05-20 |
| README.md | docs: update | 01-13 |
| Taskfile.yaml | chore(deps): add portaudio to flake.nix | 01-13 |
| audio.wav | chore: update volume names and mount points | 01-10 |
| compose.cpu.yaml | rename to `speaches` | 01-12 |
| compose.cuda-cdi.yaml | rename to `speaches` | 01-12 |
| compose.cuda.yaml | rename to `speaches` | 01-12 |
| compose.observability.yaml | chore(deps): update otel/opentelemetry-collector-contrib docker tag to v0.117.0 | 01-12 |
| compose.yaml | rename to `speaches` | 01-12 |
| flake.lock | deps: update flake | 2024-11-01 |
| flake.nix | chore(deps): add portaudio to flake.nix | 01-13 |
| mkdocs.yml | docs/fix: home page 404 | 01-13 |
| pyproject.toml | chore(deps): upgrade ruff | 01-13 |
| renovate.json | feat: renovate handle pre-commit | 2024-11-01 |
| uv.lock | chore(deps): upgrade ruff | 01-13 |
src/speaches directory:

| File name | Commit message | Commit date |
| --- | --- | --- |
| routers | feat: add kokoro tts support (#230) | 01-13 |
| __init__.py | rename to `speaches` | 01-12 |
| api_types.py | feat: add kokoro tts support (#230) | 01-13 |
| asr.py | feat: add kokoro tts support (#230) | 01-13 |
| audio.py | feat: add kokoro tts support (#230) | 01-13 |
| config.py | deprecate `config.preload_models` | 01-13 |
| dependencies.py | feat: add kokoro tts support (#230) | 01-13 |
| gradio_app.py | docs: update | 01-13 |
| hf_utils.py | fix: tests | 01-13 |
| kokoro_utils.py | feat: add kokoro tts support (#230) | 01-13 |
| logger.py | rename to `speaches` | 01-12 |
| main.py | deprecate `config.preload_models` | 01-13 |
| model_manager.py | feat: add kokoro tts support (#230) | 01-13 |
| piper_utils.py | feat: add kokoro tts support (#230) | 01-13 |
| text_utils.py | feat: add kokoro tts support (#230) | 01-13 |
| text_utils_test.py | feat: add kokoro tts support (#230) | 01-13 |
| transcriber.py | feat: add kokoro tts support (#230) | 01-13 |
Latest commit 3580f89 by Fedir Zadniprovskyi (01-13): docs: update
from collections.abc import AsyncGenerator
from pathlib import Path
import platform

import gradio as gr
import httpx
from httpx_sse import aconnect_sse
from openai import AsyncOpenAI

from speaches import kokoro_utils
from speaches.api_types import Voice
from speaches.config import Config, Task
from speaches.routers.speech import (
    MAX_SAMPLE_RATE,
    MIN_SAMPLE_RATE,
    SUPPORTED_RESPONSE_FORMATS,
)

TRANSCRIPTION_ENDPOINT = "/v1/audio/transcriptions"
TRANSLATION_ENDPOINT = "/v1/audio/translations"
TIMEOUT_SECONDS = 180
TIMEOUT = httpx.Timeout(timeout=TIMEOUT_SECONDS)
DEFAULT_TEXT = "A rainbow is an optical phenomenon caused by refraction, internal reflection and dispersion of light in water droplets resulting in a continuous spectrum of light appearing in the sky."  # noqa: E501


# NOTE: `gr.Request` seems to be passed in as the last positional (not keyword) argument
def base_url_from_gradio_req(request: gr.Request) -> str:
    # NOTE: `request.request.url` seems to always have a path of "/gradio_api/queue/join"
    assert request.request is not None
    return f"{request.request.url.scheme}://{request.request.url.netloc}"


def http_client_from_gradio_req(request: gr.Request, config: Config) -> httpx.AsyncClient:
    base_url = base_url_from_gradio_req(request)
    return httpx.AsyncClient(
        base_url=base_url,
        timeout=TIMEOUT,
        headers={"Authorization": f"Bearer {config.api_key}"} if config.api_key else None,
    )


def openai_client_from_gradio_req(request: gr.Request, config: Config) -> AsyncOpenAI:
    base_url = base_url_from_gradio_req(request)
    return AsyncOpenAI(base_url=f"{base_url}/v1", api_key=config.api_key if config.api_key else "cant-be-empty")


def create_gradio_demo(config: Config) -> gr.Blocks:  # noqa: C901, PLR0915
    async def whisper_handler(
        file_path: str, model: str, task: Task, temperature: float, stream: bool, request: gr.Request
    ) -> AsyncGenerator[str, None]:
        http_client = http_client_from_gradio_req(request, config)
        if task == Task.TRANSCRIBE:
            endpoint = TRANSCRIPTION_ENDPOINT
        elif task == Task.TRANSLATE:
            endpoint = TRANSLATION_ENDPOINT

        if stream:
            previous_transcription = ""
            async for transcription in streaming_audio_task(http_client, file_path, endpoint, temperature, model):
                previous_transcription += transcription
                yield previous_transcription
        else:
            yield await audio_task(http_client, file_path, endpoint, temperature, model)

    async def audio_task(
        http_client: httpx.AsyncClient, file_path: str, endpoint: str, temperature: float, model: str
    ) -> str:
        with Path(file_path).open("rb") as file:  # noqa: ASYNC230
            response = await http_client.post(
                endpoint,
                files={"file": file},
                data={
                    "model": model,
                    "response_format": "text",
                    "temperature": temperature,
                },
            )
        response.raise_for_status()
        return response.text

    async def streaming_audio_task(
        http_client: httpx.AsyncClient, file_path: str, endpoint: str, temperature: float, model: str
    ) -> AsyncGenerator[str, None]:
        with Path(file_path).open("rb") as file:  # noqa: ASYNC230
            kwargs = {
                "files": {"file": file},
                "data": {
                    "response_format": "text",
                    "temperature": temperature,
                    "model": model,
                    "stream": True,
                },
            }
            async with aconnect_sse(http_client, "POST", endpoint, **kwargs) as event_source:
                async for event in event_source.aiter_sse():
                    yield event.data

    async def update_whisper_model_dropdown(request: gr.Request) -> gr.Dropdown:
        openai_client = openai_client_from_gradio_req(request, config)
        models = (await openai_client.models.list()).data
        model_names: list[str] = [model.id for model in models]
        assert config.whisper.model in model_names
        recommended_models = {model for model in model_names if model.startswith("Systran")}
        other_models = [model for model in model_names if model not in recommended_models]
        model_names = list(recommended_models) + other_models
        return gr.Dropdown(
            choices=model_names,
            label="Model",
            value=config.whisper.model,
        )

    async def update_voices_and_language_dropdown(model_id: str | None, request: gr.Request) -> dict:
        params = httpx.QueryParams({"model_id": model_id})
        http_client = http_client_from_gradio_req(request, config)
        res = (await http_client.get("/v1/audio/speech/voices", params=params)).raise_for_status()
        voice_ids = [Voice.model_validate(x).voice_id for x in res.json()]
        return {
            voice_dropdown: gr.update(choices=voice_ids, value=voice_ids[0]),
            language_dropdown: gr.update(visible=model_id == "hexgrad/Kokoro-82M"),
        }

    async def handle_audio_speech(
        text: str,
        model: str,
        voice: str,
        language: str | None,
        response_format: str,
        speed: float,
        sample_rate: int | None,
        request: gr.Request,
    ) -> Path:
        openai_client = openai_client_from_gradio_req(request, config)
        res = await openai_client.audio.speech.create(
            input=text,
            model=model,
            voice=voice,  # pyright: ignore[reportArgumentType]
            response_format=response_format,  # pyright: ignore[reportArgumentType]
            speed=speed,
            extra_body={"language": language, "sample_rate": sample_rate},
        )
        audio_bytes = res.response.read()
        file_path = Path(f"audio.{response_format}")
        with file_path.open("wb") as file:  # noqa: ASYNC230
            file.write(audio_bytes)
        return file_path

    with gr.Blocks(title="Speaches Playground") as demo:
        gr.Markdown("# Speaches Playground")
        gr.Markdown(
            "### Consider supporting the project by starring the [speaches-ai/speaches repository on GitHub](https://github.com/speaches-ai/speaches)."
        )
        gr.Markdown("### Documentation Website: https://speaches-ai.github.io/speaches")
        gr.Markdown(
            "### For additional details regarding the parameters, see the [API Documentation](https://speaches-ai.github.io/speaches/api)"
        )
        with gr.Tab(label="Speech-to-Text"):
            audio = gr.Audio(type="filepath")
            whisper_model_dropdown = gr.Dropdown(
                choices=[config.whisper.model],
                label="Model",
                value=config.whisper.model,
            )
            task_dropdown = gr.Dropdown(
                choices=[task.value for task in Task],
                label="Task",
                value=Task.TRANSCRIBE,
            )
            temperature_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label="Temperature", value=0.0)
            stream_checkbox = gr.Checkbox(label="Stream", value=True)
            button = gr.Button("Generate")
            output = gr.Textbox()
            # NOTE: the inputs order must match the `whisper_handler` signature
            button.click(
                whisper_handler,
                [audio, whisper_model_dropdown, task_dropdown, temperature_slider, stream_checkbox],
                output,
            )
        with gr.Tab(label="Text-to-Speech"):
            model_dropdown_choices = ["hexgrad/Kokoro-82M", "rhasspy/piper-voices"]
            if platform.machine() != "x86_64":
                model_dropdown_choices.remove("rhasspy/piper-voices")
                gr.Textbox("Speech generation using `rhasspy/piper-voices` model is only supported on x86_64 machines.")
            text = gr.Textbox(label="Input Text", value=DEFAULT_TEXT, lines=3)
            stt_model_dropdown = gr.Dropdown(
                choices=model_dropdown_choices,
                label="Model",
                value="hexgrad/Kokoro-82M",
            )
            voice_dropdown = gr.Dropdown(
                choices=["af"],
                label="Voice",
                value="af",
            )
            language_dropdown = gr.Dropdown(
                choices=kokoro_utils.LANGUAGES, label="Language", value="en-us", visible=True
            )
            stt_model_dropdown.change(
                update_voices_and_language_dropdown,
                inputs=[stt_model_dropdown],
                outputs=[voice_dropdown, language_dropdown],
            )
            response_format_dropdown = gr.Dropdown(
                choices=SUPPORTED_RESPONSE_FORMATS,
                label="Response Format",
                value="wav",
            )
            speed_slider = gr.Slider(minimum=0.25, maximum=4.0, step=0.05, label="Speed", value=1.0)
            sample_rate_slider = gr.Number(
                minimum=MIN_SAMPLE_RATE,
                maximum=MAX_SAMPLE_RATE,
                label="Desired Sample Rate",
                info="""
Setting this will resample the generated audio to the desired sample rate.
You may want to set this if you are going to use 'rhasspy/piper-voices' with voices of different qualities but want to keep the same sample rate.
Default: None (No resampling)
""",  # noqa: E501
                value=lambda: None,
            )
            button = gr.Button("Generate Speech")
            output = gr.Audio(type="filepath")
            button.click(
                handle_audio_speech,
                [
                    text,
                    stt_model_dropdown,
                    voice_dropdown,
                    language_dropdown,
                    response_format_dropdown,
                    speed_slider,
                    sample_rate_slider,
                ],
                output,
            )
        demo.load(update_whisper_model_dropdown, inputs=None, outputs=whisper_model_dropdown)
        demo.load(
            update_voices_and_language_dropdown,
            inputs=[stt_model_dropdown],
            outputs=[voice_dropdown, language_dropdown],
        )
    return demo
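For orientation, here is a minimal usage sketch of `create_gradio_demo`. It is not part of the file above; the default `Config()` construction, the standalone `demo.launch()` call, and the "/" mount path are assumptions for illustration, and the repository's own `main.py` may wire things up differently.

# Hypothetical usage sketch (assumptions noted above): build the playground UI
# from a default Config and either launch it standalone or mount it on FastAPI.
import gradio as gr
from fastapi import FastAPI

from speaches.config import Config
from speaches.gradio_app import create_gradio_demo

config = Config()  # assumes the settings object can be constructed from env vars/defaults
demo = create_gradio_demo(config)

# Option 1: run the Gradio playground on its own.
# demo.launch(server_name="0.0.0.0", server_port=7860)

# Option 2: mount it onto an existing FastAPI application (e.g. the speaches API server).
app = FastAPI()
app = gr.mount_gradio_app(app, demo, path="/")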