yjyoon / whisper_server_speaches
whisper_server_speaches / speaches / asr.py
whisper_server_speaches/

File name                | Commit message                             | Commit date
speaches                 | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
tests                    | init                                       | 2024-05-20
.dockerignore            | init                                       | 2024-05-20
.envrc                   | init                                       | 2024-05-20
.gitignore               | init                                       | 2024-05-20
.pre-commit-config.yaml  | init                                       | 2024-05-20
Dockerfile.cpu           | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
Dockerfile.cuda          | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
LICENSE                  | init                                       | 2024-05-20
README.md                | docs: add WIP warning                      | 2024-05-20
Taskfile.yaml            | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
compose.yaml             | init                                       | 2024-05-20
flake.lock               | init                                       | 2024-05-20
flake.nix                | init                                       | 2024-05-20
poetry.lock              | init                                       | 2024-05-20
pyproject.toml           | init                                       | 2024-05-20
whisper_server_speaches/speaches/

File name                | Commit message                             | Commit date
__init__.py              | init                                       | 2024-05-20
asr.py                   | init                                       | 2024-05-20
audio.py                 | init                                       | 2024-05-20
config.py                | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
core.py                  | init                                       | 2024-05-20
logger.py                | init                                       | 2024-05-20
main.py                  | chore: Dockerfile envs, log ws close, etc. | 2024-05-20
server_models.py         | init                                       | 2024-05-20
transcriber.py           | init                                       | 2024-05-20
asr.py: last commit 8ad3023 ("init") by Fedir Zadniprovskyi, 2024-05-20, UNIX line endings
import asyncio
import time
from typing import Iterable

from faster_whisper import transcribe
from pydantic import BaseModel

from speaches.audio import Audio
from speaches.config import Language
from speaches.core import Transcription, Word
from speaches.logger import logger


class TranscribeOpts(BaseModel):
    language: Language | None
    vad_filter: bool
    condition_on_previous_text: bool


class FasterWhisperASR:
    def __init__(
        self,
        whisper: transcribe.WhisperModel,
        transcribe_opts: TranscribeOpts,
    ) -> None:
        self.whisper = whisper
        self.transcribe_opts = transcribe_opts

    def _transcribe(
        self,
        audio: Audio,
        prompt: str | None = None,
    ) -> tuple[Transcription, transcribe.TranscriptionInfo]:
        start = time.perf_counter()
        segments, transcription_info = self.whisper.transcribe(
            audio.data,
            initial_prompt=prompt,
            word_timestamps=True,
            **self.transcribe_opts.model_dump(),
        )
        words = words_from_whisper_segments(segments)
        for word in words:
            word.offset(audio.start)
        transcription = Transcription(words)
        end = time.perf_counter()
        logger.info(
            f"Transcribed {audio} in {end - start:.2f} seconds. Prompt: {prompt}. Transcription: {transcription.text}"
        )
        return (transcription, transcription_info)

    async def transcribe(
        self,
        audio: Audio,
        prompt: str | None = None,
    ) -> tuple[Transcription, transcribe.TranscriptionInfo]:
        """Wrapper around _transcribe so it can be used in async context"""
        # is this the optimal way to execute a blocking call in an async context?
        # TODO: verify performance when running inference on a CPU
        return await asyncio.get_running_loop().run_in_executor(
            None,
            self._transcribe,
            audio,
            prompt,
        )


def words_from_whisper_segments(segments: Iterable[transcribe.Segment]) -> list[Word]:
    words: list[Word] = []
    for segment in segments:
        assert segment.words is not None
        words.extend(
            Word(
                start=word.start,
                end=word.end,
                text=word.word,
                probability=word.probability,
            )
            for word in segment.words
        )
    return words
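For reference, a minimal sketch of how FasterWhisperASR might be driven. The model name, device settings, and the Audio construction are illustrative assumptions, not taken from this file or the rest of the repository:

import asyncio

from faster_whisper import transcribe

from speaches.asr import FasterWhisperASR, TranscribeOpts
from speaches.audio import Audio


async def main() -> None:
    # Model name and device settings are assumptions for this sketch.
    model = transcribe.WhisperModel("tiny", device="cpu", compute_type="int8")
    opts = TranscribeOpts(
        language=None,                  # let Whisper auto-detect the language
        vad_filter=True,
        condition_on_previous_text=False,
    )
    asr = FasterWhisperASR(model, opts)

    # Audio is defined in speaches/audio.py (not shown on this page); how it is
    # built from PCM data is assumed here, hence the placeholder argument.
    audio = Audio(...)  # placeholder: construct from your audio buffer
    transcription, info = await asr.transcribe(audio)
    print(info.language, transcription.text)


if __name__ == "__main__":
    asyncio.run(main())

On the TODO in transcribe: run_in_executor(None, ...) runs the blocking faster-whisper call on the event loop's default thread pool; on Python 3.9+ asyncio.to_thread(self._transcribe, audio, prompt) does the same thing with slightly more readable syntax.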