yjyoon / whisper_server_speaches
whisper_server_speaches / speaches / server_models.py
whisper_server_speaches (repository root)

File name               | Commit message                                                  | Commit date
.github/workflows       | feat: add gha workflow for building and pushing docker images  | 2024-05-27
speaches                | feat: support loading multiple models                          | 2024-05-27
tests                   | refactor: simplify tests                                       | 2024-05-23
.dockerignore           | feat: add gha workflow for building and pushing docker images  | 2024-05-27
.envrc                  | init                                                           | 2024-05-20
.gitignore              | feat: add gha workflow for building and pushing docker images  | 2024-05-27
.pre-commit-config.yaml | style: add ruff                                                | 2024-05-21
Dockerfile.cpu          | build: docker don't install dev deps                           | 2024-05-25
Dockerfile.cuda         | fix: Dockerfile.cuda whisper model env                         | 2024-05-26
LICENSE                 | init                                                           | 2024-05-20
README.md               | docs: add examples, roadmap, etc.                              | 2024-05-21
Taskfile.yaml           | fix: circular import                                           | 2024-05-26
compose.yaml            | fix: docker multi-arch builds                                  | 2024-05-23
flake.lock              | init                                                           | 2024-05-20
flake.nix               | feat: add gha workflow for building and pushing docker images  | 2024-05-27
poetry.lock             | deps: add youtube-dl as dev dependency                         | 2024-05-25
pyproject.toml          | build: docker don't install dev deps                           | 2024-05-25
speaches/ (directory)

File name        | Commit message                                         | Commit date
__init__.py      | init                                                   | 2024-05-20
asr.py           | feat: further improve openai compatabilit + refactor   | 2024-05-25
audio.py         | style: add ruff                                        | 2024-05-21
config.py        | feat: support loading multiple models                  | 2024-05-27
core.py          | style: add ruff                                        | 2024-05-21
logger.py        | init                                                   | 2024-05-20
main.py          | feat: support loading multiple models                  | 2024-05-27
server_models.py | fix: circular import                                   | 2024-05-26
transcriber.py   | init                                                   | 2024-05-20
utils.py         | feat: further improve openai compatabilit + refactor   | 2024-05-25
Latest commit: Fedir Zadniprovskyi · 2024-05-26 · aa5390b · fix: circular import
from __future__ import annotations

from faster_whisper.transcribe import Segment, TranscriptionInfo, Word
from pydantic import BaseModel

from speaches import utils
from speaches.core import Transcription


# https://platform.openai.com/docs/api-reference/audio/json-object
class TranscriptionJsonResponse(BaseModel):
    text: str

    @classmethod
    def from_segments(cls, segments: list[Segment]) -> TranscriptionJsonResponse:
        return cls(text=utils.segments_text(segments))

    @classmethod
    def from_transcription(
        cls, transcription: Transcription
    ) -> TranscriptionJsonResponse:
        return cls(text=transcription.text)


class WordObject(BaseModel):
    start: float
    end: float
    word: str
    probability: float

    @classmethod
    def from_word(cls, word: Word) -> WordObject:
        return cls(
            start=word.start,
            end=word.end,
            word=word.word,
            probability=word.probability,
        )


class SegmentObject(BaseModel):
    id: int
    seek: int
    start: float
    end: float
    text: str
    tokens: list[int]
    temperature: float
    avg_logprob: float
    compression_ratio: float
    no_speech_prob: float

    @classmethod
    def from_segment(cls, segment: Segment) -> SegmentObject:
        return cls(
            id=segment.id,
            seek=segment.seek,
            start=segment.start,
            end=segment.end,
            text=segment.text,
            tokens=segment.tokens,
            temperature=segment.temperature,
            avg_logprob=segment.avg_logprob,
            compression_ratio=segment.compression_ratio,
            no_speech_prob=segment.no_speech_prob,
        )


# https://platform.openai.com/docs/api-reference/audio/verbose-json-object
class TranscriptionVerboseJsonResponse(BaseModel):
    task: str = "transcribe"
    language: str
    duration: float
    text: str
    words: list[WordObject]
    segments: list[SegmentObject]

    @classmethod
    def from_segment(
        cls, segment: Segment, transcription_info: TranscriptionInfo
    ) -> TranscriptionVerboseJsonResponse:
        return cls(
            language=transcription_info.language,
            duration=segment.end - segment.start,
            text=segment.text,
            words=(
                [WordObject.from_word(word) for word in segment.words]
                if type(segment.words) == list
                else []
            ),
            segments=[SegmentObject.from_segment(segment)],
        )

    @classmethod
    def from_segments(
        cls, segments: list[Segment], transcription_info: TranscriptionInfo
    ) -> TranscriptionVerboseJsonResponse:
        return cls(
            language=transcription_info.language,
            duration=transcription_info.duration,
            text=utils.segments_text(segments),
            segments=[SegmentObject.from_segment(segment) for segment in segments],
            words=[
                WordObject.from_word(word)
                for word in utils.words_from_segments(segments)
            ],
        )

    @classmethod
    def from_transcription(
        cls, transcription: Transcription
    ) -> TranscriptionVerboseJsonResponse:
        return cls(
            language="english",  # FIX: hardcoded
            duration=transcription.duration,
            text=transcription.text,
            words=[
                WordObject(
                    start=word.start,
                    end=word.end,
                    word=word.text,
                    probability=word.probability,
                )
                for word in transcription.words
            ],
            segments=[],  # FIX: hardcoded
        )
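The module above only defines OpenAI-style response shapes; a short usage sketch may help show how they connect to faster-whisper output. The following is illustrative and not part of the repository: the model name, audio path, and decoding options are assumptions, Pydantic v2 is assumed (model_dump_json), and word_timestamps=True is passed so that Segment.words is populated for the verbose response.

    # Illustrative sketch only -- not from the repository.
    # Assumes faster-whisper and Pydantic v2 are installed and "audio.wav" exists.
    from faster_whisper import WhisperModel

    from speaches.server_models import (
        TranscriptionJsonResponse,
        TranscriptionVerboseJsonResponse,
    )

    model = WhisperModel("tiny.en", device="cpu", compute_type="int8")

    # word_timestamps=True populates Segment.words, which the verbose response
    # converts into WordObject entries.
    segments, info = model.transcribe("audio.wav", word_timestamps=True)
    segments = list(segments)  # transcribe() returns a lazy generator of Segment

    plain = TranscriptionJsonResponse.from_segments(segments)
    verbose = TranscriptionVerboseJsonResponse.from_segments(segments, info)

    print(plain.model_dump_json())    # {"text": "..."}
    print(verbose.model_dump_json())  # task, language, duration, text, words, segments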