Dominik Macháček 2024-04-17
better documentation, help message and logging prints
@54634f008ee362a3986327738407297766d9ce96
README.md
--- README.md
+++ README.md
@@ -183,7 +183,7 @@
 
 ### Server -- real-time from mic
 
-`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection. See help message (`-h` option).
+`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection and the `--warmup-file` option. See the help message (`-h` option).
 
 Client example:
 
whisper_online.py
--- whisper_online.py
+++ whisper_online.py
@@ -625,7 +625,7 @@
     # load the audio into the LRU cache before we start the timer
     a = load_audio_chunk(audio_path,0,1)
 
-    # warm up the ASR, because the very first transcribe takes much more time than the other
+    # warm up the ASR because the very first transcribe takes much more time than the other
     asr.transcribe(a)
 
     beg = args.start_at
whisper_online_server.py
--- whisper_online_server.py
+++ whisper_online_server.py
@@ -10,8 +10,8 @@
 # server options
 parser.add_argument("--host", type=str, default='localhost')
 parser.add_argument("--port", type=int, default=43007)
-
-parser.add_argument("--warmup-file", type=str, dest="warmup_file")
+parser.add_argument("--warmup-file", type=str, dest="warmup_file", 
+        help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .")
 
 
 # options from whisper_online
@@ -41,19 +41,18 @@
     tokenizer = None
 online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec))
 
-
-
-if args.warmup_file and os.path.exists(args.warmup_file):
-    # load the audio into the LRU cache before we start the timer
-    a = load_audio_chunk(args.warmup_file,0,1)
-
-    # TODO: it should be tested whether it's meaningful
-    # warm up the ASR, because the very first transcribe takes much more time than the other
-    asr.transcribe(a)
+# warm up the ASR because the very first transcribe takes more time than the others.
+# Test results in https://github.com/ufal/whisper_streaming/pull/81
+msg = "Whisper is not warmed up. The first chunk processing may take longer."
+if args.warmup_file:
+    if os.path.isfile(args.warmup_file):
+        a = load_audio_chunk(args.warmup_file,0,1)
+        asr.transcribe(a)
+        print("INFO: Whisper is warmed up.",file=sys.stderr)
+    else:
+        print("WARNING: The warm up file is not available. "+msg,file=sys.stderr)
 else:
-    print("Whisper is not warmed up",file=sys.stderr)
-
-
+    print("WARNING: " + msg, file=sys.stderr)
 
 
 ######### Server objects
Add a comment
List