Update app.py
app.py CHANGED

@@ -7,12 +7,13 @@ import numpy as np
 import requests
 import os
 
+# Load Whisper model
 ASR_MODEL_NAME = "openai/whisper-medium.en"
 asr_pipe = pipeline(
     task="automatic-speech-recognition",
     model=ASR_MODEL_NAME,
     chunk_length_s=30,
-    device='cuda',
+    device='cuda' if torch.cuda.is_available() else 'cpu', # Use GPU if available
 )
 
 all_special_ids = asr_pipe.tokenizer.all_special_ids
@@ -29,6 +30,8 @@ def _preload_and_load_models():
         device_map="auto", # or.to('cuda')
     ) #.to('cuda') # Explicitly move to CUDA after loading
 
+_preload_and_load_models()
+
 @spaces.GPU(required=True)
 def process_audio(microphone, state, task="transcribe"):
     if microphone is None:
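For reference, a minimal standalone sketch of what the ASR setup amounts to after this commit. It assumes the `torch` and `transformers` imports already exist earlier in app.py (only `requests` and `os` are visible in the hunk) and reuses the model name and pipeline arguments the diff shows; it is a sketch, not the full file.

import torch
from transformers import pipeline

ASR_MODEL_NAME = "openai/whisper-medium.en"

asr_pipe = pipeline(
    task="automatic-speech-recognition",
    model=ASR_MODEL_NAME,
    chunk_length_s=30,
    # The commit replaces the hard-coded 'cuda' with this fallback, so the
    # pipeline can still be built when no GPU is visible at import time.
    device="cuda" if torch.cuda.is_available() else "cpu",
)

Since `process_audio` is decorated with `@spaces.GPU(required=True)` (second hunk), the GPU is typically only attached while that handler runs, which is presumably why an unconditional 'cuda' device at module import time was a problem.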