Upload 4 files

- AV_Huggy.png +0 -0
- README.md +5 -4
- app.py +71 -0
- requirements.txt +2 -0
AV_Huggy.png ADDED
(binary image file; preview omitted)
README.md CHANGED

@@ -1,13 +1,14 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
+title: Blazing Fast Whisper
+emoji: π
+colorFrom: gray
+colorTo: yellow
 sdk: gradio
 sdk_version: 5.25.2
 app_file: app.py
 pinned: false
 license: mit
+short_description: Blazing Fast Whisper Deployed on HF Inference Endpoints
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
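The frontmatter above is the configuration the Hub reads for the Space (title, emoji, card colors, SDK and version, entry file, license). As a side reference, here is a minimal sketch of inspecting that metadata programmatically with huggingface_hub; the Space id shown is hypothetical and not taken from this commit, and the exact attribute names may vary across library versions.

# Hedged sketch: reading a Space's configuration back from the Hub.
# "user/blazing-fast-whisper" is a hypothetical repo id; substitute the real namespace/name.
from huggingface_hub import HfApi

api = HfApi()
info = api.space_info("user/blazing-fast-whisper")
# sdk and card_data mirror what is declared in the README frontmatter above
print(info.sdk)
print(info.card_data)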
app.py ADDED

@@ -0,0 +1,71 @@
+import os
+from pathlib import Path
+from httpx import AsyncClient
+
+import gradio as gr
+import numpy as np
+from dotenv import load_dotenv
+from fastrtc import (
+    AdditionalOutputs,
+    ReplyOnPause,
+    Stream,
+    audio_to_bytes,
+    get_turn_credentials_async,
+    get_turn_credentials,
+)
+from gradio.utils import get_space
+
+cur_dir = Path(__file__).parent
+
+load_dotenv()
+
+
+client = AsyncClient()
+
+
+async def transcribe(audio: tuple[int, np.ndarray], transcript: str):
+    response = await client.post(
+        url="https://douatiz8x2itm3yn.us-east-1.aws.endpoints.huggingface.cloud/api/v1/audio/transcriptions",
+        headers={"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"},
+        files={"file": audio_to_bytes(audio)},
+        data={
+            "response_format": "text",
+        },
+    )
+    yield AdditionalOutputs(transcript + " " + response.text)
+
+
+transcript = gr.Textbox(label="Transcript")
+stream = Stream(
+    ReplyOnPause(transcribe),
+    modality="audio",
+    mode="send",
+    additional_inputs=[transcript],
+    additional_outputs=[transcript],
+    additional_outputs_handler=lambda a, b: b,
+    rtc_configuration=get_turn_credentials_async if get_space() else None,
+    server_rtc_configuration=get_turn_credentials(ttl=604_800),
+    concurrency_limit=20 if get_space() else None,
+    time_limit=300,
+    ui_args={"title": ""},
+)
+
+with gr.Blocks() as demo:
+    gr.HTML(
+        """
+        <h1 style='text-align: center; display: flex; align-items: center; justify-content: center;'>
+        <img src="/gradio_api/file=AV_Huggy.png" alt="Streaming Huggy" style="height: 50px; margin-right: 10px"> Really Fast Whisper
+        </h1>
+        """
+    )
+    gr.HTML(
+        """
+        <h2 style='text-align: center'>
+        Powered by <a href="https://huggingface.co/hfendpoints/whisper-large-v3">HF Inference Endpoints</a> and <a href="https://fastrtc.org/">FastRTC</a>
+        </h2>
+        """
+    )
+    stream.ui.render()
+
+if __name__ == "__main__":
+    demo.launch(allowed_paths=["AV_Huggy.png"])
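For reference, the transcribe handler above is a thin wrapper around the Inference Endpoint's /api/v1/audio/transcriptions route: it posts the audio bytes with an HF_TOKEN bearer header and appends the returned text to the running transcript. Below is a minimal standalone sketch of the same request, assuming HF_TOKEN is set in the environment and that "sample.wav" is a local audio file (the filename is illustrative, not part of this Space).

# Hedged sketch: calling the same transcription endpoint outside the Gradio app.
# Assumes HF_TOKEN is exported and "sample.wav" exists locally (both are assumptions,
# not part of the committed code).
import asyncio
import os

from httpx import AsyncClient


async def transcribe_file(path: str) -> str:
    async with AsyncClient(timeout=60) as client:
        with open(path, "rb") as f:
            response = await client.post(
                url="https://douatiz8x2itm3yn.us-east-1.aws.endpoints.huggingface.cloud/api/v1/audio/transcriptions",
                headers={"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"},
                files={"file": f.read()},
                data={"response_format": "text"},
            )
    response.raise_for_status()
    return response.text


if __name__ == "__main__":
    print(asyncio.run(transcribe_file("sample.wav")))

In the Space itself, fastrtc's audio_to_bytes performs the equivalent step, turning each streamed (sample_rate, ndarray) chunk into the bytes payload that gets posted.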
requirements.txt ADDED

@@ -0,0 +1,2 @@
+fastrtc[vad]==0.0.22.rc2
+python-dotenv
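requirements.txt only pins fastrtc[vad] and python-dotenv; the other imports in app.py (gradio, numpy, httpx) are presumably pulled in transitively. For a local run, app.py also needs an HF_TOKEN for the endpoint call, which it loads via load_dotenv(). A minimal preflight sketch, assuming a .env file next to app.py (the file and its contents are an assumption, not part of this commit):

# Hedged sketch: check that the token app.py relies on is available before launching.
# Assumes a .env file next to app.py containing a line like: HF_TOKEN=hf_xxx
import os

from dotenv import load_dotenv

load_dotenv()
if not os.getenv("HF_TOKEN"):
    raise SystemExit("HF_TOKEN is not set; the transcription request in app.py will not authenticate.")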