# kyutai/scripts/transcribe_from_mic_via_rust_server.py
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "msgpack",
# "numpy",
# "sounddevice",
# "websockets",
# ]
# ///
import argparse
import asyncio
import signal
import msgpack
import numpy as np
import sounddevice as sd
import websockets
# Audio sample rate (Hz) expected by the server's ASR model.
SAMPLE_RATE = 24000

# The VAD has several prediction heads, each of which tries to determine whether there
# has been a pause of a given length. The lengths are 0.5, 1.0, 2.0, and 3.0 seconds.
# Lower indices predict pauses more aggressively. In Unmute, we use 2.0 seconds = index 2.
PAUSE_PREDICTION_HEAD_INDEX = 2
async def receive_messages(websocket, show_vad: bool = False):
    """Receive and process messages from the WebSocket server.

    Prints each recognized word as it arrives. When ``show_vad`` is set, also
    prints a ``| `` marker the first time the pause-prediction head exceeds 0.5
    after speech has been observed, visualizing the semantic VAD.
    """
    try:
        speech_started = False
        async for message in websocket:
            data = msgpack.unpackb(message, raw=False)
            # The Step message only gets sent if the model has semantic VAD available
            if data["type"] == "Step" and show_vad:
                pause_prediction = data["prs"][PAUSE_PREDICTION_HEAD_INDEX]
                # Only mark a pause once per utterance: reset on the marker,
                # re-arm when the next word arrives.
                if pause_prediction > 0.5 and speech_started:
                    print("| ", end="", flush=True)
                    speech_started = False
            elif data["type"] == "Word":
                print(data["text"], end=" ", flush=True)
                speech_started = True
    except websockets.ConnectionClosed:
        print("Connection closed while receiving messages.")
2025-06-25 15:54:30 +00:00
async def send_messages(websocket, audio_queue):
    """Send audio data from the microphone queue to the WebSocket server.

    Drains anything already buffered in ``audio_queue`` before streaming so
    transcription starts from live audio rather than a stale backlog.
    """
    try:
        # Start by draining the queue to avoid lags
        while not audio_queue.empty():
            await audio_queue.get()

        print("Starting the transcription")

        while True:
            audio_data = await audio_queue.get()
            chunk = {"type": "Audio", "pcm": [float(x) for x in audio_data]}
            # use_single_float packs the samples as 32-bit floats, halving payload size.
            msg = msgpack.packb(chunk, use_bin_type=True, use_single_float=True)
            await websocket.send(msg)

    except websockets.ConnectionClosed:
        print("Connection closed while sending messages.")
2025-06-25 08:50:14 +00:00
2025-06-25 17:08:35 +00:00
async def stream_audio(url: str, api_key: str, show_vad: bool):
    """Stream microphone audio to a WebSocket server and print transcriptions."""
    print("Starting microphone recording...")
    print("Press Ctrl+C to stop recording")

    audio_queue = asyncio.Queue()
    # get_running_loop() (not the deprecated get_event_loop()) — the sounddevice
    # callback fires on a PortAudio thread and must hand data to this loop
    # thread-safely.
    loop = asyncio.get_running_loop()

    def audio_callback(indata, frames, time, status):
        # Runs on the audio thread: forward the mono channel to the asyncio
        # queue on the event-loop thread. copy() because PortAudio reuses the buffer.
        loop.call_soon_threadsafe(
            audio_queue.put_nowait, indata[:, 0].astype(np.float32).copy()
        )

    # Start audio stream
    with sd.InputStream(
        samplerate=SAMPLE_RATE,
        channels=1,
        dtype="float32",
        callback=audio_callback,
        blocksize=1920,  # 80ms blocks
    ):
        headers = {"kyutai-api-key": api_key}

        # Instead of using the header, you can authenticate by adding `?auth_id={api_key}` to the URL
        async with websockets.connect(url, additional_headers=headers) as websocket:
            send_task = asyncio.create_task(send_messages(websocket, audio_queue))
            receive_task = asyncio.create_task(
                receive_messages(websocket, show_vad=show_vad)
            )
            await asyncio.gather(send_task, receive_task)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Real-time microphone transcription")
    parser.add_argument(
        "--url",
        help="The URL of the server to which to send the audio",
        default="ws://127.0.0.1:8080",
    )
    parser.add_argument("--api-key", default="public_token")
    parser.add_argument(
        "--list-devices", action="store_true", help="List available audio devices"
    )
    parser.add_argument(
        "--device", type=int, help="Input device ID (use --list-devices to see options)"
    )
    parser.add_argument(
        "--show-vad",
        action="store_true",
        help="Visualize the predictions of the semantic voice activity detector with a '|' symbol",
    )
    args = parser.parse_args()

    def handle_sigint(signum, frame):
        # Exit quietly on Ctrl+C instead of dumping a KeyboardInterrupt traceback.
        print("Interrupted by user")  # Don't complain about KeyboardInterrupt
        exit(0)

    signal.signal(signal.SIGINT, handle_sigint)

    if args.list_devices:
        print("Available audio devices:")
        print(sd.query_devices())
        exit(0)

    if args.device is not None:
        sd.default.device[0] = args.device  # Set input device

    # The server exposes the streaming ASR endpoint under this fixed path.
    url = f"{args.url}/api/asr-streaming"
    asyncio.run(stream_audio(url, args.api_key, args.show_vad))