import math

import gradio as gr
import librosa
import numpy as np
import soundfile as sf
import spaces
import torch
from transformers import MoonshineForConditionalGeneration, AutoProcessor

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny").to(device).to(torch_dtype)
processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")

# Heuristic generation budget: roughly 12 decoder tokens per second of audio,
# clamped between a floor and a hard cap.
TOKENS_PER_SEC = 12.0
MIN_NEW_TOKENS = 48
MAX_NEW_TOKENS_CAP = 1600


@spaces.GPU
def transcribe_audio(audio_file):
    if not audio_file:
        return "No audio provided."

    audio_array, sr = sf.read(audio_file)

    # Downmix multi-channel audio to mono.
    if audio_array.ndim > 1:
        audio_array = np.mean(audio_array, axis=1)

    # Resample to the rate the Moonshine feature extractor expects.
    target_sr = processor.feature_extractor.sampling_rate
    if sr != target_sr:
        audio_array = librosa.resample(audio_array, orig_sr=sr, target_sr=target_sr)

    inputs = processor(audio_array, sampling_rate=target_sr, return_tensors="pt")
    # Cast only floating-point tensors to the model dtype; integer tensors
    # (e.g. an attention mask) must keep their original dtype.
    inputs = {
        k: v.to(device=device, dtype=torch_dtype) if v.is_floating_point() else v.to(device)
        for k, v in inputs.items()
    }

    # Scale the token budget with the clip duration.
    duration_sec = len(audio_array) / float(target_sr)
    max_new_tokens = min(
        MAX_NEW_TOKENS_CAP,
        max(MIN_NEW_TOKENS, int(math.ceil(duration_sec * TOKENS_PER_SEC))),
    )

    generated_ids = model.generate(
        **inputs,
        do_sample=False,
        max_new_tokens=max_new_tokens,
        no_repeat_ngram_size=4,
        repetition_penalty=1.05,
    )
    return processor.decode(generated_ids[0], skip_special_tokens=True)


theme = gr.themes.Ocean(
    primary_hue="indigo",
    secondary_hue="fuchsia",
    neutral_hue="slate",
).set(button_large_radius="*radius_sm")

with gr.Blocks(theme=theme) as demo:
    gr.Markdown("## Moonshine Tiny STT - 27M Parameters")
    gr.HTML("""