In speech generation, open-source models are rising fast, bringing developers and creators unprecedented convenience and possibilities. Today's pick, Maya1, is a standout among open-source voice models. In a single 3B-parameter package it combines more than 20 emotions, zero-shot voice design, and production-ready streaming. It is also friendly to modest hardware: it runs on a single GPU with 16 GB+ of VRAM at under 100 ms latency. Whether you want to design a distinctive voice or tag speech with emotions, Maya1 handles it with ease, is claimed to outperform top proprietary models, and is licensed for commercial deployment. It has already climbed into the top 3 on the Hugging Face trending list. Let's take a look.
Related links
Website: mayaresearch.ai
Twitter/X: @mayaresearch_ai
Hugging Face: maya-research
Try it: https://www.mayaresearch.ai/studio/text-to-speech
What makes Maya1 unique: key voice design features
Natural-language voice control. Describe the voice the way you would brief a voice actor: no complex parameters, no training data, just describe and generate.
Inline emotion tags for expressive speech. Add emotions at exactly the right spots in the text; more than 20 emotions are supported.
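A minimal sketch of what that looks like in practice (the tag names below are illustrative; check the model card for the exact list of supported tags):

# Describe the voice the way you would brief a voice actor...
description = "Energetic female voice in her 20s with a British accent. Bright timbre, fast conversational pacing."
# ...and drop emotion tags inline exactly where you want them in the speech.
text = "We actually pulled it off <laugh> I honestly can't believe it. <sigh> Okay, back to work."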
Streaming audio generation. Real-time speech synthesis with the SNAC neural codec (~0.98 kbps). Perfect for:
Voice assistants, interactive AI agents, real-time content generation, game characters, podcasts and audiobooks.
Production-ready infrastructure. Runs on a single GPU, scales through vLLM integration, uses automatic prefix caching for efficiency, outputs 24 kHz audio, and is WebAudio-compatible for in-browser playback. A serving sketch follows below.
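Since vLLM integration is listed above, here is a minimal sketch of offline batch generation through vLLM. This is a sketch only: it assumes the Maya1 checkpoint loads as a standard causal LM under vLLM, real-time streaming would instead go through vLLM's async engine or OpenAI-compatible server, and the prompt framing mirrors the build_prompt() helper from the tutorial below.

from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# Assumption: the Maya1 checkpoint loads as a standard causal LM under vLLM.
tokenizer = AutoTokenizer.from_pretrained("maya-research/maya1", trust_remote_code=True)
llm = LLM(model="maya-research/maya1", dtype="bfloat16")

description = "Realistic male voice in the 30s with an American accent."
text = "Hello from vLLM <laugh> this is streaming-friendly generation."

# Same special-token framing as build_prompt() in the tutorial below:
# SOH (128259) + BOS + text + EOT (128009) + EOH (128260) + SOA (128261) + SOS (128257)
prompt = (
    tokenizer.decode([128259]) + tokenizer.bos_token +
    f'<description="{description}"> {text}' +
    tokenizer.decode([128009]) + tokenizer.decode([128260]) +
    tokenizer.decode([128261]) + tokenizer.decode([128257])
)

sampling = SamplingParams(
    temperature=0.4, top_p=0.9, repetition_penalty=1.1,
    max_tokens=2048, stop_token_ids=[128258],  # 128258 = end-of-speech token
)
result = llm.generate([prompt], sampling)[0].outputs[0]
# result.token_ids holds SNAC codec tokens; decode them to audio with the
# extract_snac_codes / unpack_snac_from_7 / SNAC decoder steps shown in the tutorial below.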
Usage tutorial
The standalone script below loads Maya1 with Transformers, generates SNAC codec tokens, and decodes them into a 24 kHz WAV file. It depends on torch, transformers, snac, and soundfile.
#!/usr/bin/env python3
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from snac import SNAC
import soundfile as sf
import numpy as np

# Special token IDs used by Maya1's prompt format and SNAC code stream
CODE_START_TOKEN_ID = 128257
CODE_END_TOKEN_ID = 128258
CODE_TOKEN_OFFSET = 128266
SNAC_MIN_ID = 128266
SNAC_MAX_ID = 156937
SNAC_TOKENS_PER_FRAME = 7
SOH_ID = 128259
EOH_ID = 128260
SOA_ID = 128261
BOS_ID = 128000
TEXT_EOT_ID = 128009


def build_prompt(tokenizer, description: str, text: str) -> str:
    """Build formatted prompt for Maya1."""
    soh_token = tokenizer.decode([SOH_ID])
    eoh_token = tokenizer.decode([EOH_ID])
    soa_token = tokenizer.decode([SOA_ID])
    sos_token = tokenizer.decode([CODE_START_TOKEN_ID])
    eot_token = tokenizer.decode([TEXT_EOT_ID])
    bos_token = tokenizer.bos_token
    formatted_text = f'<description="{description}"> {text}'
    prompt = (
        soh_token + bos_token + formatted_text + eot_token +
        eoh_token + soa_token + sos_token
    )
    return prompt


def extract_snac_codes(token_ids: list) -> list:
    """Extract SNAC codes from generated tokens."""
    try:
        eos_idx = token_ids.index(CODE_END_TOKEN_ID)
    except ValueError:
        eos_idx = len(token_ids)
    snac_codes = [
        token_id for token_id in token_ids[:eos_idx]
        if SNAC_MIN_ID <= token_id <= SNAC_MAX_ID
    ]
    return snac_codes


def unpack_snac_from_7(snac_tokens: list) -> list:
    """Unpack 7-token SNAC frames to 3 hierarchical levels."""
    if snac_tokens and snac_tokens[-1] == CODE_END_TOKEN_ID:
        snac_tokens = snac_tokens[:-1]
    frames = len(snac_tokens) // SNAC_TOKENS_PER_FRAME
    snac_tokens = snac_tokens[:frames * SNAC_TOKENS_PER_FRAME]
    if frames == 0:
        return [[], [], []]
    l1, l2, l3 = [], [], []
    for i in range(frames):
        slots = snac_tokens[i*7:(i+1)*7]
        l1.append((slots[0] - CODE_TOKEN_OFFSET) % 4096)
        l2.extend([
            (slots[1] - CODE_TOKEN_OFFSET) % 4096,
            (slots[4] - CODE_TOKEN_OFFSET) % 4096,
        ])
        l3.extend([
            (slots[2] - CODE_TOKEN_OFFSET) % 4096,
            (slots[3] - CODE_TOKEN_OFFSET) % 4096,
            (slots[5] - CODE_TOKEN_OFFSET) % 4096,
            (slots[6] - CODE_TOKEN_OFFSET) % 4096,
        ])
    return [l1, l2, l3]


def main():
    # Load the best open source voice AI model
    print("\n[1/4] Loading Maya1 model...")
    model = AutoModelForCausalLM.from_pretrained(
        "maya-research/maya1",
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True
    )
    tokenizer = AutoTokenizer.from_pretrained(
        "maya-research/maya1",
        trust_remote_code=True
    )
    print(f"Model loaded: {len(tokenizer)} tokens in vocabulary")

    # Load SNAC audio decoder (24kHz)
    print("\n[2/4] Loading SNAC audio decoder...")
    snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").eval()
    if torch.cuda.is_available():
        snac_model = snac_model.to("cuda")
    print("SNAC decoder loaded")

    # Design your voice with natural language
    description = "Realistic male voice in the 30s age with american accent. Normal pitch, warm timbre, conversational pacing."
    text = "Hello! This is Maya1 <laugh_harder> the best open source voice AI model with emotions."
    print("\n[3/4] Generating speech...")
    print(f"Description: {description}")
    print(f"Text: {text}")

    # Create prompt with proper formatting
    prompt = build_prompt(tokenizer, description, text)

    # Debug: Show prompt details
    print("\nPrompt preview (first 200 chars):")
    print(f"  {repr(prompt[:200])}")
    print(f"  Prompt length: {len(prompt)} chars")

    # Generate emotional speech
    inputs = tokenizer(prompt, return_tensors="pt")
    print(f"  Input token count: {inputs['input_ids'].shape[1]} tokens")
    if torch.cuda.is_available():
        inputs = {k: v.to("cuda") for k, v in inputs.items()}
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=2048,    # Increase to let model finish naturally
            min_new_tokens=28,      # At least 4 SNAC frames
            temperature=0.4,
            top_p=0.9,
            repetition_penalty=1.1,  # Prevent loops
            do_sample=True,
            eos_token_id=CODE_END_TOKEN_ID,  # Stop at end of speech token
            pad_token_id=tokenizer.pad_token_id,
        )

    # Extract generated tokens (everything after the input prompt)
    generated_ids = outputs[0, inputs['input_ids'].shape[1]:].tolist()
    print(f"Generated {len(generated_ids)} tokens")

    # Debug: Check what tokens we got
    print(f"  First 20 tokens: {generated_ids[:20]}")
    print(f"  Last 20 tokens: {generated_ids[-20:]}")

    # Check if EOS was generated
    if CODE_END_TOKEN_ID in generated_ids:
        eos_position = generated_ids.index(CODE_END_TOKEN_ID)
        print(f"  EOS token found at position {eos_position}/{len(generated_ids)}")

    # Extract SNAC audio tokens
    snac_tokens = extract_snac_codes(generated_ids)
    print(f"Extracted {len(snac_tokens)} SNAC tokens")

    # Debug: Analyze token types
    snac_count = sum(1 for t in generated_ids if SNAC_MIN_ID <= t <= SNAC_MAX_ID)
    other_count = sum(1 for t in generated_ids if t < SNAC_MIN_ID or t > SNAC_MAX_ID)
    print(f"  SNAC tokens in output: {snac_count}")
    print(f"  Other tokens in output: {other_count}")

    # Check for SOS token
    if CODE_START_TOKEN_ID in generated_ids:
        sos_pos = generated_ids.index(CODE_START_TOKEN_ID)
        print(f"  SOS token at position: {sos_pos}")
    else:
        print("  No SOS token found in generated output!")

    if len(snac_tokens) < 7:
        print("Error: Not enough SNAC tokens generated")
        return

    # Unpack SNAC tokens to 3 hierarchical levels
    levels = unpack_snac_from_7(snac_tokens)
    frames = len(levels[0])
    print(f"Unpacked to {frames} frames")
    print(f"  L1: {len(levels[0])} codes")
    print(f"  L2: {len(levels[1])} codes")
    print(f"  L3: {len(levels[2])} codes")

    # Convert to tensors
    device = "cuda" if torch.cuda.is_available() else "cpu"
    codes_tensor = [
        torch.tensor(level, dtype=torch.long, device=device).unsqueeze(0)
        for level in levels
    ]

    # Generate final audio with SNAC decoder
    print("\n[4/4] Decoding to audio...")
    with torch.inference_mode():
        z_q = snac_model.quantizer.from_codes(codes_tensor)
        audio = snac_model.decoder(z_q)[0, 0].cpu().numpy()

    # Trim warmup samples (first 2048 samples)
    if len(audio) > 2048:
        audio = audio[2048:]
    duration_sec = len(audio) / 24000
    print(f"Audio generated: {len(audio)} samples ({duration_sec:.2f}s)")

    # Save your emotional voice output
    output_file = "output.wav"
    sf.write(output_file, audio, 24000)
    print("\nVoice generated successfully!")


if __name__ == "__main__":
    main()
Method overview
Architecture: a 3B-parameter Llama backbone for speech. A 3B-parameter decoder-only transformer (Llama-style) is pretrained to predict SNAC neural codec tokens rather than raw waveforms.
Why SNAC?
Its multi-scale hierarchical structure (≈12/23/47 Hz) keeps the autoregressive sequence compact, enabling real-time streaming at ~0.98 kbps.
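A quick back-of-the-envelope check on that figure, assuming each SNAC level draws from a 4096-entry codebook (12 bits per code, consistent with the % 4096 unpacking in the tutorial above):

frame_rates_hz = [12, 23, 47]      # approximate rates of the three SNAC levels
bits_per_code = 12                 # log2(4096)
bitrate_bps = sum(rate * bits_per_code for rate in frame_rates_hz)
print(bitrate_bps)                 # 984 bps, i.e. roughly 0.98 kbps
# Equivalently, the model emits 7 codec tokens per coarse (~12 Hz) frame,
# so about 7 * 12 ≈ 84 tokens per second of audio.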
Training data: what makes this voice AI stand out
Pretraining: an internet-scale English speech corpus for broad acoustic coverage and natural coarticulation.
Supervised fine-tuning: a proprietary, carefully curated dataset of studio recordings, including:
Human-verified voice descriptions
20+ emotion tags per sample
Multi-accent English coverage
Characters and persona variants
Data pipeline highlights:
Resampling to 24 kHz mono, loudness normalization to -23 LUFS
VAD-based silence trimming, with durations limited to 1-14 seconds
Forced alignment (MFA) for clean phrase boundaries
MinHash-LSH text deduplication
Chromaprint audio deduplication
SNAC encoding with 7-token frame packing (sketched below)
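As a sketch of that 7-token frame packing, here is the inverse of the unpack_snac_from_7() helper from the tutorial above: one coarse code, two mid codes, and four fine codes interleaved per frame. The function name pack_snac_to_7 and the per-slot ID ranges are our reading of the tutorial's constants (SNAC_MIN_ID..SNAC_MAX_ID spans 7 x 4096 IDs), not an official reference implementation.

CODE_TOKEN_OFFSET = 128266  # same constant as in the tutorial above

def pack_snac_to_7(levels: list) -> list:
    """Interleave [l1, l2, l3] SNAC codes into flat 7-token frames (inverse of unpack_snac_from_7)."""
    l1, l2, l3 = levels
    tokens = []
    for i in range(len(l1)):
        frame = [
            l1[i],        # slot 0: coarse level (~12 Hz)
            l2[2*i],      # slot 1: mid level
            l3[4*i],      # slot 2: fine level
            l3[4*i + 1],  # slot 3: fine level
            l2[2*i + 1],  # slot 4: mid level
            l3[4*i + 2],  # slot 5: fine level
            l3[4*i + 3],  # slot 6: fine level
        ]
        for slot, code in enumerate(frame):
            # Each slot occupies its own 4096-wide token ID range.
            tokens.append(CODE_TOKEN_OFFSET + slot * 4096 + code)
    return tokens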