更新
This commit is contained in:
13
models/CosyVoice/runtime/python/Dockerfile
Normal file
13
models/CosyVoice/runtime/python/Dockerfile
Normal file
@@ -0,0 +1,13 @@
|
||||
# Runtime image for the CosyVoice python servers (FastAPI + gRPC).
# torch 2.0.1 / cu117 base keeps us off a separate pip torch install.
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime

ENV DEBIAN_FRONTEND=noninteractive

WORKDIR /opt/CosyVoice

# Use the Aliyun mirror for faster apt downloads in CN regions.
RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
# update+install in ONE layer: a standalone `apt-get update` layer gets cached
# and later installs then resolve against a stale package index.  Dropping the
# apt lists afterwards keeps the layer small.
RUN apt-get update -y \
    && apt-get install -y git unzip git-lfs g++ \
    && rm -rf /var/lib/apt/lists/*
RUN git lfs install
RUN git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
# here we use python==3.10 because we cannot find an image which have both python3.8 and torch2.0.1-cu118 installed
RUN cd CosyVoice && pip3 install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com --no-cache-dir
RUN cd CosyVoice/runtime/python/grpc && python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cosyvoice.proto
|
||||
92
models/CosyVoice/runtime/python/fastapi/client.py
Normal file
92
models/CosyVoice/runtime/python/fastapi/client.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import logging
|
||||
import requests
|
||||
import torch
|
||||
import torchaudio
|
||||
import numpy as np
|
||||
|
||||
|
||||
def main():
    """Send one synthesis request to the FastAPI server and save the audio.

    Reads all request parameters from the module-level ``args`` namespace,
    streams the raw 16-bit PCM response and writes it to ``args.tts_wav``
    at ``target_sr``.  Fixes the original's duplicated request-building
    branches and its leaked prompt-wav file handle.
    """
    url = "http://{}:{}/inference_{}".format(args.host, args.port, args.mode)
    # Every mode sends tts_text; the extra form fields depend on the mode.
    payload = {'tts_text': args.tts_text}
    if args.mode == 'sft':
        payload['spk_id'] = args.spk_id
    elif args.mode == 'zero_shot':
        payload['prompt_text'] = args.prompt_text
    elif args.mode == 'cross_lingual':
        pass  # only tts_text plus the prompt wav attached below
    else:  # instruct
        payload['spk_id'] = args.spk_id
        payload['instruct_text'] = args.instruct_text

    files = None
    prompt_file = None
    try:
        if args.mode in ('zero_shot', 'cross_lingual'):
            # open explicitly so we can close it deterministically below
            # (the original left the handle open until interpreter exit)
            prompt_file = open(args.prompt_wav, 'rb')
            files = [('prompt_wav', ('prompt_wav', prompt_file, 'application/octet-stream'))]
        response = requests.request("GET", url, data=payload, files=files, stream=True)
        tts_audio = b''
        for r in response.iter_content(chunk_size=16000):
            tts_audio += r
    finally:
        if prompt_file is not None:
            prompt_file.close()

    # Server streams raw little-endian int16 PCM; wrap it as (1, n) for torchaudio.
    tts_speech = torch.from_numpy(np.array(np.frombuffer(tts_audio, dtype=np.int16))).unsqueeze(dim=0)
    logging.info('save response to {}'.format(args.tts_wav))
    torchaudio.save(args.tts_wav, tts_speech, target_sr)
    logging.info('get response')
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--host',
                        type=str,
                        default='0.0.0.0')
    # NOTE: argparse does not pass defaults through `type=`, so the default
    # must already be an int — the original default='50000' left args.port
    # as a string unless --port was given on the command line.
    parser.add_argument('--port',
                        type=int,
                        default=50000)
    parser.add_argument('--mode',
                        default='sft',
                        choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
                        help='request mode')
    parser.add_argument('--tts_text',
                        type=str,
                        default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
    parser.add_argument('--spk_id',
                        type=str,
                        default='中文女')
    parser.add_argument('--prompt_text',
                        type=str,
                        default='希望你以后能够做的比我还好呦。')
    parser.add_argument('--prompt_wav',
                        type=str,
                        default='../../../asset/zero_shot_prompt.wav')
    parser.add_argument('--instruct_text',
                        type=str,
                        default='Theo \'Crimson\', is a fiery, passionate rebel leader. \
                        Fights with fervor for justice, but struggles with impulsiveness.')
    parser.add_argument('--tts_wav',
                        type=str,
                        default='demo.wav')
    args = parser.parse_args()
    # Root logger defaults to WARNING; configure it so the logging.info
    # calls in main() are actually visible.
    logging.basicConfig(level=logging.INFO)
    # prompt input sample rate / synthesized output sample rate
    prompt_sr, target_sr = 16000, 22050
    main()
|
||||
95
models/CosyVoice/runtime/python/fastapi/server.py
Normal file
95
models/CosyVoice/runtime/python/fastapi/server.py
Normal file
@@ -0,0 +1,95 @@
|
||||
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import logging
|
||||
logging.getLogger('matplotlib').setLevel(logging.WARNING)
|
||||
from fastapi import FastAPI, UploadFile, Form, File
|
||||
from fastapi.responses import StreamingResponse
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
import uvicorn
|
||||
import numpy as np
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append('{}/../../..'.format(ROOT_DIR))
|
||||
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
|
||||
from cosyvoice.cli.cosyvoice import AutoModel
|
||||
from cosyvoice.utils.file_utils import load_wav
|
||||
|
||||
app = FastAPI()
# Set cross-region allowance: accept requests from any origin, with
# credentials, any method and any header (public demo API).
app.add_middleware(CORSMiddleware,
                   allow_origins=["*"],
                   allow_credentials=True,
                   allow_methods=["*"],
                   allow_headers=["*"])
|
||||
|
||||
|
||||
def generate_data(model_output):
    """Yield each synthesized chunk as raw 16-bit PCM bytes.

    ``model_output`` is an iterable of dicts whose ``tts_speech`` tensor
    holds float samples; each chunk is scaled by 2**15, cast to int16 and
    serialized for the streaming HTTP response.
    """
    scale = 2 ** 15
    for chunk in model_output:
        pcm = (chunk['tts_speech'].numpy() * scale).astype(np.int16)
        yield pcm.tobytes()
|
||||
|
||||
|
||||
@app.get("/inference_sft")
@app.post("/inference_sft")
async def inference_sft(tts_text: str = Form(), spk_id: str = Form()):
    """Synthesize ``tts_text`` with the built-in speaker ``spk_id``; stream PCM."""
    output = cosyvoice.inference_sft(tts_text, spk_id)
    return StreamingResponse(generate_data(output))
|
||||
|
||||
|
||||
@app.get("/inference_zero_shot")
@app.post("/inference_zero_shot")
async def inference_zero_shot(tts_text: str = Form(), prompt_text: str = Form(), prompt_wav: UploadFile = File()):
    """Zero-shot voice cloning: clone the voice in ``prompt_wav`` (whose
    transcript is ``prompt_text``) onto ``tts_text``; stream PCM."""
    # load the uploaded prompt via the project helper at a 16 kHz target
    prompt = load_wav(prompt_wav.file, 16000)
    output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt)
    return StreamingResponse(generate_data(output))
|
||||
|
||||
|
||||
@app.get("/inference_cross_lingual")
@app.post("/inference_cross_lingual")
async def inference_cross_lingual(tts_text: str = Form(), prompt_wav: UploadFile = File()):
    """Cross-lingual synthesis from a voice prompt (no transcript); stream PCM."""
    prompt = load_wav(prompt_wav.file, 16000)
    output = cosyvoice.inference_cross_lingual(tts_text, prompt)
    return StreamingResponse(generate_data(output))
|
||||
|
||||
|
||||
@app.get("/inference_instruct")
@app.post("/inference_instruct")
async def inference_instruct(tts_text: str = Form(), spk_id: str = Form(), instruct_text: str = Form()):
    """Instruction-controlled synthesis with a built-in speaker; stream PCM."""
    output = cosyvoice.inference_instruct(tts_text, spk_id, instruct_text)
    return StreamingResponse(generate_data(output))
|
||||
|
||||
|
||||
@app.get("/inference_instruct2")
@app.post("/inference_instruct2")
async def inference_instruct2(tts_text: str = Form(), instruct_text: str = Form(), prompt_wav: UploadFile = File()):
    """Instruction-controlled synthesis using an uploaded voice prompt; stream PCM."""
    prompt = load_wav(prompt_wav.file, 16000)
    output = cosyvoice.inference_instruct2(tts_text, instruct_text, prompt)
    return StreamingResponse(generate_data(output))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI: port to serve on and which model to load.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port',
                            type=int,
                            default=50000)
    arg_parser.add_argument('--model_dir',
                            type=str,
                            default='iic/CosyVoice2-0.5B',
                            help='local path or modelscope repo id')
    cli_args = arg_parser.parse_args()
    # Module-level handle used by every endpoint above.
    cosyvoice = AutoModel(model_dir=cli_args.model_dir)
    uvicorn.run(app, host="0.0.0.0", port=cli_args.port)
|
||||
106
models/CosyVoice/runtime/python/grpc/client.py
Normal file
106
models/CosyVoice/runtime/python/grpc/client.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import sys
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append('{}/../../..'.format(ROOT_DIR))
|
||||
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
|
||||
import logging
|
||||
import argparse
|
||||
import torchaudio
|
||||
import cosyvoice_pb2
|
||||
import cosyvoice_pb2_grpc
|
||||
import grpc
|
||||
import torch
|
||||
import numpy as np
|
||||
from cosyvoice.utils.file_utils import load_wav
|
||||
|
||||
|
||||
def main():
    """Send one synthesis request over gRPC and save the returned audio.

    Mode, texts and file paths come from the module-level ``args`` namespace;
    the streamed 16-bit PCM chunks are concatenated and written to
    ``args.tts_wav`` at ``target_sr``.
    """
    def to_pcm16(speech):
        # float waveform tensor -> raw little-endian int16 bytes
        return (speech.numpy() * (2**15)).astype(np.int16).tobytes()

    with grpc.insecure_channel("{}:{}".format(args.host, args.port)) as channel:
        stub = cosyvoice_pb2_grpc.CosyVoiceStub(channel)
        request = cosyvoice_pb2.Request()
        if args.mode == 'sft':
            logging.info('send sft request')
            sub = cosyvoice_pb2.sftRequest()
            sub.spk_id = args.spk_id
            sub.tts_text = args.tts_text
            request.sft_request.CopyFrom(sub)
        elif args.mode == 'zero_shot':
            logging.info('send zero_shot request')
            sub = cosyvoice_pb2.zeroshotRequest()
            sub.tts_text = args.tts_text
            sub.prompt_text = args.prompt_text
            sub.prompt_audio = to_pcm16(load_wav(args.prompt_wav, 16000))
            request.zero_shot_request.CopyFrom(sub)
        elif args.mode == 'cross_lingual':
            logging.info('send cross_lingual request')
            sub = cosyvoice_pb2.crosslingualRequest()
            sub.tts_text = args.tts_text
            sub.prompt_audio = to_pcm16(load_wav(args.prompt_wav, 16000))
            request.cross_lingual_request.CopyFrom(sub)
        else:
            logging.info('send instruct request')
            sub = cosyvoice_pb2.instructRequest()
            sub.tts_text = args.tts_text
            sub.spk_id = args.spk_id
            sub.instruct_text = args.instruct_text
            request.instruct_request.CopyFrom(sub)

        # Drain the streaming response while the channel is still open.
        chunks = [r.tts_audio for r in stub.Inference(request)]

    tts_audio = b''.join(chunks)
    tts_speech = torch.from_numpy(np.array(np.frombuffer(tts_audio, dtype=np.int16))).unsqueeze(dim=0)
    logging.info('save response to {}'.format(args.tts_wav))
    torchaudio.save(args.tts_wav, tts_speech, target_sr)
    logging.info('get response')
|
||||
|
||||
|
||||
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--host',
                        type=str,
                        default='0.0.0.0')
    # NOTE: argparse does not pass defaults through `type=`, so the default
    # must already be an int — the original default='50000' left args.port
    # as a string unless --port was given on the command line.
    parser.add_argument('--port',
                        type=int,
                        default=50000)
    parser.add_argument('--mode',
                        default='sft',
                        choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
                        help='request mode')
    parser.add_argument('--tts_text',
                        type=str,
                        default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
    parser.add_argument('--spk_id',
                        type=str,
                        default='中文女')
    parser.add_argument('--prompt_text',
                        type=str,
                        default='希望你以后能够做的比我还好呦。')
    parser.add_argument('--prompt_wav',
                        type=str,
                        default='../../../asset/zero_shot_prompt.wav')
    parser.add_argument('--instruct_text',
                        type=str,
                        default='Theo \'Crimson\', is a fiery, passionate rebel leader. \
                        Fights with fervor for justice, but struggles with impulsiveness.')
    parser.add_argument('--tts_wav',
                        type=str,
                        default='demo.wav')
    args = parser.parse_args()
    # Root logger defaults to WARNING; configure it so the logging.info
    # calls in main() are actually visible.
    logging.basicConfig(level=logging.INFO)
    # prompt input sample rate / synthesized output sample rate
    prompt_sr, target_sr = 16000, 22050
    main()
|
||||
43
models/CosyVoice/runtime/python/grpc/cosyvoice.proto
Normal file
43
models/CosyVoice/runtime/python/grpc/cosyvoice.proto
Normal file
@@ -0,0 +1,43 @@
|
||||
syntax = "proto3";

package cosyvoice;
option go_package = "protos/";

// Text-to-speech service: one request in, a server-side stream of raw
// PCM audio chunks out.
service CosyVoice{
  rpc Inference(Request) returns (stream Response) {}
}

// Exactly one of the four mode-specific payloads is set per request.
message Request{
  oneof RequestPayload {
    sftRequest sft_request = 1;
    zeroshotRequest zero_shot_request = 2;
    crosslingualRequest cross_lingual_request = 3;
    instructRequest instruct_request = 4;
  }
}

// Synthesis with a built-in (pretrained) speaker id.
message sftRequest{
  string spk_id = 1;
  string tts_text = 2;
}

// Zero-shot voice cloning: prompt audio plus its transcript.
// prompt_audio carries raw little-endian int16 PCM bytes.
message zeroshotRequest{
  string tts_text = 1;
  string prompt_text = 2;
  bytes prompt_audio = 3;
}

// Cross-lingual cloning: prompt audio only, no transcript.
message crosslingualRequest{
  string tts_text = 1;
  bytes prompt_audio = 2;
}

// Instruction-controlled synthesis with a built-in speaker.
message instructRequest{
  string tts_text = 1;
  string spk_id = 2;
  string instruct_text = 3;
}

// One streamed chunk of synthesized audio (raw int16 PCM bytes).
message Response{
  bytes tts_audio = 1;
}
|
||||
90
models/CosyVoice/runtime/python/grpc/server.py
Normal file
90
models/CosyVoice/runtime/python/grpc/server.py
Normal file
@@ -0,0 +1,90 @@
|
||||
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import sys
|
||||
from concurrent import futures
|
||||
import argparse
|
||||
import cosyvoice_pb2
|
||||
import cosyvoice_pb2_grpc
|
||||
import logging
|
||||
logging.getLogger('matplotlib').setLevel(logging.WARNING)
|
||||
import grpc
|
||||
import torch
|
||||
import numpy as np
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
sys.path.append('{}/../../..'.format(ROOT_DIR))
|
||||
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
|
||||
from cosyvoice.cli.cosyvoice import AutoModel
|
||||
|
||||
# Root logger at DEBUG so the per-request info/debug tracing below is visible.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s')
|
||||
|
||||
|
||||
class CosyVoiceServiceImpl(cosyvoice_pb2_grpc.CosyVoiceServicer):
    """gRPC servicer that streams synthesized speech as int16 PCM chunks."""

    def __init__(self, args):
        # Load the model once per server process.
        self.cosyvoice = AutoModel(model_dir=args.model_dir)
        logging.info('grpc service initialized')

    @staticmethod
    def _decode_prompt(pcm_bytes):
        """Raw little-endian int16 PCM bytes -> (1, n) float tensor scaled by 1/2**15."""
        speech = torch.from_numpy(np.array(np.frombuffer(pcm_bytes, dtype=np.int16))).unsqueeze(dim=0)
        return speech.float() / (2**15)

    def Inference(self, request, context):
        """Dispatch on the request's oneof payload and yield Response chunks."""
        if request.HasField('sft_request'):
            logging.info('get sft inference request')
            model_output = self.cosyvoice.inference_sft(
                request.sft_request.tts_text, request.sft_request.spk_id)
        elif request.HasField('zero_shot_request'):
            logging.info('get zero_shot inference request')
            prompt_speech_16k = self._decode_prompt(request.zero_shot_request.prompt_audio)
            model_output = self.cosyvoice.inference_zero_shot(
                request.zero_shot_request.tts_text,
                request.zero_shot_request.prompt_text,
                prompt_speech_16k)
        elif request.HasField('cross_lingual_request'):
            logging.info('get cross_lingual inference request')
            prompt_speech_16k = self._decode_prompt(request.cross_lingual_request.prompt_audio)
            model_output = self.cosyvoice.inference_cross_lingual(
                request.cross_lingual_request.tts_text, prompt_speech_16k)
        else:
            logging.info('get instruct inference request')
            model_output = self.cosyvoice.inference_instruct(
                request.instruct_request.tts_text,
                request.instruct_request.spk_id,
                request.instruct_request.instruct_text)

        logging.info('send inference response')
        for chunk in model_output:
            response = cosyvoice_pb2.Response()
            response.tts_audio = (chunk['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes()
            yield response
|
||||
|
||||
|
||||
def main():
    """Start the gRPC server on all interfaces and block until termination."""
    executor = futures.ThreadPoolExecutor(max_workers=args.max_conc)
    # Cap in-flight RPCs at the worker count so extra requests are rejected
    # rather than queueing unanswered behind busy workers.
    grpc_server = grpc.server(executor, maximum_concurrent_rpcs=args.max_conc)
    cosyvoice_pb2_grpc.add_CosyVoiceServicer_to_server(CosyVoiceServiceImpl(args), grpc_server)
    grpc_server.add_insecure_port('0.0.0.0:{}'.format(args.port))
    grpc_server.start()
    logging.info("server listening on 0.0.0.0:{}".format(args.port))
    grpc_server.wait_for_termination()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI: serving port, concurrency cap, and which model to load.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port',
                            type=int,
                            default=50000)
    arg_parser.add_argument('--max_conc',
                            type=int,
                            default=4)
    arg_parser.add_argument('--model_dir',
                            type=str,
                            default='iic/CosyVoice2-0.5B',
                            help='local path or modelscope repo id')
    args = arg_parser.parse_args()
    main()
|
||||
Reference in New Issue
Block a user