Spaces:
Runtime error
Runtime error
Upload 17 files
Browse files
- app.py +2 -9
- text/chinese_bert.py +2 -2
- text/english.py +2 -2
- text/english_bert_mock.py +2 -2
- text/japanese_bert.py +2 -2
app.py
CHANGED
|
@@ -362,7 +362,7 @@ if __name__ == "__main__":
|
|
| 362 |
)
|
| 363 |
speaker_ids = hps.data.spk2id
|
| 364 |
speakers = list(speaker_ids.keys())
|
| 365 |
-
languages = ["
|
| 366 |
with gr.Blocks() as app:
|
| 367 |
with gr.Row():
|
| 368 |
with gr.Column():
|
|
@@ -370,13 +370,6 @@ if __name__ == "__main__":
|
|
| 370 |
label="输入文本内容",
|
| 371 |
placeholder="""
|
| 372 |
目前只支持日语!!
|
| 373 |
-
如果你选择语言为\'mix\',必须按照格式输入,否则报错:
|
| 374 |
-
格式举例(zh是中文,jp是日语,不区分大小写;说话人举例:gongzi):
|
| 375 |
-
[说话人1]<zh>你好,こんにちは! <jp>こんにちは,世界。
|
| 376 |
-
[说话人2]<zh>你好吗?<jp>元気ですか?
|
| 377 |
-
[说话人3]<zh>谢谢。<jp>どういたしまして。
|
| 378 |
-
...
|
| 379 |
-
另外,所有的语言选项都可以用'|'分割长段实现分句生成。
|
| 380 |
""",
|
| 381 |
)
|
| 382 |
slicer = gr.Button("快速切分", variant="primary")
|
|
@@ -399,7 +392,7 @@ if __name__ == "__main__":
|
|
| 399 |
minimum=0.1, maximum=2, value=1.0, step=0.1, label="Length"
|
| 400 |
)
|
| 401 |
language = gr.Dropdown(
|
| 402 |
-
choices=languages, value=languages[
|
| 403 |
)
|
| 404 |
btn = gr.Button("生成音频!", variant="primary")
|
| 405 |
with gr.Column():
|
|
|
|
| 362 |
)
|
| 363 |
speaker_ids = hps.data.spk2id
|
| 364 |
speakers = list(speaker_ids.keys())
|
| 365 |
+
languages = ["JP"]
|
| 366 |
with gr.Blocks() as app:
|
| 367 |
with gr.Row():
|
| 368 |
with gr.Column():
|
|
|
|
| 370 |
label="输入文本内容",
|
| 371 |
placeholder="""
|
| 372 |
目前只支持日语!!
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
""",
|
| 374 |
)
|
| 375 |
slicer = gr.Button("快速切分", variant="primary")
|
|
|
|
| 392 |
minimum=0.1, maximum=2, value=1.0, step=0.1, label="Length"
|
| 393 |
)
|
| 394 |
language = gr.Dropdown(
|
| 395 |
+
choices=languages, value=languages[0], label="Language"
|
| 396 |
)
|
| 397 |
btn = gr.Button("生成音频!", variant="primary")
|
| 398 |
with gr.Column():
|
text/chinese_bert.py
CHANGED
|
@@ -5,8 +5,8 @@ from transformers import AutoModelForMaskedLM, AutoTokenizer
|
|
| 5 |
|
| 6 |
from config import config
|
| 7 |
|
| 8 |
-
|
| 9 |
-
LOCAL_PATH = 'hfl/chinese-roberta-wwm-ext-large'
|
| 10 |
|
| 11 |
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
|
| 12 |
|
|
|
|
| 5 |
|
| 6 |
from config import config
|
| 7 |
|
| 8 |
+
LOCAL_PATH = "./bert/chinese-roberta-wwm-ext-large"
|
| 9 |
+
#LOCAL_PATH = 'hfl/chinese-roberta-wwm-ext-large'
|
| 10 |
|
| 11 |
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
|
| 12 |
|
text/english.py
CHANGED
|
@@ -10,8 +10,8 @@ current_file_path = os.path.dirname(__file__)
|
|
| 10 |
CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
|
| 11 |
CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
|
| 12 |
_g2p = G2p()
|
| 13 |
-
|
| 14 |
-
LOCAL_PATH = 'microsoft/deberta-v3-large'
|
| 15 |
tokenizer = DebertaV2Tokenizer.from_pretrained(LOCAL_PATH)
|
| 16 |
|
| 17 |
arpa = {
|
|
|
|
| 10 |
CMU_DICT_PATH = os.path.join(current_file_path, "cmudict.rep")
|
| 11 |
CACHE_PATH = os.path.join(current_file_path, "cmudict_cache.pickle")
|
| 12 |
_g2p = G2p()
|
| 13 |
+
LOCAL_PATH = "./bert/deberta-v3-large"
|
| 14 |
+
#LOCAL_PATH = 'microsoft/deberta-v3-large'
|
| 15 |
tokenizer = DebertaV2Tokenizer.from_pretrained(LOCAL_PATH)
|
| 16 |
|
| 17 |
arpa = {
|
text/english_bert_mock.py
CHANGED
|
@@ -6,8 +6,8 @@ from transformers import DebertaV2Model, DebertaV2Tokenizer
|
|
| 6 |
from config import config
|
| 7 |
|
| 8 |
|
| 9 |
-
|
| 10 |
-
LOCAL_PATH = 'microsoft/deberta-v2-xlarge'
|
| 11 |
|
| 12 |
tokenizer = DebertaV2Tokenizer.from_pretrained(LOCAL_PATH)
|
| 13 |
|
|
|
|
| 6 |
from config import config
|
| 7 |
|
| 8 |
|
| 9 |
+
LOCAL_PATH = "./bert/deberta-v3-large"
|
| 10 |
+
#LOCAL_PATH = 'microsoft/deberta-v2-xlarge'
|
| 11 |
|
| 12 |
tokenizer = DebertaV2Tokenizer.from_pretrained(LOCAL_PATH)
|
| 13 |
|
text/japanese_bert.py
CHANGED
|
@@ -6,8 +6,8 @@ from transformers import AutoModelForMaskedLM, AutoTokenizer
|
|
| 6 |
from config import config
|
| 7 |
from text.japanese import text2sep_kata
|
| 8 |
|
| 9 |
-
|
| 10 |
-
LOCAL_PATH = 'ku-nlp/deberta-v2-large-japanese-char-wwm'
|
| 11 |
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
|
| 13 |
|
|
|
|
| 6 |
from config import config
|
| 7 |
from text.japanese import text2sep_kata
|
| 8 |
|
| 9 |
+
LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm"
|
| 10 |
+
#LOCAL_PATH = 'ku-nlp/deberta-v2-large-japanese-char-wwm'
|
| 11 |
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH)
|
| 13 |
|