admin committed on
Commit
6b28b6a
·
1 Parent(s): 35c6ae5
Files changed (3) hide show
  1. README.md +7 -7
  2. app.py +126 -159
  3. requirements.txt +0 -2
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
- title: LLM APIs
3
- emoji: 💬🤖
4
- colorFrom: blue
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.23.1
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
- short_description: LLM API Aggregation Deployment
12
  ---
 
1
  ---
2
+ title: eLuvLetter
3
+ emoji: 💌
4
+ colorFrom: red
5
+ colorTo: pink
6
  sdk: gradio
7
+ sdk_version: 5.22.0
8
  app_file: app.py
9
  pinned: false
10
+ license: apache-2.0
11
+ short_description: eLuvLetter Custom Configurator
12
  ---
app.py CHANGED
@@ -1,165 +1,132 @@
1
  import os
 
 
2
  import gradio as gr
3
- from openai import OpenAI
4
-
5
-
6
def predict(
    message,
    history,
    system_prompt,
    model,
    api_url,
    api_key,
    max_tk,
    temp,
    top_p,
):
    """Send one chat turn to an OpenAI-compatible endpoint and return the reply.

    Args:
        message: the new user message.
        history: list of (user, assistant) tuples from previous turns.
        system_prompt: system instruction prepended to the conversation.
        model: model identifier understood by the endpoint.
        api_url: base URL of the OpenAI-compatible API.
        api_key: bearer token; when falsy, a hint string is returned instead.
        max_tk: max_tokens limit for the completion.
        temp: sampling temperature.
        top_p: nucleus-sampling cutoff.

    Returns:
        The assistant reply text, or an error-message string on failure
        (this app surfaces errors in the chat window rather than raising).
    """
    if not api_key:
        return "Please set valid api keys in settings first."

    # Rebuild the conversation in OpenAI chat format.
    msgs = [{"role": "system", "content": system_prompt}]
    for user, assistant in history:
        msgs.append({"role": "user", "content": user})
        # BUG FIX: previous model replies must carry the "assistant" role,
        # not "system" — tagging them "system" corrupts the conversation
        # context the endpoint sees.
        msgs.append({"role": "assistant", "content": assistant})

    msgs.append({"role": "user", "content": message})
    try:
        client = OpenAI(api_key=api_key, base_url=api_url)
        response = client.chat.completions.create(
            model=model,
            messages=msgs,
            max_tokens=max_tk,
            temperature=temp,
            top_p=top_p,
            stream=False,
        ).to_dict()["choices"][0]["message"]["content"]

    except Exception as e:
        # Show the failure to the user instead of crashing the Gradio app.
        response = f"{e}"

    return response
42
-
43
-
44
def deepseek(
    message,
    history,
    model,
    api_key,
    system_prompt,
    max_tk,
    temp,
    top_p,
):
    """Chat handler for the DeepSeek tab.

    Proxies the turn to predict() against the DeepSeek endpoint and
    fake-streams the finished reply character by character so the UI
    shows a typing effect.
    """
    reply = predict(
        message,
        history,
        system_prompt,
        model,
        "https://api.deepseek.com",
        api_key,
        max_tk,
        temp,
        top_p,
    )
    # Yield progressively longer prefixes of the complete answer.
    shown = ""
    for ch in reply:
        shown += ch
        yield shown
69
-
70
-
71
def kimi(
    message,
    history,
    model,
    api_key,
    system_prompt,
    max_tk,
    temp,
    top_p,
):
    """Chat handler for the Kimi (Moonshot) tab.

    Proxies the turn to predict() against the Moonshot endpoint and
    fake-streams the finished reply character by character so the UI
    shows a typing effect.
    """
    reply = predict(
        message,
        history,
        system_prompt,
        model,
        "https://api.moonshot.cn/v1",
        api_key,
        max_tk,
        temp,
        top_p,
    )
    # Yield progressively longer prefixes of the complete answer.
    shown = ""
    for ch in reply:
        shown += ch
        yield shown
 
 
 
 
 
 
 
 
 
 
 
 
 
96
 
97
 
98
if __name__ == "__main__":
    with gr.Blocks() as demo:  # Create Gradio interface
        gr.Markdown("# LLM API Aggregation Deployment")
        # One tab per provider; each tab has its own settings accordion and
        # a ChatInterface wired to the matching handler function.
        with gr.Tab("DeepSeek"):
            with gr.Accordion(label="⚙️ Settings", open=False) as ds_acc:
                ds_model = gr.Dropdown(
                    choices=["deepseek-chat", "deepseek-reasoner"],
                    value="deepseek-chat",
                    label="Select a model",
                )
                # Default key comes from the deployment's environment/secrets.
                ds_key = gr.Textbox(
                    os.getenv("ds_api_key"),
                    type="password",
                    label="API key",
                )
                # NOTE(review): default prompt text contains typos ("carfuly");
                # it is a runtime string, so it is left untouched here.
                ds_sys = gr.Textbox(
                    "You are a useful assistant. first recognize user request and then reply carfuly and thinking",
                    label="System prompt",
                )
                ds_maxtk = gr.Slider(0, 32000, 10000, label="Max new tokens")
                ds_temp = gr.Slider(0, 1, 0.3, label="Temperature")
                ds_topp = gr.Slider(0, 1, 0.95, label="Top P sampling")

            # additional_inputs are appended (in order) to the handler's
            # (message, history) arguments.
            gr.ChatInterface(
                deepseek,
                additional_inputs=[
                    ds_model,
                    ds_key,
                    ds_sys,
                    ds_maxtk,
                    ds_temp,
                    ds_topp,
                ],
            )

        with gr.Tab("Kimi"):
            with gr.Accordion(label="⚙️ Settings", open=False) as kimi_acc:
                kimi_model = gr.Dropdown(
                    choices=["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"],
                    value="moonshot-v1-32k",
                    label="Select a model",
                )
                # Default key comes from the deployment's environment/secrets.
                kimi_key = gr.Textbox(
                    os.getenv("kimi_api_key"),
                    type="password",
                    label="API key",
                )
                kimi_sys = gr.Textbox(
                    "You are a useful assistant. first recognize user request and then reply carfuly and thinking",
                    label="System prompt",
                )
                kimi_maxtk = gr.Slider(0, 32000, 10000, label="Max new tokens")
                kimi_temp = gr.Slider(0, 1, 0.3, label="Temperature")
                kimi_topp = gr.Slider(0, 1, 0.95, label="Top P sampling")

            gr.ChatInterface(
                kimi,
                additional_inputs=[
                    kimi_model,
                    kimi_key,
                    kimi_sys,
                    kimi_maxtk,
                    kimi_temp,
                    kimi_topp,
                ],
            )

        # queue() serializes generator responses for the streaming handlers.
        demo.queue().launch()
 
1
  import os
2
+ import json
3
+ import base64
4
  import gradio as gr
5
+
6
+
7
def oversize(file_path: str, size_kb=1024):
    """Return True when the file at *file_path* is at least *size_kb* KiB."""
    limit_bytes = size_kb * 1024
    return os.path.getsize(file_path) >= limit_bytes


def toBase64(file_path: str):
    """Encode an audio file as an ``audio/mpeg`` base64 data URI.

    Falls back to the bundled ./example.mp3 when no path is supplied.
    Returns "" (empty string) when the file exceeds the size limit, which
    the caller treats as an "upload too large" signal.
    """
    path = file_path or "./example.mp3"

    if oversize(path):
        return ""

    with open(path, "rb") as fh:
        raw = fh.read()

    encoded = base64.b64encode(raw).decode("utf-8")
    return "data:audio/mpeg;base64," + encoded
25
+
26
+
27
def infer(
    recipient: str,
    sender: str,
    salutation: str,
    signature: str,
    body: str,
    title: str,
    bgm: str,
    out_json="./content.json",
):
    """Assemble the eLuvLetter ``content.json`` from the form fields.

    Args:
        recipient: name centered on the front of the envelope.
        sender: name on the back of the envelope.
        salutation: greeting in the letter's upper-left corner.
        signature: sign-off in the letter's lower-right corner.
        body: letter body; ``<br>`` is a line break and ``^N`` a typewriter
            pause of N milliseconds (per the UI placeholder text).
        title: browser tab text.
        bgm: filesystem path of the uploaded background-music file.
        out_json: path where the generated JSON file is written.

    Returns:
        A ``(json_path, status_message)`` tuple; ``json_path`` is None when
        generation fails (no BGM, or BGM over the size limit).
    """
    if not bgm:
        return None, "Please upload a BGM"

    # Remove any stale output so a failed run never serves an old file.
    if os.path.exists(out_json):
        os.remove(out_json)

    # NOTE(review): both replace() arguments render as a plain ASCII space
    # here, which would make these calls no-ops. The second argument was
    # presumably a non-breaking or full-width space that got normalized in
    # transit — confirm the intended character against the original repo.
    content = {
        "recipient": recipient.replace(" ", " "),
        "sender": sender.replace(" ", " "),
        "salutation": salutation.replace(" ", " "),
        "signature": signature.replace(" ", " "),
        "body": body.replace(" ", " "),
        "title": title,
        "bgm": toBase64(bgm),
    }

    # toBase64 returns "" when the file is over the size limit.
    if not content["bgm"]:
        return None, "Your uploaded BGM is too large"

    # ensure_ascii=False keeps CJK text readable in the generated file.
    with open(out_json, "w", encoding="utf-8") as json_file:
        json.dump(
            content,
            json_file,
            ensure_ascii=False,
            indent=4,
        )

    return out_json, "Generation success"
65
 
66
 
67
if __name__ == "__main__":
    # Blocks wrapper lets the Interface and the demo-video HTML stack vertically.
    with gr.Blocks() as demo:
        gr.Interface(
            fn=infer,
            # Input order must match infer()'s positional parameters.
            inputs=[
                gr.Textbox(
                    label="Recipient",
                    placeholder="The recipient centered on front of the envelope",
                ),
                gr.Textbox(
                    label="Sender",
                    placeholder="The sender name on back of the envelope",
                ),
                gr.Textbox(
                    label="Salutation",
                    placeholder="The salutation in letter's upper left corner",
                ),
                gr.Textbox(
                    label="Signature",
                    placeholder="The signature in letter's lower right corner",
                ),
                gr.TextArea(
                    label="Body",
                    placeholder="Body of the letter, <br> represents a line break and the number after ^ represents the number of milliseconds that the typewriter's effects pause",
                ),
                gr.Textbox(
                    label="Title",
                    placeholder="Browser tab text",
                ),
                # filepath + mp3 so infer() receives a local .mp3 path.
                gr.Audio(
                    label="BGM",
                    type="filepath",
                    format="mp3",
                ),
            ],
            # Matches infer()'s (json_path, status_message) return tuple.
            outputs=[
                gr.File(label="Download JSON file"),
                gr.Textbox(label="Status bar"),
            ],
            examples=[
                [
                    "To Hiro",
                    "Mika",
                    "弘树",
                    "美嘉",
                    " 如果那天...^600没有^200见到你<br> 我想我^600不会^200那么伤心<br> 那么难过<br> 不会^200泪流满面<br> 但是^600如果^200没有遇见你<br> 我就^200不会了解^600如此高兴<br> 如此^200温柔<br> 如此^200可爱<br> 如此^200温暖<br> 如此^200幸福^200的感觉<br> ^600现在^600还好吗?<br> 我...^600现在还和天空^200恋爱着",
                    "eLuvLetter",
                    "./example.mp3",
                ]
            ],
            title="eLuvLetter JSON Generator",
            submit_btn="Generate",
            clear_btn="Clear",
            flagging_mode="never",
            # The example references a local file; caching would try to run it at build time.
            cache_examples=False,
            description="""
            This tool can generate your customized content.json to replace the font/content.json in your forked <a href='https://github.com/Society-Genius/eLuvLetter' target='_blank'>eLuvLetter</a> repository, in which the BGM widget is used to upload the audio played when opening the envelope, it is recommended not to be too large, please make sure the audio is completely uploaded before clicking the Generate button.""",
        )

        # Embedded Bilibili player demonstrating the rendered letter.
        gr.HTML(
            """
            <iframe src="//player.bilibili.com/player.html?bvid=BV1hergYREEG&autoplay=0" scrolling="no" border="0" frameborder="no" framespacing="0" allowfullscreen="true" width="100%" style="aspect-ratio: 16 / 9;"></iframe>
            """
        )

    demo.launch()
 
 
requirements.txt DELETED
@@ -1,2 +0,0 @@
1
- huggingface_hub==0.25.2
2
- openai