Spaces:
Build error
Commit · 29b84f7
1 Parent(s): 71c3b19
fix app.py
app.py CHANGED
@@ -230,62 +230,116 @@ def combine_history(prompt):
     return total_prompt


-def main():
-    st.title('internlm2_5-7b-chat-assistant')
-
-    # torch.cuda.empty_cache()
-    print('load model begin.')
-    model, tokenizer = load_model()
-    print('load model end.')
-
-    generation_config = prepare_generation_config()
-
-    # Initialize chat history
-    if 'messages' not in st.session_state:
-        st.session_state.messages = []
-
-    # Display chat messages from history on app rerun
-    for message in st.session_state.messages:
-        with st.chat_message(message['role'], avatar=message.get('avatar')):
-            st.markdown(message['content'])
-
-    # Accept user input
-    if prompt := st.chat_input('What is up?'):
-        # Display user message in chat message container
-
-        with st.chat_message('user', avatar='user'):
-
-            st.markdown(prompt)
-        real_prompt = combine_history(prompt)
-        # Add user message to chat history
-        st.session_state.messages.append({
-            'role': 'user',
-            'content': prompt,
-            'avatar': 'user'
-        })
-
-        with st.chat_message('robot', avatar='assistant'):
-
-            message_placeholder = st.empty()
-            for cur_response in generate_interactive(
-                    model=model,
-                    tokenizer=tokenizer,
-                    prompt=real_prompt,
-                    additional_eos_token_id=92542,
-                    device='cuda:0',
-                    **asdict(generation_config),
-            ):
-                # Display robot response in chat message container
-                message_placeholder.markdown(cur_response + '▌')
-            message_placeholder.markdown(cur_response)
-        # Add robot response to chat history
-        st.session_state.messages.append({
-            'role': 'robot',
-            'content': cur_response,  # pylint: disable=undefined-loop-variable
-            'avatar': 'assistant',
-        })
-        torch.cuda.empty_cache()
-
-
-if __name__ == '__main__':
-    main()
+# def main():
+#     st.title('internlm2_5-7b-chat-assistant')
+
+#     # torch.cuda.empty_cache()
+#     print('load model begin.')
+#     model, tokenizer = load_model()
+#     print('load model end.')
+
+#     generation_config = prepare_generation_config()
+
+#     # Initialize chat history
+#     if 'messages' not in st.session_state:
+#         st.session_state.messages = []
+
+#     # Display chat messages from history on app rerun
+#     for message in st.session_state.messages:
+#         with st.chat_message(message['role'], avatar=message.get('avatar')):
+#             st.markdown(message['content'])
+
+#     # Accept user input
+#     if prompt := st.chat_input('What is up?'):
+#         # Display user message in chat message container
+
+#         with st.chat_message('user', avatar='user'):
+
+#             st.markdown(prompt)
+#         real_prompt = combine_history(prompt)
+#         # Add user message to chat history
+#         st.session_state.messages.append({
+#             'role': 'user',
+#             'content': prompt,
+#             'avatar': 'user'
+#         })
+
+#         with st.chat_message('robot', avatar='assistant'):
+
+#             message_placeholder = st.empty()
+#             for cur_response in generate_interactive(
+#                     model=model,
+#                     tokenizer=tokenizer,
+#                     prompt=real_prompt,
+#                     additional_eos_token_id=92542,
+#                     device='cuda:0',
+#                     **asdict(generation_config),
+#             ):
+#                 # Display robot response in chat message container
+#                 message_placeholder.markdown(cur_response + '▌')
+#             message_placeholder.markdown(cur_response)
+#         # Add robot response to chat history
+#         st.session_state.messages.append({
+#             'role': 'robot',
+#             'content': cur_response,  # pylint: disable=undefined-loop-variable
+#             'avatar': 'assistant',
+#         })
+#         torch.cuda.empty_cache()
+
+
+# if __name__ == '__main__':
+#     main()
+st.title('internlm2_5-7b-chat-assistant')
+
+# torch.cuda.empty_cache()
+print('load model begin.')
+model, tokenizer = load_model()
+print('load model end.')
+
+generation_config = prepare_generation_config()
+
+# Initialize chat history
+if 'messages' not in st.session_state:
+    st.session_state.messages = []
+
+# Display chat messages from history on app rerun
+for message in st.session_state.messages:
+    with st.chat_message(message['role'], avatar=message.get('avatar')):
+        st.markdown(message['content'])
+
+# Accept user input
+if prompt := st.chat_input('What is up?'):
+    # Display user message in chat message container
+
+    with st.chat_message('user', avatar='user'):
+
+        st.markdown(prompt)
+    real_prompt = combine_history(prompt)
+    # Add user message to chat history
+    st.session_state.messages.append({
+        'role': 'user',
+        'content': prompt,
+        'avatar': 'user'
+    })
+
+    with st.chat_message('robot', avatar='assistant'):
+
+        message_placeholder = st.empty()
+        for cur_response in generate_interactive(
+                model=model,
+                tokenizer=tokenizer,
+                prompt=real_prompt,
+                additional_eos_token_id=92542,
+                device='cuda:0',
+                **asdict(generation_config),
+        ):
+            # Display robot response in chat message container
+            message_placeholder.markdown(cur_response + '▌')
+        message_placeholder.markdown(cur_response)
+    # Add robot response to chat history
+    st.session_state.messages.append({
+        'role': 'robot',
+        'content': cur_response,  # pylint: disable=undefined-loop-variable
+        'avatar': 'assistant',
+    })
+    torch.cuda.empty_cache()
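
The hunk only covers the chat loop; load_model, prepare_generation_config, combine_history, and generate_interactive are defined earlier in app.py and are not part of this commit. Below is a minimal sketch of what the first three typically look like in the InternLM Streamlit demo this Space appears to be based on. The model id, slider ranges, dataclass fields, and prompt template are illustrative assumptions, not the Space's actual code.

# NOTE: hypothetical helper definitions, modeled on the public InternLM
# Streamlit demo; the real app.py may differ in model path and defaults.
from dataclasses import dataclass

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = 'internlm/internlm2_5-7b-chat'  # assumed checkpoint id


@dataclass
class GenerationConfig:
    # Fields are unpacked into generate_interactive() via asdict() above.
    max_length: int = 32768
    top_p: float = 0.8
    temperature: float = 0.8
    do_sample: bool = True
    repetition_penalty: float = 1.005


@st.cache_resource
def load_model():
    # Cache the weights across Streamlit reruns so the model loads once,
    # which matters now that the chat code runs at module level.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH, torch_dtype=torch.bfloat16,
        trust_remote_code=True).to('cuda:0').eval()
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_PATH, trust_remote_code=True)
    return model, tokenizer


def prepare_generation_config():
    # Sidebar controls for sampling; returns the dataclass consumed above.
    with st.sidebar:
        max_length = st.slider('Max Length', 8, 32768, 32768)
        top_p = st.slider('Top P', 0.0, 1.0, 0.8, step=0.01)
        temperature = st.slider('Temperature', 0.0, 1.0, 0.8, step=0.01)
    return GenerationConfig(
        max_length=max_length, top_p=top_p, temperature=temperature)


def combine_history(prompt):
    # Rebuild the whole conversation in the InternLM2 chat template before
    # generation; the hunk header shows this function ending with
    # "return total_prompt".
    total_prompt = ('<s><|im_start|>system\n'
                    'You are a helpful assistant.<|im_end|>\n')
    for message in st.session_state.messages:
        role = 'user' if message['role'] == 'user' else 'assistant'
        total_prompt += (f"<|im_start|>{role}\n"
                         f"{message['content']}<|im_end|>\n")
    total_prompt += (f'<|im_start|>user\n{prompt}<|im_end|>\n'
                     '<|im_start|>assistant\n')
    return total_prompt

Spaces launches the app with streamlit run app.py, so after this commit the chat UI executes directly on every script rerun instead of inside a main() entry point.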