Fixing a few typos.
app.py (CHANGED)
@@ -152,13 +152,13 @@ def generate_response(
     scores = [result['_relevance_score'] for result in results]
     snippets_html = "<h4>Snippets</h4>\n" + create_bulleted_list(snippets, scores)

-    # Generate the
-
+    # Generate the response from the LLM
+    stream_response = llm_utils.generate_chat_response_streaming(
         query, '\n\n'.join(snippets)
     )

     model_response = ""
-    for chunk in
+    for chunk in stream_response:
         model_response += chunk.data.choices[0].delta.content
         yield model_response, references_html, snippets_html

@@ -207,8 +207,8 @@ with gr.Blocks() as demo:
         label='Top k results', render=False
     )

-    # Snippets,
-    with gr.Accordion("Snippets /
+    # Snippets, sample questions, search parameters, documentation
+    with gr.Accordion("Snippets / sample questions / search parameters / documentation", open=False):

         # References and snippets
         with gr.Accordion("References & snippets", open=False):
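
The helper called in the first hunk, llm_utils.generate_chat_response_streaming, is not part of this diff. Below is a minimal sketch of what it could look like, assuming the Mistral Python SDK (>= 1.0), since the chunk.data.choices[0].delta.content access in generate_response matches that client's streaming events; the model name and prompt wording are placeholders, not taken from the app.

    # Hypothetical sketch of llm_utils.generate_chat_response_streaming; the real
    # helper is not shown in this diff. Assumes the Mistral Python SDK (>= 1.0),
    # whose streaming events expose event.data.choices[0].delta.content exactly as
    # generate_response() consumes them above.
    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    def generate_chat_response_streaming(query: str, context: str, model: str = "mistral-small-latest"):
        """Return a stream of completion events answering `query` from `context`."""
        prompt = (
            "Answer the question using only the context below.\n\n"
            f"Context:\n{context}\n\n"
            f"Question: {query}"
        )
        # client.chat.stream returns an iterable of CompletionEvent objects.
        return client.chat.stream(model=model, messages=[{"role": "user", "content": prompt}])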
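
For the second hunk, here is a self-contained sketch of how a streaming generator like generate_response plugs into a gr.Blocks layout with the renamed accordions. The component names, the echo generator, and the accordion nesting are illustrative assumptions, not the app's actual wiring.

    # Illustrative only: a tiny gr.Blocks app driven by a streaming generator,
    # with nested accordions mirroring the labels used in the diff above.
    import time
    import gradio as gr

    def stream_answer(query):
        # Gradio treats a generator handler as a streaming update: each yield
        # replaces the current values of the output components.
        partial = ""
        for word in f"You asked: {query}".split():
            partial += word + " "
            time.sleep(0.05)
            yield partial, "<h4>References</h4>", "<h4>Snippets</h4>"

    with gr.Blocks() as demo:
        question = gr.Textbox(label="Question")
        answer = gr.Markdown()
        with gr.Accordion("Snippets / sample questions / search parameters / documentation", open=False):
            with gr.Accordion("References & snippets", open=False):
                references_html = gr.HTML()
                snippets_html = gr.HTML()
        question.submit(stream_answer, inputs=question, outputs=[answer, references_html, snippets_html])

    demo.launch()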