Didier committed on
Commit
efca76e
·
verified ·
1 Parent(s): 599e07c

Fixing a few typos.

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -152,13 +152,13 @@ def generate_response(
152
  scores = [result['_relevance_score'] for result in results]
153
  snippets_html = "<h4>Snippets</h4>\n" + create_bulleted_list(snippets, scores)
154
 
155
- # Generate the reponse from the LLM
156
- stream_reponse = llm_utils.generate_chat_response_streaming(
157
  query, '\n\n'.join(snippets)
158
  )
159
 
160
  model_response = ""
161
- for chunk in stream_reponse:
162
  model_response += chunk.data.choices[0].delta.content
163
  yield model_response, references_html, snippets_html
164
 
@@ -207,8 +207,8 @@ with gr.Blocks() as demo:
207
  label='Top k results', render=False
208
  )
209
 
210
- # Snippets, sampel questions, search parameters, documentation
211
- with gr.Accordion("Snippets / Sample questions / search parameters/ documentation", open=False):
212
 
213
  # References and snippets
214
  with gr.Accordion("References & snippets", open=False):
 
152
  scores = [result['_relevance_score'] for result in results]
153
  snippets_html = "<h4>Snippets</h4>\n" + create_bulleted_list(snippets, scores)
154
 
155
+ # Generate the response from the LLM
156
+ stream_response = llm_utils.generate_chat_response_streaming(
157
  query, '\n\n'.join(snippets)
158
  )
159
 
160
  model_response = ""
161
+ for chunk in stream_response:
162
  model_response += chunk.data.choices[0].delta.content
163
  yield model_response, references_html, snippets_html
164
 
 
207
  label='Top k results', render=False
208
  )
209
 
210
+ # Snippets, sample questions, search parameters, documentation
211
+ with gr.Accordion("Snippets / sample questions / search parameters / documentation", open=False):
212
 
213
  # References and snippets
214
  with gr.Accordion("References & snippets", open=False):