Spaces:
Running
Running
Update model_functions.py
Browse files — model_functions.py (+3 −3)
model_functions.py
CHANGED
|
@@ -193,7 +193,7 @@ def generate_three_versions(prompt, temp1, temp2, temp3):
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": prompt}],
         temperature=temp1,
-        max_tokens=
+        max_tokens=200
     )
     output1 = response1.choices[0].message.content

@@ -202,7 +202,7 @@ def generate_three_versions(prompt, temp1, temp2, temp3):
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": prompt}],
         temperature=temp2,
-        max_tokens=
+        max_tokens=200
     )
     output2 = response2.choices[0].message.content

@@ -211,7 +211,7 @@ def generate_three_versions(prompt, temp1, temp2, temp3):
         model="gpt-3.5-turbo",
         messages=[{"role": "user", "content": prompt}],
         temperature=temp3,
-        max_tokens=
+        max_tokens=200
     )
     output3 = response3.choices[0].message.content