legends810 committed on
Commit
ec48efe
·
verified ·
1 Parent(s): 5f69b15

Update app/api/ask-ai/route.ts

Browse files
Files changed (1) hide show
  1. app/api/ask-ai/route.ts +19 -499
app/api/ask-ai/route.ts CHANGED
@@ -1,510 +1,30 @@
1
- /* eslint-disable @typescript-eslint/no-explicit-any */
2
- import type { NextRequest } from "next/server";
3
- import { NextResponse } from "next/server";
4
- import { headers } from "next/headers";
5
- import { InferenceClient } from "@huggingface/inference";
6
 
7
- import { MODELS, PROVIDERS } from "@/lib/providers";
8
- import {
9
- DIVIDER,
10
- FOLLOW_UP_SYSTEM_PROMPT,
11
- INITIAL_SYSTEM_PROMPT,
12
- MAX_REQUESTS_PER_IP,
13
- NEW_PAGE_END,
14
- NEW_PAGE_START,
15
- REPLACE_END,
16
- SEARCH_START,
17
- UPDATE_PAGE_START,
18
- UPDATE_PAGE_END,
19
- } from "@/lib/prompts";
20
- import MY_TOKEN_KEY from "@/lib/get-cookie-name";
21
- import { Page } from "@/types";
22
 
23
- const ipAddresses = new Map();
 
24
 
25
- export async function POST(request: NextRequest) {
26
- const authHeaders = await headers();
27
- const userToken = request.cookies.get(MY_TOKEN_KEY())?.value;
28
-
29
- const body = await request.json();
30
- const { prompt, provider, model, redesignMarkdown, previousPrompts, pages } = body;
31
-
32
- if (!model || (!prompt && !redesignMarkdown)) {
33
- return NextResponse.json(
34
- { ok: false, error: "Missing required fields" },
35
- { status: 400 }
36
- );
37
- }
38
-
39
- const selectedModel = MODELS.find(
40
- (m) => m.value === model || m.label === model
41
- );
42
-
43
- if (!selectedModel) {
44
- return NextResponse.json(
45
- { ok: false, error: "Invalid model selected" },
46
- { status: 400 }
47
- );
48
- }
49
-
50
- if (!selectedModel.providers.includes(provider) && provider !== "auto") {
51
- return NextResponse.json(
52
- {
53
- ok: false,
54
- error: `The selected model does not support the ${provider} provider.`,
55
- openSelectProvider: true,
56
- },
57
- { status: 400 }
58
- );
59
- }
60
-
61
- let token = userToken;
62
- let billTo: string | null = null;
63
-
64
- /**
65
- * Handle local usage token, this bypass the need for a user token
66
- * and allows local testing without authentication.
67
- * This is useful for development and testing purposes.
68
- */
69
- if (process.env.HF_TOKEN && process.env.HF_TOKEN.length > 0) {
70
- token = process.env.HF_TOKEN;
71
- }
72
-
73
- const ip = authHeaders.get("x-forwarded-for")?.includes(",")
74
- ? authHeaders.get("x-forwarded-for")?.split(",")[1].trim()
75
- : authHeaders.get("x-forwarded-for");
76
-
77
- if (!token) {
78
- ipAddresses.set(ip, (ipAddresses.get(ip) || 0) + 1);
79
- if (ipAddresses.get(ip) > MAX_REQUESTS_PER_IP) {
80
- return NextResponse.json(
81
- {
82
- ok: false,
83
- openLogin: true,
84
- message: "Log In to continue using the service",
85
- },
86
- { status: 429 }
87
- );
88
- }
89
-
90
- token = process.env.DEFAULT_HF_TOKEN as string;
91
- billTo = "huggingface";
92
- }
93
-
94
- const DEFAULT_PROVIDER = PROVIDERS.novita;
95
- const selectedProvider =
96
- provider === "auto"
97
- ? PROVIDERS[selectedModel.autoProvider as keyof typeof PROVIDERS]
98
- : PROVIDERS[provider as keyof typeof PROVIDERS] ?? DEFAULT_PROVIDER;
99
-
100
- const rewrittenPrompt = prompt;
101
-
102
- // if (prompt?.length < 240) {
103
-
104
- //rewrittenPrompt = await callAiRewritePrompt(prompt, { token, billTo });
105
- // }
106
 
 
107
  try {
108
- const encoder = new TextEncoder();
109
- const stream = new TransformStream();
110
- const writer = stream.writable.getWriter();
111
 
112
- const response = new NextResponse(stream.readable, {
113
- headers: {
114
- "Content-Type": "text/plain; charset=utf-8",
115
- "Cache-Control": "no-cache",
116
- Connection: "keep-alive",
117
- },
118
  });
119
 
120
- (async () => {
121
- // let completeResponse = "";
122
- try {
123
- const client = new InferenceClient(token);
124
- const chatCompletion = client.chatCompletionStream(
125
- {
126
- model: selectedModel.value,
127
- provider: selectedProvider.id as any,
128
- messages: [
129
- {
130
- role: "system",
131
- content: INITIAL_SYSTEM_PROMPT,
132
- },
133
- ...(pages?.length > 1 ? [{
134
- role: "assistant",
135
- content: `Here are the current pages:\n\n${pages.map((p: Page) => `- ${p.path} \n${p.html}`).join("\n")}\n\nNow, please create a new page based on this code. Also here are the previous prompts:\n\n${previousPrompts.map((p: string) => `- ${p}`).join("\n")}`
136
- }] : []),
137
- {
138
- role: "user",
139
- content: redesignMarkdown
140
- ? `Here is my current design as a markdown:\n\n${redesignMarkdown}\n\nNow, please create a new design based on this markdown.`
141
- : rewrittenPrompt,
142
- },
143
- ],
144
- max_tokens: selectedProvider.max_tokens,
145
- },
146
- billTo ? { billTo } : {}
147
- );
148
-
149
- while (true) {
150
- const { done, value } = await chatCompletion.next();
151
- if (done) {
152
- break;
153
- }
154
-
155
- const chunk = value.choices[0]?.delta?.content;
156
- if (chunk) {
157
- await writer.write(encoder.encode(chunk));
158
- }
159
- }
160
- } catch (error: any) {
161
- if (error.message?.includes("exceeded your monthly included credits")) {
162
- await writer.write(
163
- encoder.encode(
164
- JSON.stringify({
165
- ok: false,
166
- openProModal: true,
167
- message: error.message,
168
- })
169
- )
170
- );
171
- } else {
172
- await writer.write(
173
- encoder.encode(
174
- JSON.stringify({
175
- ok: false,
176
- message:
177
- error.message ||
178
- "An error occurred while processing your request.",
179
- })
180
- )
181
- );
182
- }
183
- } finally {
184
- await writer?.close();
185
- }
186
- })();
187
-
188
- return response;
189
- } catch (error: any) {
190
- return NextResponse.json(
191
- {
192
- ok: false,
193
- openSelectProvider: true,
194
- message:
195
- error?.message || "An error occurred while processing your request.",
196
- },
197
- { status: 500 }
198
- );
199
- }
200
- }
201
-
202
- export async function PUT(request: NextRequest) {
203
- const authHeaders = await headers();
204
- const userToken = request.cookies.get(MY_TOKEN_KEY())?.value;
205
-
206
- const body = await request.json();
207
- const { prompt, previousPrompts, provider, selectedElementHtml, model, pages, files, } =
208
- body;
209
-
210
- if (!prompt || pages.length === 0) {
211
- return NextResponse.json(
212
- { ok: false, error: "Missing required fields" },
213
- { status: 400 }
214
- );
215
- }
216
-
217
- const selectedModel = MODELS.find(
218
- (m) => m.value === model || m.label === model
219
- );
220
- if (!selectedModel) {
221
- return NextResponse.json(
222
- { ok: false, error: "Invalid model selected" },
223
- { status: 400 }
224
- );
225
- }
226
-
227
- let token = userToken;
228
- let billTo: string | null = null;
229
-
230
- /**
231
- * Handle local usage token, this bypass the need for a user token
232
- * and allows local testing without authentication.
233
- * This is useful for development and testing purposes.
234
- */
235
- if (process.env.HF_TOKEN && process.env.HF_TOKEN.length > 0) {
236
- token = process.env.HF_TOKEN;
237
- }
238
-
239
- const ip = authHeaders.get("x-forwarded-for")?.includes(",")
240
- ? authHeaders.get("x-forwarded-for")?.split(",")[1].trim()
241
- : authHeaders.get("x-forwarded-for");
242
-
243
- if (!token) {
244
- ipAddresses.set(ip, (ipAddresses.get(ip) || 0) + 1);
245
- if (ipAddresses.get(ip) > MAX_REQUESTS_PER_IP) {
246
- return NextResponse.json(
247
- {
248
- ok: false,
249
- openLogin: true,
250
- message: "Log In to continue using the service",
251
- },
252
- { status: 429 }
253
- );
254
- }
255
-
256
- token = process.env.DEFAULT_HF_TOKEN as string;
257
- billTo = "huggingface";
258
- }
259
-
260
- const client = new InferenceClient(token);
261
-
262
- const DEFAULT_PROVIDER = PROVIDERS.novita;
263
- const selectedProvider =
264
- provider === "auto"
265
- ? PROVIDERS[selectedModel.autoProvider as keyof typeof PROVIDERS]
266
- : PROVIDERS[provider as keyof typeof PROVIDERS] ?? DEFAULT_PROVIDER;
267
-
268
- try {
269
- const response = await client.chatCompletion(
270
- {
271
- model: selectedModel.value,
272
- provider: selectedProvider.id as any,
273
- messages: [
274
- {
275
- role: "system",
276
- content: FOLLOW_UP_SYSTEM_PROMPT,
277
- },
278
- {
279
- role: "user",
280
- content: previousPrompts
281
- ? `Also here are the previous prompts:\n\n${previousPrompts.map((p: string) => `- ${p}`).join("\n")}`
282
- : "You are modifying the HTML file based on the user's request.",
283
- },
284
- {
285
- role: "assistant",
286
-
287
- content: `${
288
- selectedElementHtml
289
- ? `\n\nYou have to update ONLY the following element, NOTHING ELSE: \n\n\`\`\`html\n${selectedElementHtml}\n\`\`\``
290
- : ""
291
- }. Current pages: ${pages?.map((p: Page) => `- ${p.path} \n${p.html}`).join("\n")}. ${files?.length > 0 ? `Current images: ${files?.map((f: string) => `- ${f}`).join("\n")}.` : ""}`,
292
- },
293
- {
294
- role: "user",
295
- content: prompt,
296
- },
297
- ],
298
- ...(selectedProvider.id !== "sambanova"
299
- ? {
300
- max_tokens: selectedProvider.max_tokens,
301
- }
302
- : {}),
303
- },
304
- billTo ? { billTo } : {}
305
- );
306
-
307
- const chunk = response.choices[0]?.message?.content;
308
- if (!chunk) {
309
- return NextResponse.json(
310
- { ok: false, message: "No content returned from the model" },
311
- { status: 400 }
312
- );
313
- }
314
-
315
- if (chunk) {
316
- const updatedLines: number[][] = [];
317
- let newHtml = "";
318
- const updatedPages = [...(pages || [])];
319
-
320
- const updatePageRegex = new RegExp(`${UPDATE_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([^\\s]+)\\s*${UPDATE_PAGE_END.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([\\s\\S]*?)(?=${UPDATE_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}|${NEW_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}|$)`, 'g');
321
- let updatePageMatch;
322
-
323
- while ((updatePageMatch = updatePageRegex.exec(chunk)) !== null) {
324
- const [, pagePath, pageContent] = updatePageMatch;
325
-
326
- const pageIndex = updatedPages.findIndex(p => p.path === pagePath);
327
- if (pageIndex !== -1) {
328
- let pageHtml = updatedPages[pageIndex].html;
329
-
330
- let processedContent = pageContent;
331
- const htmlMatch = pageContent.match(/```html\s*([\s\S]*?)\s*```/);
332
- if (htmlMatch) {
333
- processedContent = htmlMatch[1];
334
- }
335
- let position = 0;
336
- let moreBlocks = true;
337
-
338
- while (moreBlocks) {
339
- const searchStartIndex = processedContent.indexOf(SEARCH_START, position);
340
- if (searchStartIndex === -1) {
341
- moreBlocks = false;
342
- continue;
343
- }
344
-
345
- const dividerIndex = processedContent.indexOf(DIVIDER, searchStartIndex);
346
- if (dividerIndex === -1) {
347
- moreBlocks = false;
348
- continue;
349
- }
350
-
351
- const replaceEndIndex = processedContent.indexOf(REPLACE_END, dividerIndex);
352
- if (replaceEndIndex === -1) {
353
- moreBlocks = false;
354
- continue;
355
- }
356
-
357
- const searchBlock = processedContent.substring(
358
- searchStartIndex + SEARCH_START.length,
359
- dividerIndex
360
- );
361
- const replaceBlock = processedContent.substring(
362
- dividerIndex + DIVIDER.length,
363
- replaceEndIndex
364
- );
365
-
366
- if (searchBlock.trim() === "") {
367
- pageHtml = `${replaceBlock}\n${pageHtml}`;
368
- updatedLines.push([1, replaceBlock.split("\n").length]);
369
- } else {
370
- const blockPosition = pageHtml.indexOf(searchBlock);
371
- if (blockPosition !== -1) {
372
- const beforeText = pageHtml.substring(0, blockPosition);
373
- const startLineNumber = beforeText.split("\n").length;
374
- const replaceLines = replaceBlock.split("\n").length;
375
- const endLineNumber = startLineNumber + replaceLines - 1;
376
-
377
- updatedLines.push([startLineNumber, endLineNumber]);
378
- pageHtml = pageHtml.replace(searchBlock, replaceBlock);
379
- }
380
- }
381
-
382
- position = replaceEndIndex + REPLACE_END.length;
383
- }
384
-
385
- updatedPages[pageIndex].html = pageHtml;
386
-
387
- if (pagePath === '/' || pagePath === '/index' || pagePath === 'index') {
388
- newHtml = pageHtml;
389
- }
390
- }
391
- }
392
-
393
- const newPageRegex = new RegExp(`${NEW_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([^\\s]+)\\s*${NEW_PAGE_END.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([\\s\\S]*?)(?=${UPDATE_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}|${NEW_PAGE_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}|$)`, 'g');
394
- let newPageMatch;
395
-
396
- while ((newPageMatch = newPageRegex.exec(chunk)) !== null) {
397
- const [, pagePath, pageContent] = newPageMatch;
398
-
399
- let pageHtml = pageContent;
400
- const htmlMatch = pageContent.match(/```html\s*([\s\S]*?)\s*```/);
401
- if (htmlMatch) {
402
- pageHtml = htmlMatch[1];
403
- }
404
-
405
- const existingPageIndex = updatedPages.findIndex(p => p.path === pagePath);
406
-
407
- if (existingPageIndex !== -1) {
408
- updatedPages[existingPageIndex] = {
409
- path: pagePath,
410
- html: pageHtml.trim()
411
- };
412
- } else {
413
- updatedPages.push({
414
- path: pagePath,
415
- html: pageHtml.trim()
416
- });
417
- }
418
- }
419
-
420
- if (updatedPages.length === pages?.length && !chunk.includes(UPDATE_PAGE_START)) {
421
- let position = 0;
422
- let moreBlocks = true;
423
-
424
- while (moreBlocks) {
425
- const searchStartIndex = chunk.indexOf(SEARCH_START, position);
426
- if (searchStartIndex === -1) {
427
- moreBlocks = false;
428
- continue;
429
- }
430
-
431
- const dividerIndex = chunk.indexOf(DIVIDER, searchStartIndex);
432
- if (dividerIndex === -1) {
433
- moreBlocks = false;
434
- continue;
435
- }
436
-
437
- const replaceEndIndex = chunk.indexOf(REPLACE_END, dividerIndex);
438
- if (replaceEndIndex === -1) {
439
- moreBlocks = false;
440
- continue;
441
- }
442
-
443
- const searchBlock = chunk.substring(
444
- searchStartIndex + SEARCH_START.length,
445
- dividerIndex
446
- );
447
- const replaceBlock = chunk.substring(
448
- dividerIndex + DIVIDER.length,
449
- replaceEndIndex
450
- );
451
-
452
- if (searchBlock.trim() === "") {
453
- newHtml = `${replaceBlock}\n${newHtml}`;
454
- updatedLines.push([1, replaceBlock.split("\n").length]);
455
- } else {
456
- const blockPosition = newHtml.indexOf(searchBlock);
457
- if (blockPosition !== -1) {
458
- const beforeText = newHtml.substring(0, blockPosition);
459
- const startLineNumber = beforeText.split("\n").length;
460
- const replaceLines = replaceBlock.split("\n").length;
461
- const endLineNumber = startLineNumber + replaceLines - 1;
462
-
463
- updatedLines.push([startLineNumber, endLineNumber]);
464
- newHtml = newHtml.replace(searchBlock, replaceBlock);
465
- }
466
- }
467
-
468
- position = replaceEndIndex + REPLACE_END.length;
469
- }
470
-
471
- // Update the main HTML if it's the index page
472
- const mainPageIndex = updatedPages.findIndex(p => p.path === '/' || p.path === '/index' || p.path === 'index');
473
- if (mainPageIndex !== -1) {
474
- updatedPages[mainPageIndex].html = newHtml;
475
- }
476
- }
477
 
478
- return NextResponse.json({
479
- ok: true,
480
- updatedLines,
481
- pages: updatedPages,
482
- });
483
- } else {
484
- return NextResponse.json(
485
- { ok: false, message: "No content returned from the model" },
486
- { status: 400 }
487
- );
488
- }
489
- } catch (error: any) {
490
- if (error.message?.includes("exceeded your monthly included credits")) {
491
- return NextResponse.json(
492
- {
493
- ok: false,
494
- openProModal: true,
495
- message: error.message,
496
- },
497
- { status: 402 }
498
- );
499
- }
500
- return NextResponse.json(
501
- {
502
- ok: false,
503
- openSelectProvider: true,
504
- message:
505
- error.message || "An error occurred while processing your request.",
506
- },
507
- { status: 500 }
508
- );
509
  }
510
  }
 
1
+ // app/api/ask-ai/route.ts
 
 
 
 
2
 
3
+ import { GroqStream, StreamingTextResponse } from 'ai';
4
+ import Groq from 'groq-sdk';
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
+ // IMPORTANT: Set the runtime to edge
7
+ export const runtime = 'edge';
8
 
9
+ const groq = new Groq({
10
+ apiKey: process.env.GROQ_API_KEY,
11
+ });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
+ export async function POST(req: Request) {
14
  try {
15
+ const { messages } = await req.json();
 
 
16
 
17
+ const response = await groq.chat.completions.create({
18
+ model: 'llama-3.1-8b-instant', // Aap model change kar sakte hain
19
+ stream: true,
20
+ messages,
 
 
21
  });
22
 
23
+ const stream = GroqStream(response);
24
+ return new StreamingTextResponse(stream);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ } catch (error) {
27
+ console.error('[GROQ API ERROR]', error);
28
+ return new Response('An error occurred with the Groq API.', { status: 500 });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  }
30
  }