# Ghaymah GenAI chatbot — Gradio app (Hugging Face Space)
import asyncio
import base64
import os
import sys
import time
from typing import Dict, List

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

# Windows needs the Proactor loop for async subprocess/socket support used by
# some server stacks; ignore failures on platforms where the policy is absent.
if sys.platform.startswith("win"):
    try:
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
    except Exception:
        pass
# --- Configuration ----------------------------------------------------------
load_dotenv()

APP_Name = os.getenv("APP_Name", "Ghaymah GenAI chatbots")
APP_Version = os.getenv("APP_Version", "0.1.0")
API_KEY = os.getenv("API_KEY", "")

# Model roster: the "Models" env var (comma-separated) overrides the defaults.
_DEFAULT_MODELS = [
    "QwQ-32B",
    "DeepSeek-V3-0324",
    "Qwen/Qwen3-32B",
    "zai-org/GLM-4.5-Air",
    "moonshotai/Kimi-K2-Instruct",
]
_models_from_env = [m.strip() for m in os.getenv("Models", "").split(",") if m.strip()]
MODELS = _models_from_env or _DEFAULT_MODELS

# One-line blurb per model, shown next to the radio selector.
MODEL_INFO = {
    "QwQ-32B": "QwQ-32B — reasoning-focused; strong long-form answers.",
    "DeepSeek-V3-0324": "DeepSeek V3 (0324) — versatile, great multi-step reasoning.",
    "Qwen/Qwen3-32B": "Qwen3-32B — multilingual, good code & math.",
    "zai-org/GLM-4.5-Air": "GLM-4.5-Air — efficient generalist, good latency.",
    "moonshotai/Kimi-K2-Instruct": "Kimi K2 Instruct — long-context, helpful writing.",
}

LOGO_PATH = "download.jpeg"
COMPANY_LOGO = LOGO_PATH
OWNER_NAME = "ENG. Ahmed Yasser El Sharkawy"

CSS = """
.app-header{display:flex;align-items:center;gap:12px;justify-content:center;margin:6px 0 16px}
.app-header img{height:60px;border-radius:12px}
.app-title{font-weight:800;font-size:28px;line-height:1.1}
.app-sub{opacity:.7;font-size:14px}
"""

# OpenAI-compatible endpoint; without an API key we keep client=None and the
# chat helper reports the misconfiguration instead of crashing.
BASE_URL = "https://genai.ghaymah.systems"
client = OpenAI(api_key=API_KEY, base_url=BASE_URL) if API_KEY else None

SYSTEM_SEED = "You are Ghaymah Assistant. Be concise and helpful."

# Seconds to wait before each successive rate-limit retry.
BACKOFF = [5, 10, 20]
# Known image extensions -> MIME type; anything else falls back to PNG.
_MIME_BY_EXT = {
    ".png": "image/png", ".jpg": "image/jpeg", ".jpeg": "image/jpeg",
    ".webp": "image/webp", ".gif": "image/gif",
}


def logo_data_uri(path: str) -> str:
    """Return the image at *path* as a ``data:`` URI, or "" if unreadable.

    The URI is embedded directly in the header HTML so the logo renders
    without a static-file route. A missing or unreadable logo is non-fatal:
    we return "" and the header is rendered without an <img> tag.
    """
    ext = os.path.splitext(path)[1].lower()
    mime = _MIME_BY_EXT.get(ext, "image/png")
    try:
        # Open directly instead of exists()+open(): avoids the race where the
        # file disappears between the check and the read (TOCTOU).
        with open(path, "rb") as f:
            b64 = base64.b64encode(f.read()).decode("utf-8")
    except OSError:
        return ""
    return f"data:{mime};base64,{b64}"
def safe_chat_complete(model: str, messages: List[Dict], max_tokens: int = 800) -> str:
    """Call the chat-completions endpoint and return the assistant's text.

    Retries on apparent rate limits (error text containing "429" or "Rate"),
    sleeping BACKOFF[i] seconds before retry i. Any other failure — or
    exhausting the backoff schedule — yields a human-readable error string
    rather than raising, so the UI always has something to display.
    """
    if not client:
        return "⚠️ Missing API_KEY in .env"
    retries_used = 0
    while True:
        try:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=0.3,
                timeout=90,
            )
        except Exception as err:
            text = str(err)
            rate_limited = "429" in text or "Rate" in text
            if rate_limited and retries_used < len(BACKOFF):
                time.sleep(BACKOFF[retries_used])
                retries_used += 1
                continue
            return f"Request failed for `{model}`: {err}"
        return response.choices[0].message.content or ""
def init_state():
    """Return fresh per-session state: a message list seeded with the system prompt."""
    seed = {"role": "system", "content": SYSTEM_SEED}
    return {"messages": [seed]}
| # Gradio app | |
with gr.Blocks(title=f"{APP_Name} v{APP_Version}", css=CSS) as demo:
    # Header: embed the logo as a data URI so it renders without a file route.
    logo_src = logo_data_uri(COMPANY_LOGO)
    logo_tag = f"<img src='{logo_src}' alt='logo'>" if logo_src else ""
    gr.HTML(f"""
    <div class="app-header">
      {logo_tag}
      <div class="app-header-text">
        <div class="app-title">{APP_Name}</div>
        <div class="app-sub">v{APP_Version} • {OWNER_NAME}</div>
      </div>
    </div>
    """)

    # Per-session conversation state (Gradio copies the initial value per user).
    state = gr.State(init_state())

    with gr.Row():
        # Left column: the conversation and input controls.
        with gr.Column(scale=3):
            chat_box = gr.Chatbot(label="Chat", height=520, type="messages", value=[])
            message_box = gr.Textbox(label="Your message", placeholder="Type here…", lines=2)
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")

        # Right column: model picker, its description, and the logo.
        with gr.Column(scale=1, min_width=320):
            model_choice = gr.Radio(
                choices=MODELS,
                value=MODELS[0],
                label="Models",
                info="Select Your Model Here",
            )
            info_md = gr.Markdown(MODEL_INFO.get(MODELS[0], ""))

            def _describe_model(name: str) -> str:
                # Bold model name above its one-line blurb.
                heading = f"**{name}**"
                blurb = MODEL_INFO.get(name, "")
                return f"{heading}\n\n{blurb}"

            model_choice.change(_describe_model, model_choice, info_md)
            gr.Image(LOGO_PATH, show_label=False, container=False)

    def _append_user(text, history):
        # Ignore empty submissions; otherwise push the user turn and clear the box.
        if not text:
            return "", (history or [])
        return "", (history or []) + [{"role": "user", "content": text}]

    def _generate_reply(history, model_name, session):
        # Rebuild the API payload: system seed + only the last 2 visible messages.
        payload = [{"role": "system", "content": SYSTEM_SEED}]
        for turn in (history or [])[-2:]:
            role = turn.get("role")
            content = turn.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                payload.append({"role": role, "content": content})
        answer = safe_chat_complete(model_name, payload, max_tokens=800)
        updated_history = (history or []) + [{"role": "assistant", "content": answer}]
        session = session or init_state()
        session["messages"] = payload + [{"role": "assistant", "content": answer}]
        return updated_history, session

    def _reset():
        # Wipe both the visible chat and the per-session state.
        return [], init_state()

    # Enter key and Send button share the same two-step pipeline:
    # append the user turn, then ask the selected model for a reply.
    message_box.submit(_append_user, [message_box, chat_box], [message_box, chat_box]) \
        .then(_generate_reply, [chat_box, model_choice, state], [chat_box, state])
    send_btn.click(_append_user, [message_box, chat_box], [message_box, chat_box]) \
        .then(_generate_reply, [chat_box, model_choice, state], [chat_box, state])
    clear_btn.click(_reset, outputs=[chat_box, state])

if __name__ == "__main__":
    demo.queue()
    demo.launch()