import torch
import gradio as gr
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TextIteratorStreamer

model_id = "large-traversaal/Alif-1.0-8B-Instruct"

# 4-bit quantization configuration
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

# Load tokenizer and model in 4-bit
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",
)

# Prompt template: judge persona plus an Alpaca-style instruction/input/response layout
chat_prompt = """You are JusticeGPT, an AI assistant with the wisdom and authority of a judge. You provide balanced, fair, and thoughtful responses.

### Instruction:
Below is an instruction that describes a task. Write a response in Urdu that appropriately completes the request. Don't say you don't know unless you really don't. Please be expressive when needed. Give long and detailed answers.

### Input:
{prompt}

### Response:
"""


def generate_response(message, history):
    """Generate a response token by token, yielding the accumulated text."""
    prompt = chat_prompt.format(prompt=message)
    inputs = tokenizer([prompt], return_tensors="pt").to(
        "cuda" if torch.cuda.is_available() else "cpu"
    )

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        inputs,
        streamer=streamer,
        max_new_tokens=4098,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
        repetition_penalty=1.2,
    )

    # Run generation in a background thread so tokens can be streamed as they arrive
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    generated_text = ""
    for new_text in streamer:
        # Drop a trailing end-of-sequence token if the streamer emits one
        if new_text.endswith(tokenizer.eos_token):
            new_text = new_text[: len(new_text) - len(tokenizer.eos_token)]
        generated_text += new_text
        yield generated_text


# Custom CSS for the judge theme
custom_css = """
/* Judge-themed CSS */
.gradio-container { background: linear-gradient(135deg, #2c3e50 0%, #3498db 100%); font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
#header { text-align: center; background: linear-gradient(135deg, #8B4513 0%, #D2691E 100%); padding: 20px; border-radius: 10px; margin-bottom: 20px; color: white; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); }
#judge-img { max-width: 150px; border-radius: 50%; border: 5px solid gold; margin: 0 auto; display: block; }
.chatbot { background: white; border-radius: 15px; box-shadow: 0 4px 15px rgba(0, 0, 0, 0.2); min-height: 500px; }
.chatbot .message { padding: 15px; margin: 10px; border-radius: 10px; }
.chatbot .user { background: #e3f2fd; border-left: 4px solid #2196F3; }
.chatbot .bot { background: #f3e5f5; border-left: 4px solid #9C27B0; }
#input-box { background: white; border-radius: 10px; padding: 15px; box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1); }
#examples { background: rgba(255, 255, 255, 0.9); border-radius: 10px; padding: 15px; margin-top: 20px; }
.examples-title { font-weight: bold; color: #2c3e50; margin-bottom: 10px; }
.examples-container { display: flex; flex-direction: column; gap: 8px; }
.example-btn { background: #3498db; color: white; border: none; padding: 8px 15px; border-radius: 20px; cursor: pointer; transition: all 0.3s ease; }
.example-btn:hover { background: #2980b9; transform: translateY(-2px); }
#submit-btn { background: linear-gradient(135deg, #27ae60 0%, #2ecc71 100%); color: white; border: none; padding: 12px 30px; border-radius: 25px; font-weight: bold; transition: all 0.3s ease; }
#submit-btn:hover { transform: translateY(-2px); box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); }
#clear-btn { background: linear-gradient(135deg, #e74c3c 0%, #c0392b 100%); color: white; border: none; padding: 12px 30px; border-radius: 25px; font-weight: bold; transition: all 0.3s ease; }
#clear-btn:hover { transform: translateY(-2px); box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); }
"""

# Judge image URL (you can replace this with your own image)
judge_image = "https://images.unsplash.com/photo-1589829545856-d10d557cf95f?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=500&q=80"

# Create the enhanced interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header section with judge image and title
    with gr.Column(elem_id="header"):
        gr.HTML(f"""
            <img src="{judge_image}" alt="Justice GPT" id="judge-img">
            <h1>⚖️ JusticeGPT ⚖️</h1>
            <h3>Your AI Assistant with Judicial Wisdom</h3>
        """)

    # Main chat area
    chatbot = gr.Chatbot(
        label="JusticeGPT Conversation",
        height=500,
        show_copy_button=True,
        show_share_button=True,
    )

    # Input area
    with gr.Row():
        msg = gr.Textbox(
            label="Your Question",
            placeholder="Ask JusticeGPT anything in Urdu or English...",
            lines=2,
            max_lines=5,
            scale=4,
            container=False,
            elem_id="input-box",
        )

    # Buttons
    with gr.Row():
        submit_btn = gr.Button("⚡ Submit Question", elem_id="submit-btn", scale=1)
        clear_btn = gr.Button("🗑️ Clear Chat", elem_id="clear-btn", scale=1)

    # Examples section
    with gr.Column(elem_id="examples"):
        gr.Markdown("### 💡 Example Questions")
        with gr.Row():
            gr.Examples(
                examples=[
                    "شہر کراچی کی کیا اہمیت ہے؟",
                    "امریکی آئین کی بنیادی خصوصیات کیا ہیں؟",
                    "کیا امتحان میں نقل کرنا جائز ہے؟",
                    "What are the principles of justice?",
                    "How does the judicial system work?",
                    "Explain the concept of fairness in law",
                ],
                inputs=msg,
                label="Click any example to try:",
                examples_per_page=6,
            )

    # Event handlers
    def respond(message, chat_history):
        # Stream the reply into the chat history as it is generated
        chat_history = chat_history + [(message, "")]
        for partial_response in generate_response(message, chat_history):
            chat_history[-1] = (message, partial_response)
            yield "", chat_history

    def clear_chat():
        return []

    # Connect the interface
    submit_btn.click(respond, [msg, chatbot], [msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear_btn.click(clear_chat, outputs=chatbot)

# Launch the interface
if __name__ == "__main__":
    demo.launch(
        share=True,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
    )