Dhruv Pawar committed
Commit 5e0ae28 · 0 Parent(s)

Initial commit: add all project files
.env.example ADDED
@@ -0,0 +1,53 @@
+ # ==================== API KEYS ====================
+ GROQ_API_KEY=your_groq_api_key_here
+
+
+ # ==================== APPLICATION CONFIG ====================
+ APP_ENV=development  # development or production
+ LOG_LEVEL=INFO  # DEBUG, INFO, WARNING, ERROR, CRITICAL
+
+
+ # ==================== CONVERSATION SETTINGS ====================
+ MAX_HISTORY_LENGTH=10
+ MAX_CONVERSATION_STORAGE=1000
+
+
+ # ==================== MODEL PARAMETERS ====================
+ DEFAULT_TEMPERATURE=0.7
+ DEFAULT_MAX_TOKENS=4000
+
+
+ # ==================== API SETTINGS ====================
+ REQUEST_TIMEOUT=60
+ MAX_RETRIES=3
+ RETRY_DELAY=1.0
+
+
+ # ==================== CACHE SETTINGS ====================
+ ENABLE_CACHE=true
+ CACHE_SIZE=100
+ CACHE_TTL=3600  # seconds (1 hour)
+
+
+ # ==================== RATE LIMITING ====================
+ ENABLE_RATE_LIMITING=true
+ RATE_LIMIT_REQUESTS=50
+ RATE_LIMIT_WINDOW=60  # seconds
+
+
+ # ==================== FILE STORAGE ====================
+ EXPORT_DIR=exports
+ BACKUP_DIR=backups
+
+
+ # ==================== UI THEME ====================
+ THEME_PRIMARY=purple
+ THEME_SECONDARY=blue
+
+
+ # ==================== PERFORMANCE ====================
+ MAX_WORKERS=3
+
+
+ # ==================== FEATURE FLAGS ====================
+ ENABLE_PDF_EXPORT=true
.gitignore ADDED
@@ -0,0 +1,68 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # Virtual Environment
+ venv/
+ env/
+ ENV/
+ .venv
+
+ # Environment Variables
+ .env
+ .env.local
+
+ # IDEs
+ .vscode/
+ .idea/
+ *.swp
+ *.swo
+ *~
+
+ # Logs
+ logs/
+ *.log
+
+ # Exports and Backups
+ exports/
+ backups/
+
+ # OS
+ .DS_Store
+ Thumbs.db
+
+ # Testing
+ .pytest_cache/
+ .coverage
+ htmlcov/
+ .tox/
+
+ # MyPy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # Gradio
+ gradio_cached_examples/
+ flagged/
Dockerfile ADDED
@@ -0,0 +1,3 @@
+ FROM python:3.11-slim
+ WORKDIR /app
+ COPY . /app
README.md ADDED
@@ -0,0 +1,278 @@
+ # 🧠 Advanced AI Reasoning System Pro
+
+ A production-ready AI reasoning application built with Gradio and the Groq API, featuring multiple research-backed reasoning methodologies, advanced caching, real-time analytics, and comprehensive export capabilities.
+
+ ## ✨ Features
+
+ ### 🔬 Research-Backed Reasoning Modes
+ - **Tree of Thoughts (ToT)** - Systematic exploration of multiple reasoning paths
+ - **Chain of Thought (CoT)** - Step-by-step logical reasoning
+ - **Self-Consistency** - Multiple solution paths with consensus
+ - **Reflexion** - Self-critique and iterative improvement
+ - **Multi-Agent Debate** - Multiple perspectives and synthesis
+ - **Analogical Reasoning** - Problem-solving through analogies
+
+ ### ⚡ Performance Features
+ - **Response Caching** - LRU cache with TTL for faster responses
+ - **Rate Limiting** - Token bucket algorithm to prevent API abuse
+ - **Streaming Responses** - Real-time response generation
+ - **Multi-threading** - Concurrent request handling
+
+ ### 📊 Analytics & Monitoring
+ - **Real-time Metrics** - Live performance tracking
+ - **Conversation Analytics** - Usage patterns and insights
+ - **Cache Statistics** - Hit rates and performance metrics
+ - **Session Tracking** - Unique session identification
+
+ ### 📤 Export Capabilities
+ - **Multiple Formats** - JSON, Markdown, TXT, PDF
+ - **Metadata Support** - Timestamps, models, performance data
+ - **Automatic Backups** - Periodic conversation backups
+ - **Search Functionality** - Keyword-based conversation search
+
+ ### 🎨 User Interface
+ - **Collapsible Sidebar** - Clean, distraction-free workspace
+ - **Multiple Tabs** - Reasoning, Export, Analytics, Settings
+ - **Live Metrics Display** - Real-time performance indicators
+ - **Responsive Design** - Mobile-friendly interface
+
+ ## 🚀 Quick Start
+
+ ### Prerequisites
+ - Python 3.9+
+ - Groq API key ([get one here](https://console.groq.com))
+
+ ### Installation
+
+ 1. **Clone the repository**
+
+    ```
+    git clone <repository-url>
+    cd reasoning-system-pro
+    ```
+
+ 2. **Create a virtual environment**
+
+    ```
+    python -m venv venv
+    source venv/bin/activate  # On Windows: venv\Scripts\activate
+    ```
+
+ 3. **Install dependencies**
+
+    ```
+    pip install -r requirements.txt
+    ```
+
+ 4. **Configure the environment**
+
+    ```
+    cp .env.example .env
+    ```
+
+    Edit `.env` and add your `GROQ_API_KEY`.
+
+ 5. **Run the application**
+
+    ```
+    python main.py
+    ```
+
+ 6. **Open in browser**
+
+    ```
+    http://localhost:7860
+    ```
+
+ ## 📁 Project Structure
+
+ ```
+ reasoning-system-pro/
+ ├── src/
+ │   ├── api/              # API client management
+ │   ├── core/             # Core business logic
+ │   ├── models/           # Data models
+ │   ├── services/         # Business services
+ │   ├── ui/               # Gradio interface
+ │   ├── utils/            # Utilities
+ │   └── config/           # Configuration
+ ├── tests/                # Test suite
+ ├── exports/              # Generated exports
+ ├── backups/              # Conversation backups
+ ├── logs/                 # Application logs
+ ├── main.py               # Entry point
+ ├── requirements.txt      # Dependencies
+ └── .env                  # Environment config
+ ```
+
+ ## 🔧 Configuration
+
+ ### Environment Variables
+
+ Key configuration options in `.env`:
+
+ ```
+ # API
+ GROQ_API_KEY=your_key_here
+
+ # Performance
+ CACHE_SIZE=100
+ RATE_LIMIT_REQUESTS=50
+
+ # Features
+ ENABLE_PDF_EXPORT=true
+ ENABLE_CACHE=true
+ ```
+
+ ### Advanced Configuration
+
+ Edit `src/config/settings.py` for fine-tuned control over:
+ - Cache TTL and size
+ - Rate limiting parameters
+ - Request timeouts and retries
+ - File storage locations
+ - UI themes
+
+ ## 📖 Usage Examples
+
+ ### Basic Query
+ Simply type your question in the chat interface:
+
+ > "Explain quantum entanglement using the Tree of Thoughts method"
+
+ ### With Self-Critique
+ Enable the "Self-Critique" checkbox for automatic validation and refinement.
+
+ ### Custom Templates
+ Select from pre-built templates:
+ - Research Analysis
+ - Problem Solving
+ - Code Review
+ - Writing Enhancement
+ - Debate Analysis
+ - Learning Explanation
+
+ ## 🧪 Testing
+
+ ```
+ # Run all tests
+ pytest
+
+ # With coverage
+ pytest --cov=src --cov-report=html
+
+ # Specific test file
+ pytest tests/test_reasoner.py
+ ```
+
+ ## 📊 Available Models
+
+ ### Meta / Llama Models
+ - `llama-3.3-70b-versatile` - Best overall performance
+ - `llama-3.1-8b-instant` - Ultra-fast responses
+ - `llama-4-maverick-17b-128k` - Experimental long context
+
+ ### DeepSeek Models
+ - `deepseek-r1-distill-llama-70b` - Optimized reasoning
+
+ ### Mixtral Models
+ - `mixtral-8x7b-32768` - Long context specialist
+
+ And many more! See `src/config/constants.py` for the full list.
+
+ ## 🛠️ Development
+
+ ### Code Style
+
+ ```
+ # Format code
+ black src/
+
+ # Check linting
+ flake8 src/
+
+ # Type checking
+ mypy src/
+ ```
+
+ ### Adding New Reasoning Modes
+
+ 1. Add to `src/config/constants.py`:
+
+    ```python
+    class ReasoningMode(Enum):
+        YOUR_MODE = "Your Mode Name"
+    ```
+
+ 2. Add a system prompt in `src/core/prompt_engine.py`:
+
+    ```python
+    SYSTEM_PROMPTS = {
+        ReasoningMode.YOUR_MODE: "Your prompt here..."
+    }
+    ```
+
+ ## 📝 API Documentation
+
+ ### Core Classes
+
+ #### AdvancedReasoner
+ Main reasoning engine with streaming support.
+
+ ```python
+ reasoner = AdvancedReasoner()
+ for response in reasoner.generate_response(query, ...):
+     print(response)
+ ```
+
+ #### ResponseCache
+ Thread-safe LRU cache with TTL.
+
+ ```python
+ cache = ResponseCache(maxsize=100, ttl=3600)
+ cache.set(key, value)
+ result = cache.get(key)
+ ```
+
+ ## 🐛 Troubleshooting
+
+ ### Common Issues
+
+ **Issue: "GROQ_API_KEY not found"**
+ - Solution: Ensure the `.env` file exists and contains `GROQ_API_KEY=your_key`
+
+ **Issue: PDF export fails**
+ - Solution: Install reportlab: `pip install reportlab`
+
+ **Issue: Rate limit errors**
+ - Solution: Increase `RATE_LIMIT_WINDOW` in `.env`
+
+ ## 🤝 Contributing
+
+ 1. Fork the repository
+ 2. Create a feature branch
+ 3. Make your changes
+ 4. Add tests
+ 5. Submit a pull request
+
+ ## 📄 License
+
+ MIT License - see the LICENSE file for details.
+
+ ## 🙏 Acknowledgments
+
+ - **Yao et al. (2023)** - Tree of Thoughts
+ - **Wei et al. (2022)** - Chain of Thought
+ - **Bai et al. (2022)** - Constitutional AI
+ - **Groq** - High-speed LLM inference
+
+ ## 📧 Support
+
+ For issues and questions:
+ - Create an issue on GitHub
+ - Check the existing documentation
+ - Review logs in the `logs/` directory
+
+ ---
+
+ **Built with ❤️ using Gradio and Groq**
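The README describes rate limiting as a token-bucket algorithm, but `src/services/rate_limiter.py` itself is not shown in this excerpt of the commit. As a point of reference only, here is a minimal token-bucket sketch consistent with the `RateLimiter(requests, window)` constructor and blocking `acquire()` call used in `src/core/reasoner.py` below; the internals are assumptions, not the committed code:

```python
import threading
import time


class TokenBucketSketch:
    """Hypothetical token bucket: `requests` tokens refill evenly over `window` seconds."""

    def __init__(self, requests: int, window: int):
        self.capacity = requests
        self.refill_rate = requests / window   # tokens added per second
        self.tokens = float(requests)          # start with a full bucket
        self.last_refill = time.monotonic()
        self._lock = threading.Lock()

    def acquire(self) -> None:
        """Block until a token is available, then consume it."""
        while True:
            with self._lock:
                now = time.monotonic()
                elapsed = now - self.last_refill
                self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
                self.last_refill = now
                if self.tokens >= 1:
                    self.tokens -= 1
                    return
            time.sleep(0.05)  # back off briefly before re-checking
```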
docker-compose.yml ADDED
@@ -0,0 +1,4 @@
+ version: '3.8'
+ services:
+   app:
+     build: .
docs/API.md ADDED
@@ -0,0 +1,2 @@
+ # API
+
docs/ARCHITECTURE.md ADDED
@@ -0,0 +1,2 @@
+ # Architecture
+
docs/DEPLOYMENT.md ADDED
@@ -0,0 +1,2 @@
+ # Deployment Instructions
+
main.py ADDED
@@ -0,0 +1,53 @@
+ """
+ Main application entry point
+ """
+ from src.config.env import load_environment
+ from src.config.settings import AppConfig
+ from src.config.constants import ReasoningMode, ModelConfig
+ from src.ui.app import create_ui
+ from src.utils.logger import logger
+
+
+ def main():
+     """
+     🚀 APPLICATION ENTRY POINT
+     """
+     try:
+         # Load environment variables
+         load_environment()
+
+         # Print startup information
+         logger.info("=" * 60)
+         logger.info("🚀 Starting Advanced AI Reasoning System Pro...")
+         logger.info(f"🌍 Environment: {AppConfig.ENV}")
+         logger.info(f"🎨 Theme: {AppConfig.THEME_PRIMARY}/{AppConfig.THEME_SECONDARY}")
+         logger.info(f"🤖 Available Models: {len(ModelConfig)}")
+         logger.info(f"🧠 Reasoning Modes: {len(ReasoningMode)}")
+         logger.info(f"💾 Cache: {AppConfig.CACHE_SIZE} entries")
+         logger.info(f"⏱️ Rate Limit: {AppConfig.RATE_LIMIT_REQUESTS} req/{AppConfig.RATE_LIMIT_WINDOW}s")
+         logger.info("🎛️ Features: Collapsible Sidebar, PDF Export, Real-time Analytics")
+         logger.info("=" * 60)
+
+         # Create and launch UI
+         demo = create_ui()
+         demo.launch(
+             share=False,
+             server_name="0.0.0.0",
+             server_port=7860,
+             show_error=True,
+             show_api=False,
+             favicon_path=None,
+             max_threads=AppConfig.MAX_WORKERS
+         )
+
+     except KeyboardInterrupt:
+         logger.info("⏹️ Application stopped by user (Ctrl+C)")
+     except Exception as e:
+         logger.critical(f"❌ Failed to start application: {e}", exc_info=True)
+         raise
+     finally:
+         logger.info("👋 Shutting down gracefully...")
+
+
+ if __name__ == "__main__":
+     main()
pytest.ini ADDED
@@ -0,0 +1,2 @@
+ [pytest]
+ python_files = test_*.py
requirements-dev.txt ADDED
@@ -0,0 +1,22 @@
+ # Development and Testing Dependencies
+ -r requirements.txt
+
+ # Testing
+ pytest>=7.4.0
+ pytest-cov>=4.1.0
+ pytest-asyncio>=0.21.0
+ pytest-mock>=3.11.0
+
+ # Code Quality
+ black>=23.0.0
+ flake8>=6.0.0
+ mypy>=1.5.0
+ pylint>=3.0.0
+ isort>=5.12.0
+
+ # Documentation
+ sphinx>=7.0.0
+ sphinx-rtd-theme>=1.3.0
+
+ # Type Stubs
+ types-requests>=2.31.0
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ # Core Dependencies
+ gradio>=4.0.0
+ groq>=0.4.0
+ python-dotenv>=1.0.0
+
+ # PDF Export (Optional but recommended)
+ reportlab>=4.0.0
+
+ # Development Dependencies (move to requirements-dev.txt if needed)
+ pytest>=7.4.0
+ pytest-cov>=4.1.0
+ black>=23.0.0
+ flake8>=6.0.0
+ mypy>=1.5.0
scripts/backup.sh ADDED
@@ -0,0 +1,2 @@
+ #!/bin/bash
+ # backup script
scripts/deploy.sh ADDED
@@ -0,0 +1,2 @@
+ #!/bin/bash
+ # deploy script
scripts/setup.sh ADDED
@@ -0,0 +1,2 @@
+ #!/bin/bash
+ # setup script
src/api/__init__.py ADDED
@@ -0,0 +1,6 @@
+ """
+ API layer package initialization
+ """
+ from .groq_client import GroqClientManager
+
+ __all__ = ['GroqClientManager']
src/api/endpoints.py ADDED
@@ -0,0 +1 @@
+ """API endpoints (placeholder)."""
src/api/groq_client.py ADDED
@@ -0,0 +1,90 @@
+ """
+ Groq API client manager with singleton pattern
+ """
+ import os
+ import threading
+ from typing import Optional
+ from groq import Groq
+ from src.utils.logger import logger
+ from src.config.settings import AppConfig
+
+
+ class GroqClientManager:
+     """
+     🔌 SINGLETON GROQ CLIENT MANAGER
+     Thread-safe singleton pattern for API client
+     """
+     _instance: Optional['GroqClientManager'] = None
+     _lock = threading.Lock()
+     _client: Optional[Groq] = None
+     _initialized = False
+
+     def __new__(cls):
+         if cls._instance is None:
+             with cls._lock:
+                 if cls._instance is None:
+                     cls._instance = super().__new__(cls)
+         return cls._instance
+
+     def __init__(self):
+         if not self._initialized:
+             with self._lock:
+                 if not self._initialized:
+                     self._initialize_client()
+                     self._initialized = True
+
+     def _initialize_client(self) -> None:
+         """
+         🔧 INITIALIZE GROQ CLIENT
+         """
+         api_key = os.getenv('GROQ_API_KEY')
+
+         if not api_key:
+             logger.error("❌ GROQ_API_KEY not found in environment variables")
+             raise EnvironmentError(
+                 "GROQ_API_KEY not set. Please add it to your .env file:\n"
+                 "GROQ_API_KEY=your_api_key_here"
+             )
+
+         try:
+             self._client = Groq(
+                 api_key=api_key,
+                 timeout=AppConfig.REQUEST_TIMEOUT
+             )
+             logger.info("✅ Groq client initialized successfully")
+         except Exception as e:
+             logger.error(f"❌ Failed to initialize Groq client: {e}")
+             raise
+
+     @property
+     def client(self) -> Groq:
+         """
+         ✅ GET GROQ CLIENT INSTANCE
+         """
+         if self._client is None:
+             raise RuntimeError("Groq client not initialized")
+         return self._client
+
+     def health_check(self) -> bool:
+         """
+         🏥 HEALTH CHECK
+         """
+         try:
+             if self._client is None:
+                 logger.warning("⚠️ Health check failed: Client not initialized")
+                 return False
+
+             logger.debug("✅ Health check passed")
+             return True
+         except Exception as e:
+             logger.error(f"❌ Health check failed: {e}")
+             return False
+
+     def reset(self) -> None:
+         """
+         🔄 RESET CLIENT (FOR TESTING)
+         """
+         with self._lock:
+             self._client = None
+             self._initialized = False
+             logger.info("🔄 Groq client reset")
src/config/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Configuration package initialization
+ """
+ from .settings import AppConfig
+ from .constants import ReasoningMode, ModelConfig
+ from .env import load_environment
+
+ __all__ = ['AppConfig', 'ReasoningMode', 'ModelConfig', 'load_environment']
src/config/constants.py ADDED
@@ -0,0 +1,111 @@
+ """
+ Application constants and enums
+ """
+ from enum import Enum
+
+
+ class ReasoningMode(Enum):
+     """
+     🧠 RESEARCH-ALIGNED REASONING METHODOLOGIES
+     Based on academic papers and proven techniques
+     """
+     TREE_OF_THOUGHTS = "Tree of Thoughts (ToT)"
+     CHAIN_OF_THOUGHT = "Chain of Thought (CoT)"
+     SELF_CONSISTENCY = "Self-Consistency Sampling"
+     REFLEXION = "Reflexion + Self-Correction"
+     DEBATE = "Multi-Agent Debate"
+     ANALOGICAL = "Analogical Reasoning"
+
+     def __str__(self) -> str:
+         return self.value
+
+     @classmethod
+     def get_description(cls, mode: 'ReasoningMode') -> str:
+         """Get detailed description of reasoning mode"""
+         descriptions = {
+             cls.TREE_OF_THOUGHTS: "Explores multiple reasoning paths systematically before converging on the best solution.",
+             cls.CHAIN_OF_THOUGHT: "Breaks down complex problems into clear, logical steps with explicit reasoning.",
+             cls.SELF_CONSISTENCY: "Generates multiple independent solutions and identifies the most consistent answer.",
+             cls.REFLEXION: "Solves problems, critiques reasoning, and refines solutions iteratively.",
+             cls.DEBATE: "Presents multiple perspectives and synthesizes the strongest arguments.",
+             cls.ANALOGICAL: "Finds similar problems and applies their solutions to the current problem."
+         }
+         return descriptions.get(mode, "Advanced reasoning mode")
+
+
+ class ModelConfig(Enum):
+     """
+     🤖 AVAILABLE GROQ MODELS
+     """
+
+     # === Meta / Llama Models ===
+     LLAMA_70B = ("llama-3.3-70b-versatile", 70, 8000, "Best overall performance", "Meta")
+     LLAMA_70B_V31 = ("llama-3.1-70b-versatile", 70, 8000, "Stable production model", "Meta")
+     LLAMA_3_1_8B_INSTANT = ("llama-3.1-8b-instant", 8, 131072, "Ultra-fast responses", "Meta")
+     LLAMA_4_MAVERICK_17B = ("meta-llama/llama-4-maverick-17b-128k", 17, 131072, "Llama 4 experimental", "Meta")
+     LLAMA_4_SCOUT_17B = ("meta-llama/llama-4-scout-17b-16e-instruct", 17, 16384, "Llama 4 scout model", "Meta")
+     LLAMA_GUARD_4_12B = ("meta-llama/llama-guard-4-12b", 12, 8192, "Content moderation", "Meta")
+     LLAMA_PROMPT_GUARD_2_22M = ("meta-llama/llama-prompt-guard-2-22m", 0, 8192, "Prompt injection detection (22M)", "Meta")
+     LLAMA_PROMPT_GUARD_2_86M = ("meta-llama/llama-prompt-guard-2-86m", 0, 8192, "Prompt injection detection (86M)", "Meta")
+
+     # === DeepSeek Models ===
+     DEEPSEEK_70B = ("deepseek-r1-distill-llama-70b", 70, 8000, "Optimized reasoning", "DeepSeek")
+
+     # === Mixtral Models ===
+     MIXTRAL_8X7B = ("mixtral-8x7b-32768", 47, 32768, "Long context specialist", "Mixtral")
+
+     # === Google Gemma Models ===
+     GEMMA_9B = ("gemma2-9b-it", 9, 8192, "Fast and efficient", "Google")
+
+     # === Moonshot AI Models ===
+     KIMI_K2_INSTRUCT_DEPRECATED = ("moonshotai/kimi-k2-instruct", 0, 200000, "Ultra-long context (Deprecated)", "Moonshot")
+     KIMI_K2_INSTRUCT_0905 = ("moonshotai/kimi-k2-instruct-0905", 0, 200000, "Ultra-long context specialist", "Moonshot")
+
+     # === OpenAI Models ===
+     GPT_OSS_120B = ("openai/gpt-oss-120b", 120, 8192, "Large open source model", "OpenAI")
+     GPT_OSS_20B = ("openai/gpt-oss-20b", 20, 8192, "Medium open source model", "OpenAI")
+
+     # === Qwen Models ===
+     QWEN3_32B = ("qwen/qwen3-32b", 32, 32768, "Qwen 3 multilingual", "Qwen")
+
+     # === Groq Compound Models ===
+     GROQ_COMPOUND = ("groq/compound", 0, 8192, "Groq optimized compound", "Groq")
+     GROQ_COMPOUND_MINI = ("groq/compound-mini", 0, 8192, "Groq mini compound", "Groq")
+
+     def __init__(self, model_id: str, params_b: int, max_context: int, description: str, provider: str):
+         self.model_id = model_id
+         self.params_b = params_b
+         self.max_context = max_context
+         self.description = description
+         self.provider = provider
+
+     def __str__(self) -> str:
+         return f"{self.provider} - {self.model_id}"
+
+     @classmethod
+     def get_by_id(cls, model_id: str) -> 'ModelConfig':
+         """Get model config by ID"""
+         for model in cls:
+             if model.model_id == model_id:
+                 return model
+         raise ValueError(f"Model {model_id} not found")
+
+     @classmethod
+     def get_by_provider(cls, provider: str) -> list:
+         """Get all models by provider"""
+         return [model for model in cls if model.provider == provider]
+
+     @classmethod
+     def get_recommended(cls) -> 'ModelConfig':
+         """Get recommended default model"""
+         return cls.LLAMA_70B
+
+     @property
+     def is_long_context(self) -> bool:
+         """Check if model supports long context (>16k)"""
+         return self.max_context > 16384
+
+     @property
+     def is_fast(self) -> bool:
+         """Check if model is optimized for speed"""
+         return "instant" in self.model_id.lower() or self.params_b < 10
src/config/env.py ADDED
@@ -0,0 +1,33 @@
+ """
+ Environment variable loader
+ """
+ import os
+ from pathlib import Path
+ from dotenv import load_dotenv
+ from src.utils.logger import logger
+
+
+ def load_environment() -> None:
+     """
+     Load environment variables from .env file
+     """
+     env_path = Path('.env')
+
+     if env_path.exists():
+         load_dotenv(env_path)
+         logger.info("✅ Environment variables loaded from .env")
+     else:
+         logger.warning("⚠️ No .env file found. Using default configuration.")
+
+     # Validate required environment variables
+     required_vars = ['GROQ_API_KEY']
+     missing_vars = [var for var in required_vars if not os.getenv(var)]
+
+     if missing_vars:
+         logger.error(f"❌ Missing required environment variables: {', '.join(missing_vars)}")
+         raise EnvironmentError(
+             f"Missing required environment variables: {', '.join(missing_vars)}. "
+             "Please create a .env file with these variables."
+         )
+
+     logger.info("✅ All required environment variables validated")
src/config/settings.py ADDED
@@ -0,0 +1,120 @@
+ """
+ Application configuration settings
+ """
+ import os
+ from pathlib import Path
+ from typing import ClassVar
+
+
+ class AppConfig:
+     """
+     🎛️ CENTRALIZED APPLICATION CONFIGURATION
+     """
+
+     # Environment
+     ENV: ClassVar[str] = os.getenv('APP_ENV', 'development')
+     DEBUG: ClassVar[bool] = ENV == 'development'
+
+     # Conversation Settings
+     MAX_HISTORY_LENGTH: ClassVar[int] = int(os.getenv('MAX_HISTORY_LENGTH', '10'))
+     MAX_CONVERSATION_STORAGE: ClassVar[int] = int(os.getenv('MAX_CONVERSATION_STORAGE', '1000'))
+
+     # Model Parameters
+     DEFAULT_TEMPERATURE: ClassVar[float] = float(os.getenv('DEFAULT_TEMPERATURE', '0.7'))
+     MIN_TEMPERATURE: ClassVar[float] = 0.0
+     MAX_TEMPERATURE: ClassVar[float] = 2.0
+
+     DEFAULT_MAX_TOKENS: ClassVar[int] = int(os.getenv('DEFAULT_MAX_TOKENS', '4000'))
+     MIN_TOKENS: ClassVar[int] = 100
+     MAX_TOKENS: ClassVar[int] = 32000
+
+     # API Settings
+     REQUEST_TIMEOUT: ClassVar[int] = int(os.getenv('REQUEST_TIMEOUT', '60'))
+     MAX_RETRIES: ClassVar[int] = int(os.getenv('MAX_RETRIES', '3'))
+     RETRY_DELAY: ClassVar[float] = float(os.getenv('RETRY_DELAY', '1.0'))
+
+     # Cache Settings
+     CACHE_SIZE: ClassVar[int] = int(os.getenv('CACHE_SIZE', '100'))
+     CACHE_TTL: ClassVar[int] = int(os.getenv('CACHE_TTL', '3600'))
+     ENABLE_CACHE: ClassVar[bool] = os.getenv('ENABLE_CACHE', 'true').lower() == 'true'
+
+     # Rate Limiting
+     RATE_LIMIT_REQUESTS: ClassVar[int] = int(os.getenv('RATE_LIMIT_REQUESTS', '50'))
+     RATE_LIMIT_WINDOW: ClassVar[int] = int(os.getenv('RATE_LIMIT_WINDOW', '60'))
+     ENABLE_RATE_LIMITING: ClassVar[bool] = os.getenv('ENABLE_RATE_LIMITING', 'true').lower() == 'true'
+
+     # File Storage
+     BASE_DIR: ClassVar[Path] = Path(__file__).parent.parent.parent
+     EXPORT_DIR: ClassVar[Path] = BASE_DIR / os.getenv('EXPORT_DIR', 'exports')
+     BACKUP_DIR: ClassVar[Path] = BASE_DIR / os.getenv('BACKUP_DIR', 'backups')
+     LOG_DIR: ClassVar[Path] = BASE_DIR / 'logs'
+     MAX_EXPORT_SIZE_MB: ClassVar[int] = 50
+
+     # UI Theme
+     THEME_PRIMARY: ClassVar[str] = os.getenv('THEME_PRIMARY', 'purple')
+     THEME_SECONDARY: ClassVar[str] = os.getenv('THEME_SECONDARY', 'blue')
+
+     # Analytics
+     AUTO_SAVE_INTERVAL: ClassVar[int] = 300
+     ENABLE_ANALYTICS: ClassVar[bool] = True
+     ANALYTICS_BATCH_SIZE: ClassVar[int] = 10
+
+     # Performance
+     MAX_WORKERS: ClassVar[int] = int(os.getenv('MAX_WORKERS', '3'))
+     ENABLE_PARALLEL_PROCESSING: ClassVar[bool] = True
+
+     # Security
+     MAX_INPUT_LENGTH: ClassVar[int] = 10000
+     ENABLE_XSS_PROTECTION: ClassVar[bool] = True
+     ALLOWED_EXPORT_FORMATS: ClassVar[list] = ['json', 'markdown', 'txt', 'pdf']
+
+     # Feature Flags
+     ENABLE_PDF_EXPORT: ClassVar[bool] = os.getenv('ENABLE_PDF_EXPORT', 'true').lower() == 'true'
+     ENABLE_SELF_CRITIQUE: ClassVar[bool] = True
+     ENABLE_SIDEBAR_TOGGLE: ClassVar[bool] = True
+
+     @classmethod
+     def validate(cls) -> bool:
+         """Validates all configuration parameters"""
+         # Import logger here to avoid circular import
+         from src.utils.logger import logger
+
+         try:
+             assert cls.MIN_TEMPERATURE <= cls.DEFAULT_TEMPERATURE <= cls.MAX_TEMPERATURE
+             assert cls.MIN_TOKENS <= cls.DEFAULT_MAX_TOKENS <= cls.MAX_TOKENS
+             assert cls.MAX_HISTORY_LENGTH > 0
+             assert cls.MAX_CONVERSATION_STORAGE >= cls.MAX_HISTORY_LENGTH
+             assert cls.CACHE_SIZE > 0 and cls.CACHE_TTL > 0
+             assert cls.RATE_LIMIT_REQUESTS > 0 and cls.RATE_LIMIT_WINDOW > 0
+             assert cls.REQUEST_TIMEOUT > 0 and cls.MAX_RETRIES >= 0
+             assert 1 <= cls.MAX_WORKERS <= 10
+             assert cls.MAX_INPUT_LENGTH >= 1000
+
+             logger.info("✅ Configuration validation passed")
+             return True
+         except AssertionError as e:
+             logger.error(f"❌ Configuration validation failed: {e}")
+             return False
+
+     @classmethod
+     def create_directories(cls) -> None:
+         """Creates all required directories"""
+         # Import logger here to avoid circular import
+         from src.utils.logger import logger
+
+         directories = [cls.EXPORT_DIR, cls.BACKUP_DIR, cls.LOG_DIR]
+
+         try:
+             for directory in directories:
+                 directory.mkdir(exist_ok=True, parents=True)
+                 logger.debug(f"📁 Directory ready: {directory}")
+             logger.info("✅ All application directories initialized")
+         except Exception as e:
+             logger.error(f"❌ Failed to create directories: {e}")
+             raise
+
+
+ # Initialize directories and validate
+ AppConfig.create_directories()
+ if not AppConfig.validate():
+     raise RuntimeError("❌ Configuration validation failed")
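One ordering caveat worth noting: `AppConfig` reads `os.getenv` in its class body, which executes on first import, and `main.py` imports `src.config.settings` before `main()` ever calls `load_environment()`. Values from `.env` therefore reach `AppConfig` only if `python-dotenv` runs before that first import. A minimal sketch of a safe entry-point ordering (hypothetical, not the committed `main.py`):

```python
from dotenv import load_dotenv

# Load .env *before* anything imports src.config.settings,
# so the os.getenv calls in AppConfig's class body see the file's values.
load_dotenv()

from src.config.settings import AppConfig  # noqa: E402  (deliberately after load_dotenv)

print(AppConfig.CACHE_SIZE)  # reflects CACHE_SIZE from .env, if set
```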
src/core/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Core business logic package initialization
+ """
+ from .reasoner import AdvancedReasoner
+ from .prompt_engine import PromptEngine
+ from .conversation import ConversationManager
+
+ __all__ = ['AdvancedReasoner', 'PromptEngine', 'ConversationManager']
src/core/conversation.py ADDED
@@ -0,0 +1,83 @@
+ """
+ Conversation management and history handling
+ """
+ import threading
+ from collections import deque, defaultdict
+ from typing import List, Dict, Optional
+ from src.models.entry import ConversationEntry
+ from src.config.settings import AppConfig
+ from src.utils.logger import logger
+
+
+ class ConversationManager:
+     """
+     💬 THREAD-SAFE CONVERSATION MANAGER
+     Handles conversation history with automatic size management
+     """
+
+     def __init__(self):
+         self.conversation_history: deque = deque(maxlen=AppConfig.MAX_CONVERSATION_STORAGE)
+         self.model_usage: Dict[str, int] = defaultdict(int)
+         self.mode_usage: Dict[str, int] = defaultdict(int)
+         self._lock = threading.Lock()
+
+     def add_conversation(self, entry: ConversationEntry) -> None:
+         """
+         ✅ ADD CONVERSATION ENTRY
+         """
+         with self._lock:
+             self.conversation_history.append(entry)
+             self.model_usage[entry.model] += 1
+             self.mode_usage[entry.reasoning_mode] += 1
+             logger.debug(f"💬 Added conversation: {entry.entry_id[:8]}...")
+
+     def get_history(self, limit: Optional[int] = None) -> List[ConversationEntry]:
+         """
+         ✅ GET CONVERSATION HISTORY
+         """
+         with self._lock:
+             if limit:
+                 return list(self.conversation_history)[-limit:]
+             return list(self.conversation_history)
+
+     def clear_history(self) -> None:
+         """
+         🗑️ CLEAR CONVERSATION HISTORY
+         """
+         with self._lock:
+             self.conversation_history.clear()
+             self.model_usage.clear()
+             self.mode_usage.clear()
+             logger.info("🗑️ Conversation history cleared")
+
+     def get_recent_context(self, limit: int = 10) -> List[Dict]:
+         """
+         ✅ GET RECENT CONTEXT FOR API
+         """
+         with self._lock:
+             recent = list(self.conversation_history)[-limit:]
+
+             context = []
+             for conv in recent:
+                 context.append({"role": "user", "content": conv.user_message})
+                 context.append({"role": "assistant", "content": conv.assistant_response})
+
+             logger.debug(f"📚 Retrieved {len(context)} context messages")
+             return context
+
+     def get_statistics(self) -> Dict:
+         """
+         📊 GET CONVERSATION STATISTICS
+         """
+         with self._lock:
+             return {
+                 'total_conversations': len(self.conversation_history),
+                 'model_usage': dict(self.model_usage),
+                 'mode_usage': dict(self.mode_usage),
+                 'max_storage': AppConfig.MAX_CONVERSATION_STORAGE
+             }
+
+     def __len__(self) -> int:
+         """Get conversation count"""
+         with self._lock:
+             return len(self.conversation_history)
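A brief usage sketch for the manager above (hypothetical snippet):

```python
from src.core.conversation import ConversationManager
from src.models.entry import ConversationEntry

manager = ConversationManager()
manager.add_conversation(ConversationEntry(
    user_message="What is entropy?",
    assistant_response="Entropy measures disorder...",
    model="llama-3.3-70b-versatile",
    reasoning_mode="Chain of Thought (CoT)",
))

context = manager.get_recent_context(limit=5)  # alternating user/assistant dicts for the API
stats = manager.get_statistics()               # totals plus per-model / per-mode usage counts
print(len(manager), stats['total_conversations'])
```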
src/core/prompt_engine.py ADDED
@@ -0,0 +1,227 @@
+ """
+ Centralized prompt management and template system
+ """
+ from typing import Dict, List, Optional
+ from src.config.constants import ReasoningMode
+ from src.utils.logger import logger
+
+
+ class PromptEngine:
+     """
+     📝 CENTRALIZED PROMPT MANAGEMENT
+     Research-backed prompt templates for different reasoning modes
+     """
+
+     # System prompts for each reasoning mode
+     SYSTEM_PROMPTS: Dict[ReasoningMode, str] = {
+         ReasoningMode.TREE_OF_THOUGHTS: """You are an advanced AI using Tree of Thoughts reasoning (Yao et al., 2023).
+
+ **Instructions:**
+ 1. Generate 3 distinct reasoning paths
+ 2. Evaluate each path's viability
+ 3. Select the most promising path
+ 4. Expand and refine iteratively
+ 5. Present the best solution with reasoning trace
+
+ Be systematic, thorough, and show your branching logic.""",
+
+         ReasoningMode.CHAIN_OF_THOUGHT: """You are an advanced AI using Chain of Thought reasoning (Wei et al., 2022).
+
+ **Instructions:**
+ 1. Break down the problem into clear steps
+ 2. Show explicit reasoning for each step
+ 3. Build on previous conclusions
+ 4. Arrive at final answer logically
+ 5. Explain your thought process
+
+ Be clear, sequential, and transparent in your reasoning.""",
+
+         ReasoningMode.SELF_CONSISTENCY: """You are an advanced AI using Self-Consistency sampling (Wang et al., 2022).
+
+ **Instructions:**
+ 1. Generate 3 independent solution paths
+ 2. Solve the problem using different approaches
+ 3. Compare solutions for consistency
+ 4. Identify the most reliable answer
+ 5. Present the consensus solution
+
+ Show multiple perspectives and explain why one answer is most consistent.""",
+
+         ReasoningMode.REFLEXION: """You are an advanced AI using Reflexion with self-correction (Shinn et al., 2023).
+
+ **Instructions:**
+ 1. Provide initial solution
+ 2. Critique your own reasoning
+ 3. Identify potential errors or gaps
+ 4. Refine and improve solution
+ 5. Present corrected answer with reflection
+
+ Be self-critical and show your improvement process.""",
+
+         ReasoningMode.DEBATE: """You are an advanced AI using Multi-Agent Debate (Du et al., 2023).
+
+ **Instructions:**
+ 1. Present Perspective A with arguments
+ 2. Present Perspective B with counterarguments
+ 3. Debate key points of disagreement
+ 4. Synthesize the strongest arguments
+ 5. Conclude with balanced judgment
+
+ Show multiple viewpoints and reasoned synthesis.""",
+
+         ReasoningMode.ANALOGICAL: """You are an advanced AI using Analogical Reasoning (Gentner & Forbus, 2011).
+
+ **Instructions:**
+ 1. Identify similar problems or domains
+ 2. Map structural similarities
+ 3. Transfer solution strategies
+ 4. Adapt to current problem context
+ 5. Verify applicability
+
+ Draw meaningful analogies and explain transfer logic."""
+     }
+
+     # Pre-built templates for common tasks
+     TEMPLATES: Dict[str, str] = {
+         "Custom": "",  # User provides their own
+
+         "Research Analysis": """Analyze the following research question or topic:
+
+ {query}
+
+ Provide:
+ 1. Current state of knowledge
+ 2. Key findings and evidence
+ 3. Gaps or limitations
+ 4. Future directions
+ 5. Practical implications""",
+
+         "Problem Solving": """Solve the following problem systematically:
+
+ {query}
+
+ Include:
+ 1. Problem understanding
+ 2. Constraints and requirements
+ 3. Solution approach
+ 4. Step-by-step solution
+ 5. Verification""",
+
+         "Code Review": """Review the following code:
+
+ {query}
+
+ Analyze:
+ 1. Code quality and readability
+ 2. Potential bugs or issues
+ 3. Performance considerations
+ 4. Best practice recommendations
+ 5. Security concerns""",
+
+         "Writing Enhancement": """Improve the following text:
+
+ {query}
+
+ Focus on:
+ 1. Clarity and coherence
+ 2. Grammar and style
+ 3. Structural improvements
+ 4. Audience appropriateness
+ 5. Enhanced version""",
+
+         "Debate Analysis": """Analyze the following argument or debate topic:
+
+ {query}
+
+ Provide:
+ 1. Key arguments for each side
+ 2. Evidence evaluation
+ 3. Logical fallacies (if any)
+ 4. Strongest points
+ 5. Balanced conclusion""",
+
+         "Learning Explanation": """Explain the following concept:
+
+ {query}
+
+ Include:
+ 1. Simple definition
+ 2. Core principles
+ 3. Examples and analogies
+ 4. Common misconceptions
+ 5. Practical applications"""
+     }
+
+     @classmethod
+     def get_system_prompt(cls, mode: ReasoningMode) -> str:
+         """
+         ✅ GET SYSTEM PROMPT FOR REASONING MODE
+         """
+         prompt = cls.SYSTEM_PROMPTS.get(mode, cls.SYSTEM_PROMPTS[ReasoningMode.CHAIN_OF_THOUGHT])
+         logger.debug(f"📝 Retrieved system prompt for mode: {mode}")
+         return prompt
+
+     @classmethod
+     def apply_template(cls, template_name: str, query: str) -> str:
+         """
+         ✅ APPLY PROMPT TEMPLATE
+         """
+         if template_name not in cls.TEMPLATES:
+             logger.warning(f"⚠️ Template '{template_name}' not found, using query as-is")
+             return query
+
+         template = cls.TEMPLATES[template_name]
+
+         if not template:  # Custom template
+             return query
+
+         formatted = template.format(query=query)
+         logger.debug(f"📝 Applied template: {template_name}")
+         return formatted
+
+     @classmethod
+     def build_messages(cls,
+                        query: str,
+                        mode: ReasoningMode,
+                        template: str = "Custom",
+                        history: Optional[List[Dict]] = None) -> List[Dict]:
+         """
+         ✅ BUILD MESSAGE ARRAY FOR API
+         """
+         messages = [
+             {"role": "system", "content": cls.get_system_prompt(mode)}
+         ]
+
+         # Add conversation history
+         if history:
+             for msg in history[-10:]:  # Last 10 messages
+                 if msg.get("role") in ["user", "assistant"]:
+                     messages.append({
+                         "role": msg["role"],
+                         "content": msg["content"]
+                     })
+
+         # Add current query with template
+         formatted_query = cls.apply_template(template, query)
+         messages.append({"role": "user", "content": formatted_query})
+
+         logger.debug(f"📝 Built message array with {len(messages)} messages")
+         return messages
+
+     @classmethod
+     def get_self_critique_prompt(cls, original_response: str) -> str:
+         """
+         ✅ GENERATE SELF-CRITIQUE PROMPT
+         """
+         return f"""Review and critique the following response:
+
+ {original_response}
+
+ **Self-Critique Instructions:**
+ 1. Identify any factual errors or logical flaws
+ 2. Check for completeness and clarity
+ 3. Evaluate reasoning quality
+ 4. Suggest specific improvements
+ 5. Provide refined answer if needed
+
+ Be thorough and constructive in your critique."""
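How the pieces above combine in practice (hypothetical snippet):

```python
from src.config.constants import ReasoningMode
from src.core.prompt_engine import PromptEngine

messages = PromptEngine.build_messages(
    query="Why does ice float on water?",
    mode=ReasoningMode.CHAIN_OF_THOUGHT,
    template="Learning Explanation",
    history=[
        {"role": "user", "content": "hi"},
        {"role": "assistant", "content": "hello"},
    ],
)
# messages[0] is the CoT system prompt, messages[1:3] the trimmed history,
# and messages[-1] the query wrapped in the "Learning Explanation" template.
```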
src/core/reasoner.py ADDED
@@ -0,0 +1,227 @@
+ """
+ Advanced reasoning engine - Main business logic
+ """
+ import time
+ import hashlib
+ from typing import Generator, List, Dict, Optional, Any, Tuple
+ from src.api.groq_client import GroqClientManager
+ from src.core.prompt_engine import PromptEngine
+ from src.core.conversation import ConversationManager
+ from src.services.cache_service import ResponseCache
+ from src.services.rate_limiter import RateLimiter
+ from src.services.export_service import ConversationExporter
+ from src.services.analytics_service import AnalyticsService
+ from src.models.metrics import ConversationMetrics
+ from src.models.entry import ConversationEntry
+ from src.config.settings import AppConfig
+ from src.config.constants import ReasoningMode, ModelConfig
+ from src.utils.logger import logger
+ from src.utils.decorators import handle_groq_errors, with_rate_limit
+ from src.utils.validators import validate_input
+ from src.utils.helpers import generate_session_id
+
+
+ class AdvancedReasoner:
+     """
+     🧠 ADVANCED REASONING ENGINE
+     Main orchestrator for AI reasoning with caching, metrics, and export
+     """
+
+     def __init__(self):
+         # Core components
+         self.client_manager = GroqClientManager()
+         self.conversation_manager = ConversationManager()
+         self.prompt_engine = PromptEngine()
+
+         # Services
+         self.cache = ResponseCache(AppConfig.CACHE_SIZE, AppConfig.CACHE_TTL)
+         self.rate_limiter = RateLimiter(AppConfig.RATE_LIMIT_REQUESTS, AppConfig.RATE_LIMIT_WINDOW)
+         self.exporter = ConversationExporter()
+         self.analytics = AnalyticsService()
+
+         # Metrics and state
+         self.metrics = ConversationMetrics()
+         self.session_id = generate_session_id()
+
+         logger.info(f"✅ AdvancedReasoner initialized | Session: {self.session_id[:8]}...")
+
+     def _generate_cache_key(self, query: str, model: str, mode: str,
+                             temp: float, tokens: int) -> str:
+         """
+         🔑 GENERATE CACHE KEY
+         """
+         key_string = f"{query}|{model}|{mode}|{temp}|{tokens}"
+         return hashlib.sha256(key_string.encode()).hexdigest()
+
+     @handle_groq_errors(max_retries=AppConfig.MAX_RETRIES, retry_delay=AppConfig.RETRY_DELAY)
+     def _call_groq_api(self, messages: List[Dict], model: str,
+                        temperature: float, max_tokens: int) -> Generator[str, None, None]:
+         """
+         🔌 CALL GROQ API WITH STREAMING
+         """
+         if AppConfig.ENABLE_RATE_LIMITING:
+             self.rate_limiter.acquire()
+
+         client = self.client_manager.client
+
+         stream = client.chat.completions.create(
+             model=model,
+             messages=messages,
+             temperature=temperature,
+             max_tokens=max_tokens,
+             stream=True
+         )
+
+         for chunk in stream:
+             if chunk.choices[0].delta.content:
+                 yield chunk.choices[0].delta.content
+
+     def generate_response(
+         self,
+         query: str,
+         history: List[Dict],
+         model: str,
+         reasoning_mode: ReasoningMode,
+         enable_critique: bool = True,
+         temperature: float = 0.7,
+         max_tokens: int = 4000,
+         template: str = "Custom",
+         use_cache: bool = True
+     ) -> Generator[str, None, None]:
+         """
+         🧠 GENERATE RESPONSE WITH STREAMING
+         """
+         # Validate input
+         is_valid, error_msg = validate_input(query, AppConfig.MAX_INPUT_LENGTH)
+         if not is_valid:
+             yield f"❌ **Input Error:** {error_msg}"
+             return
+
+         start_time = time.time()
+
+         # Check cache
+         cache_key = self._generate_cache_key(query, model, reasoning_mode.value, temperature, max_tokens)
+
+         if use_cache and AppConfig.ENABLE_CACHE:
+             cached = self.cache.get(cache_key)
+             if cached:
+                 self.metrics.update_cache_stats(hit=True)
+                 logger.info("✅ Cache hit - returning cached response")
+                 yield cached
+                 return
+
+         self.metrics.update_cache_stats(hit=False)
+
+         # Build messages
+         messages = self.prompt_engine.build_messages(query, reasoning_mode, template, history)
+
+         # Stream response
+         full_response = ""
+         try:
+             for chunk in self._call_groq_api(messages, model, temperature, max_tokens):
+                 full_response += chunk
+                 yield full_response
+
+             # Self-critique if enabled
+             if enable_critique and AppConfig.ENABLE_SELF_CRITIQUE:
+                 critique_prompt = self.prompt_engine.get_self_critique_prompt(full_response)
+                 critique_messages = [
+                     {"role": "system", "content": "You are a critical reviewer."},
+                     {"role": "user", "content": critique_prompt}
+                 ]
+
+                 critique_response = ""
+                 for chunk in self._call_groq_api(critique_messages, model, temperature, max_tokens // 2):
+                     critique_response += chunk
+
+                 full_response += f"\n\n---\n\n### 🔍 Self-Critique\n{critique_response}"
+                 yield full_response
+
+             # Cache response
+             if use_cache and AppConfig.ENABLE_CACHE:
+                 self.cache.set(cache_key, full_response)
+
+             # Update metrics
+             elapsed_time = time.time() - start_time
+             tokens_estimate = len(full_response.split())
+
+             self.metrics.update(
+                 tokens=tokens_estimate,
+                 time_taken=elapsed_time,
+                 depth=1,
+                 corrections=1 if enable_critique else 0,
+                 confidence=95.0
+             )
+
+             # Save conversation
+             entry = ConversationEntry(
+                 user_message=query,
+                 assistant_response=full_response,
+                 model=model,
+                 reasoning_mode=reasoning_mode.value,
+                 temperature=temperature,
+                 max_tokens=max_tokens,
+                 tokens_used=tokens_estimate,
+                 inference_time=elapsed_time,
+                 critique_enabled=enable_critique,
+                 cache_hit=False
+             )
+
+             self.conversation_manager.add_conversation(entry)
+
+             logger.info(f"✅ Response generated in {elapsed_time:.2f}s | Tokens: {tokens_estimate}")
+
+         except Exception as e:
+             self.metrics.increment_errors()
+             error_msg = f"❌ **Error:** {str(e)}"
+             logger.error(f"Response generation error: {e}", exc_info=True)
+             yield error_msg
+
+     # Convenience properties
+     @property
+     def conversation_history(self) -> List[ConversationEntry]:
+         """Get conversation history"""
+         return self.conversation_manager.get_history()
+
+     @property
+     def model_usage(self) -> Dict[str, int]:
+         """Get model usage statistics"""
+         return dict(self.conversation_manager.model_usage)
+
+     @property
+     def mode_usage(self) -> Dict[str, int]:
+         """Get mode usage statistics"""
+         return dict(self.conversation_manager.mode_usage)
+
+     def clear_history(self) -> None:
+         """Clear conversation history"""
+         self.conversation_manager.clear_history()
+
+     def export_conversation(self, format_type: str, include_metadata: bool = True) -> Tuple[str, Optional[str]]:
+         """
+         Export conversations
+         Returns (content, filepath_string) for Gradio compatibility
+         """
+         return self.exporter.export(self.conversation_history, format_type, include_metadata)
+
+     def export_current_chat_pdf(self) -> Optional[str]:
+         """
+         Export current chat as PDF
+         Returns string path for Gradio compatibility
+         """
+         return self.exporter.export_to_pdf(self.conversation_history, include_metadata=True)
+
+     def search_conversations(self, keyword: str) -> List[tuple]:
+         """Search conversations"""
+         return self.analytics.search_conversations(self.conversation_history, keyword)
+
+     def get_analytics(self) -> Dict[str, Any]:
+         """Get analytics"""
+         return self.analytics.generate_analytics(
+             self.conversation_history,
+             self.metrics,
+             self.session_id,
+             self.model_usage,
+             self.mode_usage,
+             self.cache.get_stats()
+         )
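Since `generate_response` yields the cumulative text on every chunk (a pattern suited to Gradio's streaming updates, where each yield repaints the output), a console consumer only needs the final value. A minimal sketch (hypothetical snippet):

```python
from src.config.constants import ReasoningMode
from src.core.reasoner import AdvancedReasoner

reasoner = AdvancedReasoner()
final = ""
for partial in reasoner.generate_response(
    query="Explain quantum entanglement",
    history=[],
    model="llama-3.3-70b-versatile",
    reasoning_mode=ReasoningMode.TREE_OF_THOUGHTS,
    enable_critique=False,
):
    final = partial  # each yield replaces, rather than appends to, the previous text

print(final)
```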
src/models/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Data models package initialization
+ """
+ from .metrics import ConversationMetrics
+ from .entry import ConversationEntry
+ from .config_models import ReasoningMode, ModelConfig
+
+ __all__ = ['ConversationMetrics', 'ConversationEntry', 'ReasoningMode', 'ModelConfig']
src/models/config_models.py ADDED
@@ -0,0 +1,6 @@
+ """
+ Configuration models (re-exported from config.constants for convenience)
+ """
+ from src.config.constants import ReasoningMode, ModelConfig
+
+ __all__ = ['ReasoningMode', 'ModelConfig']
src/models/entry.py ADDED
@@ -0,0 +1,37 @@
+ """
+ Conversation entry data model
+ """
+ from dataclasses import dataclass, field, asdict
+ from datetime import datetime
+ import uuid
+ from typing import Optional
+ from src.utils.helpers import format_timestamp
+
+
+ @dataclass
+ class ConversationEntry:
+     """
+     💬 ENHANCED CONVERSATION ENTRY WITH METADATA
+     """
+     user_message: str
+     assistant_response: str
+     model: str
+     reasoning_mode: str
+     timestamp: str = field(default_factory=lambda: format_timestamp())
+     entry_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+     temperature: float = 0.7
+     max_tokens: int = 4000
+     tokens_used: int = 0
+     inference_time: float = 0.0
+     reasoning_depth: int = 1
+     confidence_score: float = 100.0
+     critique_enabled: bool = False
+     cache_hit: bool = False
+
+     def to_dict(self) -> dict:
+         """Convert to dictionary"""
+         return asdict(self)
+
+     def __str__(self) -> str:
+         return (f"ConversationEntry(id={self.entry_id[:8]}, "
+                 f"model={self.model}, mode={self.reasoning_mode})")
src/models/metrics.py ADDED
@@ -0,0 +1,82 @@
+ """
+ Conversation metrics data model
+ """
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ import threading
+ from typing import Dict
+ from src.utils.helpers import format_timestamp
+
+
+ @dataclass
+ class ConversationMetrics:
+     """
+     📊 ENHANCED THREAD-SAFE CONVERSATION METRICS
+     """
+     total_conversations: int = 0
+     tokens_used: int = 0
+     inference_time: float = 0.0
+     reasoning_depth: int = 0
+     self_corrections: int = 0
+     confidence_score: float = 0.0
+     session_start: str = field(default_factory=lambda: format_timestamp())
+     peak_tokens: int = 0
+     avg_response_time: float = 0.0
+     tokens_per_second: float = 0.0
+     error_count: int = 0
+     cache_hits: int = 0
+     cache_misses: int = 0
+     _lock: threading.Lock = field(default_factory=threading.Lock, init=False, repr=False)
+
+     def update(self, tokens: int, time_taken: float, depth: int = 1,
+                corrections: int = 0, confidence: float = 100.0) -> None:
+         """
+         ✅ THREAD-SAFE METRIC UPDATE
+         """
+         with self._lock:
+             self.total_conversations += 1
+             self.tokens_used += tokens
+             self.inference_time += time_taken  # accumulate total inference time for the session
+             self.reasoning_depth = depth
+             self.self_corrections = corrections
+             self.confidence_score = confidence
+
+             if tokens > self.peak_tokens:
+                 self.peak_tokens = tokens
+
+             # Derive rates from the accumulated totals, not from the last call alone
+             if self.total_conversations > 0:
+                 self.avg_response_time = self.inference_time / self.total_conversations
+
+             if self.inference_time > 0:
+                 self.tokens_per_second = self.tokens_used / self.inference_time
+
+     def increment_errors(self) -> None:
+         """Increment error count"""
+         with self._lock:
+             self.error_count += 1
+
+     def update_cache_stats(self, hit: bool) -> None:
+         """Update cache statistics"""
+         with self._lock:
+             if hit:
+                 self.cache_hits += 1
+             else:
+                 self.cache_misses += 1
+
+     def reset(self) -> None:
+         """Reset all metrics"""
+         with self._lock:
+             self.total_conversations = 0
+             self.tokens_used = 0
+             self.inference_time = 0.0
+             self.reasoning_depth = 0
+             self.self_corrections = 0
+             self.confidence_score = 0.0
+             self.peak_tokens = 0
+             self.avg_response_time = 0.0
+             self.tokens_per_second = 0.0
+             self.error_count = 0
+             self.cache_hits = 0
+             self.cache_misses = 0
+             self.session_start = format_timestamp()
src/services/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """
+ Services package initialization
+ """
+ from .cache_service import ResponseCache
+ from .rate_limiter import RateLimiter
+ from .export_service import ConversationExporter
+ from .analytics_service import AnalyticsService
+
+ __all__ = ['ResponseCache', 'RateLimiter', 'ConversationExporter', 'AnalyticsService']
src/services/analytics_service.py ADDED
@@ -0,0 +1,80 @@
+ """
+ Analytics and insights generation service
+ """
+ from typing import List, Dict, Any
+ from collections import Counter
+ from src.models.entry import ConversationEntry
+ from src.models.metrics import ConversationMetrics
+ from src.utils.logger import logger
+
+
+ class AnalyticsService:
+     """
+     📊 ANALYTICS AND INSIGHTS GENERATOR
+     """
+
+     @staticmethod
+     def generate_analytics(conversations: List[ConversationEntry],
+                            metrics: ConversationMetrics,
+                            session_id: str,
+                            model_usage: Dict[str, int],
+                            mode_usage: Dict[str, int],
+                            cache_stats: dict) -> Dict[str, Any]:
+         """
+         📊 GENERATE COMPREHENSIVE ANALYTICS
+         """
+         if not conversations:
+             return {}
+
+         # Model distribution
+         most_used_model = max(model_usage.items(), key=lambda x: x[1])[0] if model_usage else "N/A"
+
+         # Reasoning mode distribution
+         most_used_mode = max(mode_usage.items(), key=lambda x: x[1])[0] if mode_usage else "N/A"
+
+         # Token statistics
+         total_tokens = sum(conv.tokens_used for conv in conversations)
+         avg_tokens = total_tokens / len(conversations) if conversations else 0
+
+         # Time statistics
+         total_time = sum(conv.inference_time for conv in conversations)
+         avg_time = total_time / len(conversations) if conversations else 0
+
+         analytics = {
+             'session_id': session_id,
+             'total_conversations': len(conversations),
+             'total_tokens': total_tokens,
+             'avg_tokens_per_conversation': avg_tokens,
+             'total_time': total_time,
+             'avg_inference_time': avg_time,
+             'peak_tokens': metrics.peak_tokens,
+             'most_used_model': most_used_model,
+             'most_used_mode': most_used_mode,
+             'model_distribution': dict(model_usage),
+             'mode_distribution': dict(mode_usage),
+             'cache_hits': cache_stats.get('hits', 0),
+             'cache_misses': cache_stats.get('misses', 0),
+             'cache_hit_rate': cache_stats.get('hit_rate', '0.0'),
+             'error_count': metrics.error_count,
+             'avg_confidence': sum(conv.confidence_score for conv in conversations) / len(conversations) if conversations else 0
+         }
+
+         logger.debug(f"📊 Analytics generated for {len(conversations)} conversations")
+         return analytics
+
+     @staticmethod
+     def search_conversations(conversations: List[ConversationEntry],
+                              keyword: str) -> List[tuple]:
+         """
+         🔍 SEARCH CONVERSATIONS BY KEYWORD
+         """
+         keyword_lower = keyword.lower()
+         results = []
+
+         for idx, conv in enumerate(conversations):
+             if (keyword_lower in conv.user_message.lower() or
+                     keyword_lower in conv.assistant_response.lower()):
+                 results.append((idx, conv))
+
+         logger.info(f"🔍 Found {len(results)} results for '{keyword}'")
+         return results
src/services/cache_service.py ADDED
@@ -0,0 +1,88 @@
+ """
+ Response caching service with LRU and TTL
+ """
+ import time
+ import threading
+ from typing import Dict, Tuple, Optional, Any
+ from src.utils.logger import logger
+
+
+ class ResponseCache:
+     """
+     💾 THREAD-SAFE LRU CACHE WITH TTL
+     """
+     def __init__(self, maxsize: int = 100, ttl: int = 3600):
+         self.cache: Dict[str, Tuple[Any, float]] = {}
+         self.maxsize = maxsize
+         self.ttl = ttl
+         self.lock = threading.Lock()
+         self.hits = 0
+         self.misses = 0
+
+     def get(self, key: str) -> Optional[Any]:
+         """
+         ✅ GET CACHED VALUE WITH TTL CHECK
+         """
+         with self.lock:
+             if key not in self.cache:
+                 self.misses += 1
+                 return None
+
+             value, timestamp = self.cache[key]
+
+             # Check if expired
+             if time.time() - timestamp > self.ttl:
+                 del self.cache[key]
+                 self.misses += 1
+                 logger.debug(f"🕐 Cache expired for key: {key[:20]}...")
+                 return None
+
+             self.hits += 1
+             logger.debug(f"✅ Cache hit for key: {key[:20]}...")
+             return value
+
+     def set(self, key: str, value: Any) -> None:
+         """
+         ✅ SET CACHE VALUE WITH LRU EVICTION
+         """
+         with self.lock:
+             # At capacity: evict the entry with the oldest timestamp
+             # (approximates LRU; entries are not re-stamped on access)
+             if len(self.cache) >= self.maxsize and key not in self.cache:
+                 oldest_key = min(self.cache.keys(), key=lambda k: self.cache[k][1])
+                 del self.cache[oldest_key]
+                 logger.debug(f"🗑️ Evicted cache entry: {oldest_key[:20]}...")
+
+             self.cache[key] = (value, time.time())
+             logger.debug(f"💾 Cached response for key: {key[:20]}...")
+
+     def clear(self) -> None:
+         """
+         🗑️ CLEAR ALL CACHE ENTRIES
+         """
+         with self.lock:
+             self.cache.clear()
+             self.hits = 0
+             self.misses = 0
+             logger.info("🗑️ Cache cleared")
+
+     def get_stats(self) -> dict:
+         """
+         📊 GET CACHE STATISTICS
+         """
+         with self.lock:
+             total_requests = self.hits + self.misses
+             hit_rate = (self.hits / total_requests * 100) if total_requests > 0 else 0
+
+             return {
+                 'size': len(self.cache),
+                 'maxsize': self.maxsize,
+                 'hits': self.hits,
+                 'misses': self.misses,
+                 'hit_rate': f"{hit_rate:.1f}",
+                 'ttl': self.ttl
+             }
+
+     def __len__(self) -> int:
+         """Get current cache size"""
+         with self.lock:
+             return len(self.cache)
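
For reference, a minimal usage sketch of the cache above; the key string shown is purely illustrative (the real callers derive their own keys from model, mode, and prompt):

```python
from src.services.cache_service import ResponseCache

cache = ResponseCache(maxsize=100, ttl=3600)

key = "llama-70b:tree_of_thoughts:What is 2+2?"  # hypothetical key format
if (cached := cache.get(key)) is not None:
    print("cache hit:", cached)
else:
    response = "4"  # placeholder for a real model call
    cache.set(key, response)

print(cache.get_stats())  # e.g. {'size': 1, 'maxsize': 100, 'hits': 0, ...}
```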
src/services/export_service.py ADDED
@@ -0,0 +1,254 @@
+ """
+ Conversation export service supporting multiple formats
+ """
+ import json
+ from pathlib import Path
+ from datetime import datetime
+ from typing import List, Tuple, Optional
+ from src.models.entry import ConversationEntry
+ from src.config.settings import AppConfig
+ from src.utils.logger import logger
+
+
+ class ConversationExporter:
+     """
+     📤 MULTI-FORMAT CONVERSATION EXPORTER
+     """
+
+     def __init__(self):
+         self.export_dir = AppConfig.EXPORT_DIR
+         self.backup_dir = AppConfig.BACKUP_DIR
+         self.export_dir.mkdir(exist_ok=True, parents=True)
+         self.backup_dir.mkdir(exist_ok=True, parents=True)
+
+     def export_to_json(self, conversations: List[ConversationEntry],
+                        include_metadata: bool = True) -> str:
+         """
+         📄 EXPORT TO JSON
+         """
+         if include_metadata:
+             data = [conv.to_dict() for conv in conversations]
+         else:
+             data = [
+                 {
+                     'user': conv.user_message,
+                     'assistant': conv.assistant_response,
+                     'timestamp': conv.timestamp
+                 }
+                 for conv in conversations
+             ]
+
+         return json.dumps(data, indent=2, ensure_ascii=False)
+
+     def export_to_markdown(self, conversations: List[ConversationEntry],
+                            include_metadata: bool = True) -> str:
+         """
+         📝 EXPORT TO MARKDOWN
+         """
+         lines = [
+             "# Conversation Export",
+             f"\n**Export Date:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+             f"**Total Conversations:** {len(conversations)}\n",
+             "---\n"
+         ]
+
+         for idx, conv in enumerate(conversations, 1):
+             lines.append(f"## Conversation {idx}\n")
+
+             if include_metadata:
+                 lines.append(f"**Timestamp:** {conv.timestamp} ")
+                 lines.append(f"**Model:** {conv.model} ")
+                 lines.append(f"**Reasoning Mode:** {conv.reasoning_mode} ")
+                 lines.append(f"**Tokens Used:** {conv.tokens_used} ")
+                 lines.append(f"**Inference Time:** {conv.inference_time:.2f}s\n")
+
+             lines.append(f"**👤 User:**\n{conv.user_message}\n")
+             lines.append(f"**🤖 Assistant:**\n{conv.assistant_response}\n")
+             lines.append("---\n")
+
+         return "\n".join(lines)
+
+     def export_to_txt(self, conversations: List[ConversationEntry],
+                       include_metadata: bool = True) -> str:
+         """
+         📄 EXPORT TO PLAIN TEXT
+         """
+         lines = [
+             "=" * 80,
+             "CONVERSATION EXPORT",
+             f"Export Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+             f"Total Conversations: {len(conversations)}",
+             "=" * 80,
+             ""
+         ]
+
+         for idx, conv in enumerate(conversations, 1):
+             lines.append(f"\n{'=' * 80}")
+             lines.append(f"CONVERSATION {idx}")
+             lines.append(f"{'=' * 80}")
+
+             if include_metadata:
+                 lines.append(f"Timestamp: {conv.timestamp}")
+                 lines.append(f"Model: {conv.model}")
+                 lines.append(f"Reasoning Mode: {conv.reasoning_mode}")
+                 lines.append(f"Tokens Used: {conv.tokens_used}")
+                 lines.append(f"Inference Time: {conv.inference_time:.2f}s")
+                 lines.append("")
+
+             lines.append(f"USER:\n{conv.user_message}\n")
+             lines.append(f"ASSISTANT:\n{conv.assistant_response}\n")
+
+         return "\n".join(lines)
+
+     def export_to_pdf(self, conversations: List[ConversationEntry],
+                       include_metadata: bool = True) -> Optional[str]:
+         """
+         📄 EXPORT TO PDF (Premium Feature)
+         Returns string path for Gradio compatibility
+         """
+         if not AppConfig.ENABLE_PDF_EXPORT:
+             logger.warning("⚠️ PDF export is disabled")
+             return None
+
+         try:
+             from reportlab.lib.pagesizes import letter
+             from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
+             from reportlab.lib.units import inch
+             from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak
+             from reportlab.lib.enums import TA_CENTER
+         except ImportError:
+             logger.error("❌ reportlab not installed. Install with: pip install reportlab")
+             return None
+
+         timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+         filename = self.export_dir / f"conversation_export_{timestamp}.pdf"
+
+         doc = SimpleDocTemplate(str(filename), pagesize=letter)
+         styles = getSampleStyleSheet()
+
+         # Custom styles
+         title_style = ParagraphStyle(
+             'CustomTitle',
+             parent=styles['Heading1'],
+             fontSize=24,
+             textColor='#667eea',
+             alignment=TA_CENTER,
+             spaceAfter=30
+         )
+
+         story = []
+
+         # Title
+         story.append(Paragraph("Conversation Export", title_style))
+         story.append(Paragraph(
+             f"<b>Export Date:</b> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+             styles['Normal']
+         ))
+         story.append(Paragraph(
+             f"<b>Total Conversations:</b> {len(conversations)}",
+             styles['Normal']
+         ))
+         story.append(Spacer(1, 0.3*inch))
+
+         # Conversations
+         for idx, conv in enumerate(conversations, 1):
+             story.append(Paragraph(f"<b>Conversation {idx}</b>", styles['Heading2']))
+
+             if include_metadata:
+                 meta_text = (
+                     f"<b>Timestamp:</b> {conv.timestamp} | "
+                     f"<b>Model:</b> {conv.model} | "
+                     f"<b>Mode:</b> {conv.reasoning_mode}<br/>"
+                     f"<b>Tokens:</b> {conv.tokens_used} | "
+                     f"<b>Time:</b> {conv.inference_time:.2f}s"
+                 )
+                 story.append(Paragraph(meta_text, styles['Normal']))
+                 story.append(Spacer(1, 0.1*inch))
+
+             story.append(Paragraph("<b>👤 User:</b>", styles['Heading3']))
+             story.append(Paragraph(conv.user_message.replace('\n', '<br/>'), styles['Normal']))
+             story.append(Spacer(1, 0.1*inch))
+
+             story.append(Paragraph("<b>🤖 Assistant:</b>", styles['Heading3']))
+             story.append(Paragraph(conv.assistant_response.replace('\n', '<br/>'), styles['Normal']))
+
+             if idx < len(conversations):
+                 story.append(PageBreak())
+
+         doc.build(story)
+         logger.info(f"✅ PDF exported: {filename}")
+
+         # Return string path for Gradio compatibility
+         return str(filename)
+
+     def export(self, conversations: List[ConversationEntry],
+                format_type: str, include_metadata: bool = True) -> Tuple[str, Optional[str]]:
+         """
+         📤 UNIFIED EXPORT METHOD
+         Returns (content, filepath_string) for Gradio compatibility
+         """
+         if not conversations:
+             return "⚠️ No conversations to export.", None
+
+         try:
+             if format_type == "json":
+                 content = self.export_to_json(conversations, include_metadata)
+                 filename = self._save_to_file(content, "json")
+                 return content, str(filename)
+
+             elif format_type == "markdown":
+                 content = self.export_to_markdown(conversations, include_metadata)
+                 filename = self._save_to_file(content, "md")
+                 return content, str(filename)
+
+             elif format_type == "txt":
+                 content = self.export_to_txt(conversations, include_metadata)
+                 filename = self._save_to_file(content, "txt")
+                 return content, str(filename)
+
+             elif format_type == "pdf":
+                 filename = self.export_to_pdf(conversations, include_metadata)
+                 if filename:
+                     return f"✅ PDF exported successfully: {Path(filename).name}", filename
+                 return "❌ PDF export failed", None
+
+             else:
+                 return f"❌ Unsupported format: {format_type}", None
+
+         except Exception as e:
+             logger.error(f"❌ Export error: {e}", exc_info=True)
+             return f"❌ Export failed: {str(e)}", None
+
+     def _save_to_file(self, content: str, extension: str) -> Path:
+         """
+         💾 SAVE CONTENT TO FILE
+         """
+         timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+         filename = self.export_dir / f"conversation_export_{timestamp}.{extension}"
+
+         filename.write_text(content, encoding='utf-8')
+         logger.info(f"✅ File saved: {filename}")
+         return filename
+
+     def create_backup(self, conversations: List[ConversationEntry]) -> Optional[str]:
+         """
+         💾 CREATE AUTOMATIC BACKUP
+         Returns string path for Gradio compatibility
+         """
+         if not conversations:
+             return None
+
+         try:
+             timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+             filename = self.backup_dir / f"backup_{timestamp}.json"
+
+             content = self.export_to_json(conversations, include_metadata=True)
+             filename.write_text(content, encoding='utf-8')
+
+             logger.info(f"✅ Backup created: {filename}")
+             return str(filename)
+
+         except Exception as e:
+             logger.error(f"❌ Backup failed: {e}")
+             return None
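
A minimal usage sketch of the exporter, assuming `entries` is an already-populated list of `ConversationEntry` objects (in the app this comes from the reasoner's history):

```python
from src.services.export_service import ConversationExporter

exporter = ConversationExporter()
# `entries` is assumed: a list of ConversationEntry objects
content, filepath = exporter.export(entries, format_type="markdown", include_metadata=True)
print(filepath)  # e.g. exports/conversation_export_20240101_120000.md
```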
src/services/rate_limiter.py ADDED
@@ -0,0 +1,72 @@
+ """
+ Sliding-window rate limiting service
+ """
+ import time
+ import threading
+ from collections import deque
+ from src.utils.logger import logger
+
+
+ class RateLimiter:
+     """
+     ⏱️ SLIDING-WINDOW RATE LIMITER
+     """
+     def __init__(self, max_requests: int = 50, window_seconds: int = 60):
+         self.max_requests = max_requests
+         self.window_seconds = window_seconds
+         self.requests = deque()
+         self.lock = threading.Lock()
+
+     def acquire(self) -> bool:
+         """
+         ✅ ACQUIRE RATE LIMIT SLOT
+         Blocks until the request is allowed; always returns True
+         """
+         while True:
+             with self.lock:
+                 current_time = time.time()
+
+                 # Remove expired requests
+                 while self.requests and current_time - self.requests[0] > self.window_seconds:
+                     self.requests.popleft()
+
+                 # Check if under limit
+                 if len(self.requests) < self.max_requests:
+                     self.requests.append(current_time)
+                     logger.debug(f"✅ Rate limit check passed ({len(self.requests)}/{self.max_requests})")
+                     return True
+
+                 # Calculate wait time until the oldest request leaves the window
+                 wait_time = self.window_seconds - (current_time - self.requests[0])
+
+             # Sleep outside the lock so other threads are not blocked, then retry
+             logger.warning(f"⏳ Rate limit exceeded. Wait {wait_time:.1f}s")
+             time.sleep(wait_time + 0.1)
+
+     def get_stats(self) -> dict:
+         """
+         📊 GET RATE LIMITER STATISTICS
+         """
+         with self.lock:
+             current_time = time.time()
+
+             # Remove expired
+             while self.requests and current_time - self.requests[0] > self.window_seconds:
+                 self.requests.popleft()
+
+             return {
+                 'current_requests': len(self.requests),
+                 'max_requests': self.max_requests,
+                 'window_seconds': self.window_seconds,
+                 'remaining': self.max_requests - len(self.requests)
+             }
+
+     def reset(self) -> None:
+         """
+         🔄 RESET RATE LIMITER
+         """
+         with self.lock:
+             self.requests.clear()
+             logger.info("🔄 Rate limiter reset")
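
A minimal usage sketch of the limiter above; `acquire()` blocks only when the window is full:

```python
from src.services.rate_limiter import RateLimiter

limiter = RateLimiter(max_requests=50, window_seconds=60)

for i in range(3):
    limiter.acquire()  # returns immediately while slots remain
    print("request", i, "->", limiter.get_stats()["remaining"], "slots left")
```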
src/ui/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """
+ User interface package initialization
+ """
+ from .app import create_ui
+ from .components import UIComponents
+ from .handlers import EventHandlers
+ from .styles import CUSTOM_CSS, SIDEBAR_CSS
+
+ __all__ = ['create_ui', 'UIComponents', 'EventHandlers', 'CUSTOM_CSS', 'SIDEBAR_CSS']
src/ui/app.py ADDED
@@ -0,0 +1,273 @@
+ """
+ Main Gradio application interface
+ """
+ import gradio as gr
+ from src.core.reasoner import AdvancedReasoner
+ from src.config.settings import AppConfig
+ from src.config.constants import ReasoningMode, ModelConfig
+ from src.ui.components import UIComponents
+ from src.ui.handlers import EventHandlers
+ from src.ui.styles import SIDEBAR_CSS
+
+
+ def create_ui() -> gr.Blocks:
+     """
+     🎨 CREATE ENHANCED GRADIO INTERFACE
+     """
+
+     # Initialize reasoner and components
+     reasoner = AdvancedReasoner()
+     components = UIComponents()
+     handlers = EventHandlers(reasoner)
+
+     with gr.Blocks(
+         theme=gr.themes.Soft(
+             primary_hue=AppConfig.THEME_PRIMARY,
+             secondary_hue=AppConfig.THEME_SECONDARY,
+             font=gr.themes.GoogleFont("Inter")
+         ),
+         css=SIDEBAR_CSS,
+         title="Advanced AI Reasoning System Pro"
+     ) as demo:
+
+         # Header
+         gr.HTML(components.get_header_html())
+
+         # Main Tabs
+         with gr.Tabs():
+
+             # ==================== TAB 1: REASONING WORKSPACE ====================
+             with gr.Tab("🧠 Reasoning Workspace"):
+                 with gr.Row():
+
+                     # Main Chat Area
+                     with gr.Column(scale=4):
+                         chatbot = gr.Chatbot(
+                             label="💬 Reasoning Workspace",
+                             height=750,
+                             show_copy_button=True,
+                             type="messages",
+                             avatar_images=(
+                                 "https://api.dicebear.com/7.x/avataaars/svg?seed=User",
+                                 "https://api.dicebear.com/7.x/bottts/svg?seed=AI"
+                             ),
+                             bubble_full_width=False
+                         )
+
+                         msg = gr.Textbox(
+                             placeholder="💭 Enter your complex problem or research question... (Max 10,000 characters)",
+                             label="Query Input",
+                             lines=3,
+                             max_lines=10,
+                             show_label=False
+                         )
+
+                         with gr.Row():
+                             submit_btn = gr.Button("🚀 Process", variant="primary", scale=2, size="lg")
+                             clear_btn = gr.Button("🗑️ Clear", scale=1, size="lg")
+                             pdf_btn = gr.Button("📄 Download PDF", scale=1, size="lg", variant="secondary")
+                             toggle_sidebar_btn = gr.Button("⚙️ Settings", scale=1, size="lg", variant="secondary")
+
+                     # Collapsible Sidebar
+                     with gr.Column(scale=1, visible=True, elem_classes="settings-column") as sidebar:
+                         gr.Markdown("### ⚙️ Configuration")
+
+                         reasoning_mode = gr.Radio(
+                             choices=components.get_reasoning_mode_choices(),
+                             value=ReasoningMode.TREE_OF_THOUGHTS.value,
+                             label="🧠 Reasoning Method",
+                             info="Select your preferred reasoning strategy"
+                         )
+
+                         prompt_template = gr.Dropdown(
+                             choices=components.get_prompt_template_choices(),
+                             value="Custom",
+                             label="📝 Prompt Template",
+                             info="Pre-built prompt templates for common tasks"
+                         )
+
+                         enable_critique = gr.Checkbox(
+                             label="🔍 Enable Self-Critique",
+                             value=True,
+                             info="Add automatic validation and correction phase"
+                         )
+
+                         use_cache = gr.Checkbox(
+                             label="💾 Use Response Cache",
+                             value=True,
+                             info="Cache responses for faster repeated queries"
+                         )
+
+                         model = gr.Dropdown(
+                             choices=components.get_model_choices(),
+                             value=ModelConfig.LLAMA_70B.model_id,
+                             label="🤖 AI Model",
+                             info="Select the AI model to use"
+                         )
+
+                         with gr.Accordion("🎛️ Advanced Settings", open=False):
+                             temperature = gr.Slider(
+                                 AppConfig.MIN_TEMPERATURE,
+                                 AppConfig.MAX_TEMPERATURE,
+                                 value=AppConfig.DEFAULT_TEMPERATURE,
+                                 step=0.1,
+                                 label="🌡️ Temperature",
+                                 info="Higher = more creative, Lower = more focused"
+                             )
+
+                             max_tokens = gr.Slider(
+                                 AppConfig.MIN_TOKENS,
+                                 8000,
+                                 value=AppConfig.DEFAULT_MAX_TOKENS,
+                                 step=500,
+                                 label="📊 Max Tokens",
+                                 info="Maximum response length"
+                             )
+
+                         gr.Markdown("### 📊 Live Metrics")
+                         metrics_display = gr.Markdown(value=components.get_metrics_html(reasoner))
+
+                         with gr.Accordion("ℹ️ System Info", open=False):
+                             gr.Markdown(components.get_system_info_html(reasoner))
+
+                 # PDF download output
+                 pdf_file_output = gr.File(
+                     label="📥 Download Your PDF Report",
+                     visible=True,
+                     file_types=[".pdf"]
+                 )
+
+             # ==================== TAB 2: EXPORT & HISTORY ====================
+             with gr.Tab("📤 Export & History"):
+                 gr.Markdown("### 📤 Export Conversation History")
+                 gr.Markdown("Export your conversations in multiple formats with optional metadata.")
+
+                 with gr.Row():
+                     export_format = gr.Radio(
+                         choices=["json", "markdown", "txt", "pdf"],
+                         value="markdown",
+                         label="📄 Export Format",
+                         info="Choose your preferred export format"
+                     )
+                     include_meta = gr.Checkbox(
+                         label="📋 Include Metadata",
+                         value=True,
+                         info="Include timestamps, models, and performance metrics"
+                     )
+
+                 export_btn = gr.Button("📥 Export Now", variant="primary", size="lg")
+                 export_output = gr.Code(label="Exported Data Preview", language="markdown", lines=20)
+                 download_file = gr.File(
+                     label="📥 Download Export File",
+                     file_types=[".json", ".md", ".txt", ".pdf"]
+                 )
+
+                 gr.Markdown("---")
+                 gr.Markdown("### 🔍 Search Conversations")
+                 gr.Markdown("Search through your conversation history by keywords.")
+
+                 with gr.Row():
+                     search_input = gr.Textbox(
+                         placeholder="🔎 Enter keyword to search...",
+                         scale=3,
+                         label="Search Query",
+                         show_label=False
+                     )
+                     search_btn = gr.Button("🔍 Search", scale=1, size="lg")
+
+                 search_results = gr.Markdown("💡 **Tip:** Enter a keyword and click Search to find relevant conversations.")
+
+                 gr.Markdown("---")
+                 gr.Markdown("### 📚 Conversation History")
+                 history_stats = gr.Markdown("Loading statistics...")
+
+             # ==================== TAB 3: ANALYTICS & INSIGHTS ====================
+             with gr.Tab("📊 Analytics & Insights"):
+                 gr.Markdown("### 📊 Performance Analytics Dashboard")
+                 refresh_btn = gr.Button("🔄 Refresh Analytics", variant="primary", size="lg")
+
+                 with gr.Row():
+                     with gr.Column():
+                         gr.Markdown("#### 📈 Performance Metrics")
+                         analytics_display = gr.Markdown(components.get_empty_analytics_html())
+
+                     with gr.Column():
+                         gr.Markdown("#### 💾 Cache Statistics")
+                         cache_display = gr.Markdown("No cache data available yet. Start a conversation to see cache performance.")
+
+                 gr.Markdown("---")
+                 gr.Markdown("### 📊 Usage Distribution")
+
+                 with gr.Row():
+                     with gr.Column():
+                         gr.Markdown("#### 🤖 Model Usage")
+                         model_dist = gr.Markdown("**No data yet.** Models will be tracked as you use them.")
+
+                     with gr.Column():
+                         gr.Markdown("#### 🧠 Reasoning Mode Usage")
+                         mode_dist = gr.Markdown("**No data yet.** Reasoning modes will be tracked as you use them.")
+
+             # ==================== TAB 4: SETTINGS ====================
+             with gr.Tab("⚙️ Settings"):
+                 gr.Markdown("### ⚙️ Application Settings")
+                 gr.Markdown("Current configuration and system controls.")
+
+                 gr.Markdown(components.get_settings_table_html())
+
+                 gr.Markdown("---")
+                 gr.Markdown("### 🛠️ System Actions")
+
+                 with gr.Row():
+                     clear_cache_btn = gr.Button("🗑️ Clear Cache", variant="stop", size="lg")
+                     reset_metrics_btn = gr.Button("🔄 Reset Metrics", variant="secondary", size="lg")
+
+                 cache_status = gr.Markdown("")
+
+         # ==================== EVENT HANDLERS ====================
+
+         # State management
+         sidebar_visible_state = gr.State(value=True)
+
+         # Message submission
+         submit_btn.click(
+             handlers.process_message,
+             [msg, chatbot, reasoning_mode, enable_critique, model,
+              temperature, max_tokens, prompt_template, use_cache],
+             [chatbot, metrics_display]
+         ).then(lambda: "", None, msg)
+
+         msg.submit(
+             handlers.process_message,
+             [msg, chatbot, reasoning_mode, enable_critique, model,
+              temperature, max_tokens, prompt_template, use_cache],
+             [chatbot, metrics_display]
+         ).then(lambda: "", None, msg)
+
+         # Chat controls
+         clear_btn.click(handlers.reset_chat, None, [chatbot, metrics_display])
+         pdf_btn.click(handlers.download_chat_pdf, None, pdf_file_output)
+
+         # Sidebar toggle
+         toggle_sidebar_btn.click(
+             handlers.toggle_sidebar,
+             inputs=[sidebar_visible_state],
+             outputs=[sidebar, sidebar_visible_state]
+         )
+
+         # Export & Search
+         export_btn.click(handlers.export_conversation, [export_format, include_meta], [export_output, download_file])
+         search_btn.click(handlers.search_conversations, search_input, search_results)
+
+         # Analytics
+         refresh_btn.click(handlers.refresh_analytics, None, [analytics_display, cache_display, model_dist, mode_dist])
+
+         # Settings actions
+         clear_cache_btn.click(handlers.clear_cache_action, None, cache_status)
+         reset_metrics_btn.click(handlers.reset_metrics_action, None, cache_status)
+
+         # Load history stats on page load
+         demo.load(handlers.update_history_stats, None, history_stats)
+
+     return demo
src/ui/components.py ADDED
@@ -0,0 +1,127 @@
+ """
+ Reusable UI components
+ """
+ from src.config.settings import AppConfig
+ from src.config.constants import ReasoningMode, ModelConfig
+ from src.core.reasoner import AdvancedReasoner
+ from src.core.prompt_engine import PromptEngine
+
+
+ class UIComponents:
+     """
+     🎨 REUSABLE UI COMPONENTS
+     """
+
+     @staticmethod
+     def get_header_html() -> str:
+         """
+         📋 GENERATE HEADER HTML
+         """
+         return """
+         <div class="research-header">
+             <h1>🧠 Advanced AI Reasoning System Pro</h1>
+             <p><strong>Research-Backed Implementation:</strong> Tree of Thoughts + Constitutional AI + Multi-Agent Validation + Performance Optimization</p>
+             <div style="margin-top: 1rem;">
+                 <span class="badge">🌳 Yao et al. 2023 - Tree of Thoughts</span>
+                 <span class="badge">🛡️ Bai et al. 2022 - Constitutional AI</span>
+                 <span class="badge">🔬 6 Advanced Reasoning Modes</span>
+                 <span class="badge">⚡ Cache + Rate Limiting</span>
+                 <span class="badge">🎛️ Collapsible Sidebar</span>
+                 <span class="badge">📊 Real-Time Analytics</span>
+             </div>
+         </div>
+         """
+
+     @staticmethod
+     def get_metrics_html(reasoner: AdvancedReasoner) -> str:
+         """
+         📊 GENERATE METRICS HTML
+         """
+         m = reasoner.metrics
+         cache_stats = reasoner.cache.get_stats()
+
+         if m.tokens_used > 0:
+             status = '<span class="status-active">● Active</span>'
+         else:
+             status = '<span style="color: #64748b;">● Ready</span>'
+
+         return f"""<div class="metrics-card">
+ <strong>⚡ Inference:</strong> {m.inference_time:.2f}s<br>
+ <strong>⏱️ Avg Time:</strong> {m.avg_response_time:.2f}s<br>
+ <strong>🚀 Speed:</strong> {m.tokens_per_second:.1f} tok/s<br>
+ <strong>🧠 Reasoning:</strong> {m.reasoning_depth} steps<br>
+ <strong>🔄 Corrections:</strong> {m.self_corrections}<br>
+ <strong>✨ Confidence:</strong> {m.confidence_score:.1f}%<br>
+ <strong>💬 Total:</strong> {m.total_conversations}<br>
+ <strong>📊 Tokens:</strong> {m.tokens_used:,}<br>
+ <strong>🏔️ Peak:</strong> {m.peak_tokens}<br>
+ <strong>💾 Cache:</strong> {cache_stats['hit_rate']}% hit rate<br>
+ <strong>📡 Status:</strong> {status}<br>
+ <strong>🔑 Session:</strong> {reasoner.session_id[:8]}...
+ </div>"""
+
+     @staticmethod
+     def get_empty_analytics_html() -> str:
+         """
+         📊 GENERATE EMPTY ANALYTICS HTML
+         """
+         return """<div class="analytics-panel">
+ <h3>📊 No Analytics Data Yet</h3>
+ <p>Start a conversation to see detailed analytics and insights!</p>
+ <p style="margin-top: 1rem; font-size: 0.9em;">Metrics tracked: tokens, speed, reasoning depth, confidence scores, and more.</p>
+ </div>"""
+
+     @staticmethod
+     def get_system_info_html(reasoner: AdvancedReasoner) -> str:
+         """
+         ℹ️ GENERATE SYSTEM INFO HTML
+         """
+         return f"""
+ **Session ID:** `{reasoner.session_id}`
+ **Environment:** `{AppConfig.ENV}`
+ **Cache Size:** {AppConfig.CACHE_SIZE} entries
+ **Cache TTL:** {AppConfig.CACHE_TTL}s
+ **Rate Limit:** {AppConfig.RATE_LIMIT_REQUESTS} req/{AppConfig.RATE_LIMIT_WINDOW}s
+ **Max History:** {AppConfig.MAX_HISTORY_LENGTH} messages
+ **Available Models:** {len(ModelConfig)} models
+ **Reasoning Modes:** {len(ReasoningMode)} modes
+ """
+
+     @staticmethod
+     def get_settings_table_html() -> str:
+         """
+         ⚙️ GENERATE SETTINGS TABLE HTML
+         """
+         return f"""
+ | Setting | Value |
+ |---------|-------|
+ | **Environment** | `{AppConfig.ENV}` |
+ | **Debug Mode** | `{AppConfig.DEBUG}` |
+ | **Max History Length** | {AppConfig.MAX_HISTORY_LENGTH} messages |
+ | **Max Conversation Storage** | {AppConfig.MAX_CONVERSATION_STORAGE} conversations |
+ | **Cache Size** | {AppConfig.CACHE_SIZE} entries |
+ | **Cache TTL** | {AppConfig.CACHE_TTL} seconds |
+ | **Rate Limit** | {AppConfig.RATE_LIMIT_REQUESTS} requests per {AppConfig.RATE_LIMIT_WINDOW}s |
+ | **Request Timeout** | {AppConfig.REQUEST_TIMEOUT} seconds |
+ | **Max Retries** | {AppConfig.MAX_RETRIES} attempts |
+ | **Export Directory** | `{AppConfig.EXPORT_DIR}` |
+ | **Backup Directory** | `{AppConfig.BACKUP_DIR}` |
+ | **Available Models** | {len(ModelConfig)} models |
+ | **Reasoning Modes** | {len(ReasoningMode)} modes |
+ """
+
+     @staticmethod
+     def get_reasoning_mode_choices() -> list:
+         """Get reasoning mode choices"""
+         return [mode.value for mode in ReasoningMode]
+
+     @staticmethod
+     def get_prompt_template_choices() -> list:
+         """Get prompt template choices"""
+         return list(PromptEngine.TEMPLATES.keys())
+
+     @staticmethod
+     def get_model_choices() -> list:
+         """Get model choices"""
+         return [m.model_id for m in ModelConfig]
src/ui/handlers.py ADDED
@@ -0,0 +1,199 @@
+ """
+ Event handlers for UI interactions
+ """
+ import gradio as gr
+ from src.core.reasoner import AdvancedReasoner
+ from src.config.constants import ReasoningMode
+ from src.utils.logger import logger
+ from src.ui.components import UIComponents
+
+
+ class EventHandlers:
+     """
+     🎯 EVENT HANDLERS FOR UI INTERACTIONS
+     """
+
+     def __init__(self, reasoner: AdvancedReasoner):
+         self.reasoner = reasoner
+         self.components = UIComponents()
+
+     def process_message(self, message, history, mode, critique, model_name,
+                         temp, tokens, template, cache):
+         """
+         🔄 PROCESS MESSAGE WITH STREAMING
+         """
+         if not message or not message.strip():
+             history = history or []
+             history.append({
+                 "role": "assistant",
+                 "content": "⚠️ **Input Error:** Please enter a message before submitting."
+             })
+             # This is a generator, so yield (not return) the error state
+             yield history, self.components.get_metrics_html(self.reasoner)
+             return
+
+         history = history or []
+         mode_enum = ReasoningMode(mode)
+
+         # Add user message
+         history.append({"role": "user", "content": message})
+         yield history, self.components.get_metrics_html(self.reasoner)
+
+         # Add empty assistant message for streaming
+         history.append({"role": "assistant", "content": ""})
+
+         try:
+             for response in self.reasoner.generate_response(
+                 message, history[:-1], model_name, mode_enum,
+                 critique, temp, tokens, template, cache
+             ):
+                 history[-1]["content"] = response
+                 yield history, self.components.get_metrics_html(self.reasoner)
+
+         except Exception as e:
+             error_msg = f"❌ **Unexpected Error:** {str(e)}\n\nPlease try again or check the logs for details."
+             history[-1]["content"] = error_msg
+             logger.error(f"Error in process_message: {e}", exc_info=True)
+             yield history, self.components.get_metrics_html(self.reasoner)
+
+     def reset_chat(self):
+         """🗑️ RESET CHAT"""
+         self.reasoner.clear_history()
+         logger.info("Chat history cleared by user")
+         return [], self.components.get_metrics_html(self.reasoner)
+
+     def export_conversation(self, format_type, include_metadata):
+         """📤 EXPORT CONVERSATION"""
+         try:
+             content, filename = self.reasoner.export_conversation(format_type, include_metadata)
+             if filename:
+                 logger.info(f"Conversation exported: {filename}")
+                 return content, filename
+             else:
+                 return content, None
+         except Exception as e:
+             logger.error(f"Export error: {e}")
+             return f"❌ Export failed: {str(e)}", None
+
+     def download_chat_pdf(self):
+         """📄 DOWNLOAD CHAT AS PDF"""
+         try:
+             pdf_file = self.reasoner.export_current_chat_pdf()
+             if pdf_file:
+                 logger.info(f"PDF ready for download: {pdf_file}")
+                 return pdf_file
+             else:
+                 logger.warning("No conversations to export")
+                 return None
+         except Exception as e:
+             logger.error(f"PDF download error: {e}")
+             return None
+
+     def search_conversations(self, keyword):
+         """🔍 SEARCH CONVERSATIONS"""
+         if not keyword or not keyword.strip():
+             return "⚠️ **Search Error:** Please enter a search keyword."
+
+         try:
+             results = self.reasoner.search_conversations(keyword)
+             if not results:
+                 return f"🔍 **No Results:** No conversations found containing '{keyword}'."
+
+             output = f"### 🔍 Found {len(results)} result(s) for '{keyword}'\n\n"
+             for idx, entry in results[:10]:
+                 output += f"**{idx + 1}.** 📅 {entry.timestamp} | 🤖 {entry.model}\n"
+                 preview = entry.user_message[:100].replace('\n', ' ')
+                 output += f"**👤 User:** {preview}...\n\n"
+
+             if len(results) > 10:
+                 output += f"\n*Showing first 10 of {len(results)} results*"
+
+             return output
+         except Exception as e:
+             logger.error(f"Search error: {e}")
+             return f"❌ **Search Error:** {str(e)}"
+
+     def refresh_analytics(self):
+         """📊 REFRESH ANALYTICS"""
+         try:
+             analytics = self.reasoner.get_analytics()
+             if not analytics:
+                 return (
+                     self.components.get_empty_analytics_html(),
+                     "No cache data available yet.",
+                     "**Model Usage:** No data",
+                     "**Reasoning Mode Usage:** No data"
+                 )
+
+             analytics_html = f"""<div class="analytics-panel">
+ <h3>📊 Session Analytics</h3>
+ <p><strong>🔑 Session ID:</strong> {analytics['session_id']}</p>
+ <p><strong>💬 Total Conversations:</strong> {analytics['total_conversations']}</p>
+ <p><strong>📊 Total Tokens:</strong> {analytics['total_tokens']:,}</p>
+ <p><strong>⏱️ Total Time:</strong> {analytics['total_time']:.1f}s</p>
+ <p><strong>⚡ Avg Inference Time:</strong> {analytics['avg_inference_time']:.2f}s</p>
+ <p><strong>🏔️ Peak Tokens:</strong> {analytics['peak_tokens']}</p>
+ <p><strong>🤖 Most Used Model:</strong> {analytics['most_used_model']}</p>
+ <p><strong>🧠 Most Used Mode:</strong> {analytics['most_used_mode']}</p>
+ <p><strong>⚠️ Errors:</strong> {analytics['error_count']}</p>
+ </div>"""
+
+             cache_html = f"""**💾 Cache Performance:**
+ - ✅ Hits: {analytics['cache_hits']}
+ - ❌ Misses: {analytics['cache_misses']}
+ - 📊 Total: {analytics['cache_hits'] + analytics['cache_misses']}
+ - 📈 Hit Rate: {self.reasoner.cache.get_stats()['hit_rate']}%
+ """
+
+             model_dist_html = f"**🤖 Most Used Model:** {analytics['most_used_model']}"
+             mode_dist_html = f"**🧠 Most Used Mode:** {analytics['most_used_mode']}"
+
+             return analytics_html, cache_html, model_dist_html, mode_dist_html
+         except Exception as e:
+             logger.error(f"Analytics refresh error: {e}")
+             return self.components.get_empty_analytics_html(), "Error loading cache data", "No data", "No data"
+
+     def update_history_stats(self):
+         """📚 UPDATE HISTORY STATS"""
+         try:
+             count = len(self.reasoner.conversation_history)
+             if count == 0:
+                 return "📚 **No conversations yet.** Start chatting to build your history!"
+
+             return f"""**📊 Conversation Statistics:**
+
+ - 💬 Total Conversations: {count}
+ - 🔑 Session ID: `{self.reasoner.session_id[:8]}...`
+ - 📅 Session Started: {self.reasoner.metrics.session_start}
+ - 🤖 Models Used: {len(self.reasoner.model_usage)}
+ - 🧠 Reasoning Modes Used: {len(self.reasoner.mode_usage)}
+ """
+         except Exception as e:
+             logger.error(f"History stats error: {e}")
+             return "Error loading history statistics"
+
+     def clear_cache_action(self):
+         """🗑️ CLEAR CACHE"""
+         try:
+             self.reasoner.cache.clear()
+             logger.info("Cache cleared by user")
+             return "✅ **Success:** Cache cleared successfully!"
+         except Exception as e:
+             logger.error(f"Cache clear error: {e}")
+             return f"❌ **Error:** Failed to clear cache: {str(e)}"
+
+     def reset_metrics_action(self):
+         """🔄 RESET METRICS"""
+         try:
+             self.reasoner.metrics.reset()
+             logger.info("Metrics reset by user")
+             return "✅ **Success:** Metrics reset successfully!"
+         except Exception as e:
+             logger.error(f"Metrics reset error: {e}")
+             return f"❌ **Error:** Failed to reset metrics: {str(e)}"
+
+     def toggle_sidebar(self, sidebar_state):
+         """⚙️ TOGGLE SIDEBAR VISIBILITY"""
+         new_state = not sidebar_state
+         logger.info(f"Sidebar toggled: {'Visible' if new_state else 'Hidden'}")
+         return gr.update(visible=new_state), new_state
src/ui/styles.py ADDED
@@ -0,0 +1,266 @@
+ """
+ CSS styles for the Gradio interface
+ """
+
+ CUSTOM_CSS = """
+ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=JetBrains+Mono:wght@400;500;600&display=swap');
+
+ :root {
+     --primary-gradient: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     --success-gradient: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
+     --shadow-lg: 0 10px 40px rgba(0,0,0,0.15);
+     --border-radius: 16px;
+     --transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
+ }
+
+ /* ==================== HEADER ==================== */
+
+ .research-header {
+     background: var(--primary-gradient);
+     padding: 3rem 2.5rem;
+     border-radius: var(--border-radius);
+     color: white;
+     margin-bottom: 2rem;
+     box-shadow: var(--shadow-lg);
+     animation: slideDown 0.6s ease-out;
+ }
+
+ .research-header h1 {
+     font-size: 2.5rem;
+     font-weight: 800;
+     margin-bottom: 1rem;
+     text-shadow: 2px 2px 4px rgba(0,0,0,0.2);
+ }
+
+ .badge {
+     background: rgba(255,255,255,0.25);
+     backdrop-filter: blur(10px);
+     color: white;
+     padding: 0.5rem 1.2rem;
+     border-radius: 25px;
+     font-size: 0.9rem;
+     margin: 0.3rem;
+     display: inline-block;
+     transition: var(--transition);
+     border: 1px solid rgba(255,255,255,0.2);
+ }
+
+ .badge:hover {
+     transform: translateY(-2px);
+     background: rgba(255,255,255,0.35);
+ }
+
+ /* ==================== METRICS CARD ==================== */
+
+ .metrics-card {
+     background: linear-gradient(135deg, #ffffff 0%, #f8f9fa 100%);
+     border-left: 5px solid #667eea;
+     padding: 1.8rem;
+     border-radius: var(--border-radius);
+     margin: 1rem 0;
+     font-family: 'JetBrains Mono', monospace;
+     transition: var(--transition);
+     box-shadow: 0 2px 8px rgba(0,0,0,0.08);
+     color: #2c3e50 !important;
+ }
+
+ .metrics-card * {
+     color: #2c3e50 !important;
+ }
+
+ .metrics-card strong {
+     color: #1a202c !important;
+     font-weight: 700 !important;
+ }
+
+ .metrics-card:hover {
+     transform: translateX(5px);
+     box-shadow: 0 4px 12px rgba(0,0,0,0.12);
+ }
+
+ /* ==================== ANALYTICS PANEL ==================== */
+
+ .analytics-panel {
+     background: var(--success-gradient);
+     color: white;
+     padding: 2rem;
+     border-radius: var(--border-radius);
+     animation: fadeIn 0.5s ease-out;
+     box-shadow: var(--shadow-lg);
+ }
+
+ .analytics-panel * {
+     color: white !important;
+ }
+
+ .analytics-panel h3 {
+     margin-bottom: 1rem;
+     font-size: 1.5rem;
+ }
+
+ .analytics-panel p {
+     line-height: 1.6;
+ }
+
+ .analytics-panel strong {
+     font-weight: 600;
+ }
+
+ /* ==================== STATUS INDICATORS ==================== */
+
+ .status-active {
+     color: #10b981 !important;
+     font-weight: bold;
+     animation: pulse 2s infinite;
+     text-shadow: 0 0 10px rgba(16, 185, 129, 0.5);
+ }
+
+ /* ==================== ANIMATIONS ==================== */
+
+ @keyframes slideDown {
+     from { opacity: 0; transform: translateY(-30px); }
+     to { opacity: 1; transform: translateY(0); }
+ }
+
+ @keyframes fadeIn {
+     from { opacity: 0; transform: scale(0.95); }
+     to { opacity: 1; transform: scale(1); }
+ }
+
+ @keyframes pulse {
+     0%, 100% { opacity: 1; }
+     50% { opacity: 0.7; }
+ }
+
+ /* ==================== GLOBAL STYLES ==================== */
+
+ .gradio-container {
+     font-family: 'Inter', sans-serif !important;
+     max-width: 1600px !important;
+ }
+
+ .gr-button {
+     transition: var(--transition) !important;
+ }
+
+ .gr-button:hover {
+     transform: translateY(-2px) !important;
+     box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important;
+ }
+
+ /* ==================== MARKDOWN TEXT FIX ==================== */
+
+ .gr-markdown .metrics-card {
+     color: #2c3e50 !important;
+ }
+
+ .gr-markdown .metrics-card p {
+     color: #2c3e50 !important;
+     margin: 0.25rem 0 !important;
+ }
+
+ .gr-markdown .metrics-card span {
+     color: #2c3e50 !important;
+ }
+
+ /* ==================== SIDEBAR TOGGLE ==================== */
+
+ .sidebar-hidden {
+     display: none !important;
+ }
+
+ .toggle-btn {
+     position: fixed;
+     right: 20px;
+     top: 50%;
+     transform: translateY(-50%);
+     z-index: 1000;
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     color: white;
+     border: none;
+     border-radius: 50%;
+     width: 50px;
+     height: 50px;
+     cursor: pointer;
+     box-shadow: 0 4px 12px rgba(102, 126, 234, 0.4);
+     transition: all 0.3s ease;
+     font-size: 20px;
+     display: flex;
+     align-items: center;
+     justify-content: center;
+ }
+
+ .toggle-btn:hover {
+     transform: translateY(-50%) scale(1.1);
+     box-shadow: 0 6px 20px rgba(102, 126, 234, 0.6);
+ }
+
+ .settings-column {
+     transition: all 0.3s ease-in-out;
+ }
+
+ .fullscreen-chat .gradio-container {
+     max-width: 98% !important;
+ }
+
+ /* ==================== RESPONSIVE DESIGN ==================== */
+
+ @media (max-width: 768px) {
+     .research-header h1 {
+         font-size: 1.8rem;
+     }
+
+     .badge {
+         font-size: 0.75rem;
+         padding: 0.4rem 0.8rem;
+     }
+
+     .toggle-btn {
+         width: 40px;
+         height: 40px;
+         font-size: 16px;
+         right: 10px;
+     }
+
+     .metrics-card {
+         padding: 1.2rem;
+         font-size: 0.9rem;
+     }
+ }
+
+ /* ==================== DARK MODE SUPPORT ==================== */
+
+ @media (prefers-color-scheme: dark) {
+     .metrics-card {
+         background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
+         color: #e2e8f0 !important;
+     }
+
+     .metrics-card * {
+         color: #e2e8f0 !important;
+     }
+
+     .metrics-card strong {
+         color: #f1f5f9 !important;
+     }
+ }
+
+ /* ==================== LOADING SPINNER ==================== */
+
+ .loading-spinner {
+     border: 3px solid #f3f3f3;
+     border-top: 3px solid #667eea;
+     border-radius: 50%;
+     width: 40px;
+     height: 40px;
+     animation: spin 1s linear infinite;
+     margin: 20px auto;
+ }
+
+ @keyframes spin {
+     0% { transform: rotate(0deg); }
+     100% { transform: rotate(360deg); }
+ }
+ """
+
+ SIDEBAR_CSS = CUSTOM_CSS
src/utils/__init__.py ADDED
@@ -0,0 +1,21 @@
+ """
+ Utilities package initialization
+ """
+ from .logger import logger, setup_logging
+ from .decorators import handle_groq_errors, with_rate_limit, timer_decorator
+ from .validators import validate_input, validate_temperature, validate_max_tokens
+ from .helpers import generate_session_id, format_timestamp, truncate_text
+
+ __all__ = [
+     'logger',
+     'setup_logging',
+     'handle_groq_errors',
+     'with_rate_limit',
+     'timer_decorator',
+     'validate_input',
+     'validate_temperature',
+     'validate_max_tokens',
+     'generate_session_id',
+     'format_timestamp',
+     'truncate_text'
+ ]
src/utils/decorators.py ADDED
@@ -0,0 +1,83 @@
+ """
+ Utility decorators for error handling and timing
+ """
+ import time
+ from functools import wraps
+ from typing import Callable, Any
+ import groq
+ from src.utils.logger import logger
+
+
+ def handle_groq_errors(max_retries: int = 3, retry_delay: float = 1.0) -> Callable:
+     """
+     🛡️ GROQ API ERROR HANDLER WITH EXPONENTIAL BACKOFF
+     """
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         def wrapper(*args, **kwargs) -> Any:
+             last_exception = None
+
+             for attempt in range(max_retries):
+                 try:
+                     return func(*args, **kwargs)
+
+                 except groq.RateLimitError as e:
+                     last_exception = e
+                     wait_time = retry_delay * (2 ** attempt)
+                     logger.warning(f"⏳ Rate limit hit. Waiting {wait_time:.1f}s... (Attempt {attempt + 1}/{max_retries})")
+                     time.sleep(wait_time)
+
+                 except groq.APIConnectionError as e:
+                     last_exception = e
+                     wait_time = retry_delay * (2 ** attempt)
+                     logger.warning(f"🔌 Connection error. Retrying in {wait_time:.1f}s... (Attempt {attempt + 1}/{max_retries})")
+                     time.sleep(wait_time)
+
+                 except groq.AuthenticationError as e:
+                     logger.error(f"🔑 Authentication failed: {e}")
+                     raise ValueError("Invalid GROQ_API_KEY. Please check your API key.") from e
+
+                 except groq.BadRequestError as e:
+                     logger.error(f"❌ Invalid request: {e}")
+                     raise ValueError(f"Invalid request parameters: {str(e)}") from e
+
+                 except Exception as e:
+                     last_exception = e
+                     logger.error(f"❌ Unexpected error: {e}", exc_info=True)
+                     if attempt == max_retries - 1:
+                         break
+                     time.sleep(retry_delay * (2 ** attempt))
+
+             error_msg = f"Failed after {max_retries} attempts: {str(last_exception)}"
+             logger.error(error_msg)
+             raise Exception(error_msg) from last_exception
+
+         return wrapper
+     return decorator
+
+
+ def with_rate_limit(rate_limiter):
+     """
+     ⏱️ RATE LIMITING DECORATOR
+     """
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         def wrapper(*args, **kwargs) -> Any:
+             rate_limiter.acquire()
+             return func(*args, **kwargs)
+         return wrapper
+     return decorator
+
+
+ def timer_decorator(func: Callable) -> Callable:
+     """
+     ⏱️ TIMING DECORATOR
+     """
+     @wraps(func)
+     def wrapper(*args, **kwargs) -> Any:
+         start_time = time.time()
+         result = func(*args, **kwargs)
+         elapsed_time = time.time() - start_time
+         logger.debug(f"⏱️ {func.__name__} took {elapsed_time:.2f}s")
+         return result
+     return wrapper
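
A minimal sketch of how these decorators compose around a Groq call; the model id is an assumption, and `call_model` is a hypothetical helper rather than code from this commit:

```python
import groq
from src.services.rate_limiter import RateLimiter
from src.utils.decorators import handle_groq_errors, with_rate_limit, timer_decorator

limiter = RateLimiter(max_requests=50, window_seconds=60)

@handle_groq_errors(max_retries=3, retry_delay=1.0)  # outermost: retries with backoff
@with_rate_limit(limiter)                            # acquires a window slot per attempt
@timer_decorator                                     # logs the elapsed time of each call
def call_model(client: groq.Groq, prompt: str) -> str:
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",  # assumed model id; the app reads it from ModelConfig
        messages=[{"role": "user", "content": prompt}],
    )
    return completion.choices[0].message.content
```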
src/utils/helpers.py ADDED
@@ -0,0 +1,44 @@
+ """
+ Helper utility functions
+ """
+ import hashlib
+ import re
+ import uuid
+ from datetime import datetime
+ from typing import Optional
+
+
+ def generate_session_id() -> str:
+     """
+     🔑 GENERATE UNIQUE SESSION ID
+     """
+     timestamp = datetime.now().isoformat()
+     random_uuid = str(uuid.uuid4())
+     raw_id = f"{timestamp}-{random_uuid}"
+     return hashlib.sha256(raw_id.encode()).hexdigest()
+
+
+ def format_timestamp(dt: Optional[datetime] = None) -> str:
+     """
+     📅 FORMAT TIMESTAMP
+     """
+     if dt is None:
+         dt = datetime.now()
+     return dt.strftime('%Y-%m-%d %H:%M:%S')
+
+
+ def truncate_text(text: str, max_length: int = 100, suffix: str = "...") -> str:
+     """
+     ✂️ TRUNCATE TEXT TO MAX LENGTH
+     """
+     if len(text) <= max_length:
+         return text
+     return text[:max_length - len(suffix)] + suffix
+
+
+ def sanitize_filename(filename: str) -> str:
+     """
+     🧹 SANITIZE FILENAME
+     """
+     filename = re.sub(r'[<>:"/\\|?*]', '_', filename)
+     return filename[:255]
src/utils/logger.py ADDED
@@ -0,0 +1,68 @@
+ """
+ Enhanced logging configuration - Independent module
+ """
+ import logging
+ import os
+ from pathlib import Path
+ from logging.handlers import RotatingFileHandler
+
+
+ def setup_logging():
+     """
+     🔧 ENHANCED LOGGING SETUP
+     """
+     logger = logging.getLogger('reasoning_system')
+
+     log_level_str = os.getenv('LOG_LEVEL', 'INFO').upper()
+     log_level = getattr(logging, log_level_str, logging.INFO)
+     logger.setLevel(log_level)
+
+     if logger.handlers:
+         return logger
+
+     # Console handler
+     console_handler = logging.StreamHandler()
+     console_handler.setLevel(logging.INFO)
+     console_format = logging.Formatter(
+         '%(asctime)s | %(levelname)-8s | %(message)s',
+         datefmt='%H:%M:%S'
+     )
+     console_handler.setFormatter(console_format)
+
+     # File handler with rotation
+     log_dir = Path('logs')
+     log_dir.mkdir(exist_ok=True)
+
+     file_handler = RotatingFileHandler(
+         log_dir / 'reasoning_system.log',
+         maxBytes=10*1024*1024,
+         backupCount=5,
+         encoding='utf-8'
+     )
+     file_handler.setLevel(logging.DEBUG)
+     file_format = logging.Formatter(
+         '%(asctime)s | %(levelname)-8s | %(name)s:%(lineno)d | %(message)s',
+         datefmt='%Y-%m-%d %H:%M:%S'
+     )
+     file_handler.setFormatter(file_format)
+
+     # Error-only file handler
+     error_handler = RotatingFileHandler(
+         log_dir / 'errors.log',
+         maxBytes=5*1024*1024,
+         backupCount=3,
+         encoding='utf-8'
+     )
+     error_handler.setLevel(logging.ERROR)
+     error_handler.setFormatter(file_format)
+
+     logger.addHandler(console_handler)
+     logger.addHandler(file_handler)
+     logger.addHandler(error_handler)
+     logger.propagate = False
+
+     return logger
+
+
+ # Initialize logger at module level
+ logger = setup_logging()
src/utils/validators.py ADDED
@@ -0,0 +1,50 @@
+ """
+ Input validation utilities
+ """
+
+
+ def validate_input(text: str, max_length: int = 10000) -> tuple[bool, str]:
+     """
+     ✅ VALIDATE USER INPUT
+     """
+     if not text or not text.strip():
+         return False, "Input cannot be empty"
+
+     if len(text) > max_length:
+         return False, f"Input exceeds maximum length of {max_length} characters"
+
+     # XSS protection
+     dangerous_patterns = ['<script>', 'javascript:', 'onerror=', 'onclick=']
+     text_lower = text.lower()
+     if any(pattern in text_lower for pattern in dangerous_patterns):
+         return False, "Input contains potentially dangerous content"
+
+     return True, ""
+
+
+ def validate_temperature(temp: float, min_temp: float = 0.0, max_temp: float = 2.0) -> tuple[bool, str]:
+     """
+     ✅ VALIDATE TEMPERATURE PARAMETER
+     """
+     if not isinstance(temp, (int, float)):
+         return False, "Temperature must be a number"
+
+     if not (min_temp <= temp <= max_temp):
+         return False, f"Temperature must be between {min_temp} and {max_temp}"
+
+     return True, ""
+
+
+ def validate_max_tokens(tokens: int, min_tokens: int = 100, max_tokens: int = 32000) -> tuple[bool, str]:
+     """
+     ✅ VALIDATE MAX TOKENS PARAMETER
+     """
+     if not isinstance(tokens, int):
+         return False, "Max tokens must be an integer"
+
+     if not (min_tokens <= tokens <= max_tokens):
+         return False, f"Max tokens must be between {min_tokens} and {max_tokens}"
+
+     return True, ""
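
A minimal sketch showing the validators in use; the sample values are arbitrary:

```python
from src.utils.validators import validate_input, validate_temperature, validate_max_tokens

checks = [
    ("input", validate_input("Explain Tree of Thoughts")),
    ("temperature", validate_temperature(0.7)),
    ("max_tokens", validate_max_tokens(4000)),
]
for label, (ok, error) in checks:
    if not ok:
        raise ValueError(f"{label}: {error}")
```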
tests/__init__.py ADDED
@@ -0,0 +1 @@
+ """Tests package."""
tests/test_api.py ADDED
@@ -0,0 +1,2 @@
+ def test_placeholder():
+     assert True
tests/test_cache.py ADDED
@@ -0,0 +1,2 @@
+ def test_placeholder():
+     assert True
tests/test_export.py ADDED
@@ -0,0 +1,2 @@
+ def test_placeholder():
+     assert True
tests/test_reasoner.py ADDED
@@ -0,0 +1,2 @@
+ def test_placeholder():
+     assert True