diff --git a/.gitignore b/.gitignore index fba57f85..c122c426 100644 --- a/.gitignore +++ b/.gitignore @@ -278,6 +278,11 @@ MSG*.bin # python *.pyc +__pycache__/ +*.pytest_cache/ + +# Frontend configuration files (contain API keys) +src/frontend/config/settings.json **/Generated Files/ **/Merged/* diff --git a/README.md b/README.md index d34987ee..602d0ae8 100644 --- a/README.md +++ b/README.md @@ -99,8 +99,17 @@ Documentation](https://github.com/SoftwareDevLabs). 📁 notebooks/ → Quick experiments and prototyping 📁 tests/ → Unit, integration, and end-to-end tests 📁 src/ → The core engine — all logic lives here (./src/README.md) + └── frontend/ → Web-based GUI for LLM backend configuration ``` + +### Frontend GUI Features + +The new frontend provides a web-based interface for: +- **Backend Selection**: Choose between OpenAI, Anthropic, and other LLM providers +- **Configuration Management**: Set API keys, models, and provider-specific settings +- **Real-time Switching**: Switch between backends with live updates +- **Settings Persistence**: All configurations are saved and persist across sessions --- ## ⚡ Best Practices @@ -120,10 +129,18 @@ Documentation](https://github.com/SoftwareDevLabs). ## 🧭 Getting Started 1. Clone the repo -2. Install via `requirements.txt` -3. Set up model configs -4. Check sample code -5. Begin in notebooks +2. Install via `requirements.txt`: + ```bash + pip install -r requirements.txt + ``` +3. Launch the frontend interface: + ```bash + python run_frontend.py + ``` +4. Access the web interface at `http://localhost:5000` +5. Configure your LLM backends in the Settings page +6. Check sample code in the `examples/` directory +7. 
Begin experimenting in notebooks

---

diff --git a/requirements.txt b/requirements.txt
index e69de29b..1015ff7b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+# Core SDLC dependencies
+Flask==3.0.0
+Werkzeug==3.0.1
+
+# Development and testing
+pytest==8.4.1
diff --git a/run_frontend.py b/run_frontend.py
new file mode 100755
index 00000000..2b3513ae
--- /dev/null
+++ b/run_frontend.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+"""
+Launch script for the SDLC Core frontend application.
+"""
+
+import os
+import sys
+
+# Add the repository root to the Python path so 'src.frontend.app' resolves
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+from src.frontend.app import app
+
+if __name__ == '__main__':
+    print("Starting SDLC Core Frontend...")
+    print("Access the application at: http://localhost:5000")
+    print("Dashboard: http://localhost:5000/")
+    print("Settings: http://localhost:5000/settings")
+    print("\nPress Ctrl+C to stop the server")
+
+    app.run(debug=True, host='0.0.0.0', port=5000)
\ No newline at end of file
diff --git a/src/frontend/README.md b/src/frontend/README.md
new file mode 100644
index 00000000..9159965e
--- /dev/null
+++ b/src/frontend/README.md
@@ -0,0 +1,64 @@
+# Frontend for SDLC Core LLM Infrastructure
+
+This frontend application provides a web-based GUI for managing LLM backend settings in the SDLC Core system.
+
+## Features
+
+- **Backend Selection**: Choose between different LLM providers (OpenAI, Anthropic, etc.)
+- **Configuration Management**: Set API keys, models, and other backend-specific settings
+- **Visual Interface**: Clean, responsive web interface with real-time updates
+- **Settings Persistence**: Configuration is saved locally and persists across sessions
+
+## Installation
+
+1. Install dependencies:
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+2. Run the application:
+   ```bash
+   python app.py
+   ```
+
+3. 
Open your browser and navigate to `http://localhost:5000` + +## Usage + +### Dashboard +- View the currently active LLM backend +- See all available backends +- Quick-switch between backends + +### Settings Page +- Configure API keys for each backend +- Set preferred models +- Enable/disable specific backends +- Save and persist configuration changes + +## Configuration + +The application stores settings in `config/settings.json`. This file is automatically created with default settings on first run. + +Default backends supported: +- **OpenAI**: GPT models (GPT-3.5, GPT-4, etc.) +- **Anthropic**: Claude models + +## API Endpoints + +- `GET /api/config` - Get current configuration +- `POST /api/config` - Update configuration +- `POST /api/backend/select` - Switch active backend + +## File Structure + +``` +frontend/ +├── app.py # Main Flask application +├── requirements.txt # Python dependencies +├── templates/ +│ ├── index.html # Dashboard page +│ └── settings.html # Settings configuration page +└── config/ + └── settings.json # Persistent configuration (auto-generated) +``` \ No newline at end of file diff --git a/src/frontend/app.py b/src/frontend/app.py new file mode 100644 index 00000000..b3b84f91 --- /dev/null +++ b/src/frontend/app.py @@ -0,0 +1,106 @@ +""" +Frontend web application for SDLC_core LLM infrastructure settings. +Provides a GUI interface for selecting and configuring LLM backends. 
+"""
+
+from flask import Flask, render_template, request, jsonify, redirect, url_for
+import json
+import os
+from typing import Dict, Any
+
+app = Flask(__name__)
+
+# Configuration file path
+CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'config', 'settings.json')
+
+# Default configuration
+DEFAULT_CONFIG = {
+    "selected_backend": "openai",
+    "backends": {
+        "openai": {
+            "name": "OpenAI",
+            "description": "OpenAI's GPT models (GPT-3.5, GPT-4, etc.)",
+            "api_key": "",
+            "model": "gpt-3.5-turbo",
+            "enabled": True
+        },
+        "anthropic": {
+            "name": "Anthropic",
+            "description": "Anthropic's Claude models",
+            "api_key": "",
+            "model": "claude-3-sonnet-20240229",
+            "enabled": True
+        }
+    }
+}
+
+def load_config() -> Dict[str, Any]:
+    """Load configuration from file or return default."""
+    try:
+        if os.path.exists(CONFIG_FILE):
+            with open(CONFIG_FILE, 'r') as f:
+                return json.load(f)
+    except Exception as e:
+        print(f"Error loading config: {e}")
+    # Deep copy via JSON round-trip so callers cannot mutate DEFAULT_CONFIG's nested dicts
+    return json.loads(json.dumps(DEFAULT_CONFIG))
+
+def save_config(config: Dict[str, Any]) -> bool:
+    """Save configuration to file."""
+    try:
+        os.makedirs(os.path.dirname(CONFIG_FILE), exist_ok=True)
+        with open(CONFIG_FILE, 'w') as f:
+            json.dump(config, f, indent=2)
+        return True
+    except Exception as e:
+        print(f"Error saving config: {e}")
+        return False
+
+@app.route('/')
+def index():
+    """Main dashboard."""
+    config = load_config()
+    return render_template('index.html', config=config)
+
+@app.route('/settings')
+def settings():
+    """Settings page for backend configuration."""
+    config = load_config()
+    return render_template('settings.html', config=config)
+
+@app.route('/api/config', methods=['GET'])
+def get_config():
+    """API endpoint to get current configuration."""
+    config = load_config()
+    return jsonify(config)
+
+@app.route('/api/config', methods=['POST'])
+def update_config():
+    """API endpoint to update configuration."""
+    try:
+        new_config = request.json
+        if save_config(new_config):
+            return jsonify({"success": 
True, "message": "Configuration updated successfully"})
+        else:
+            return jsonify({"success": False, "message": "Failed to save configuration"}), 500
+    except Exception as e:
+        return jsonify({"success": False, "message": str(e)}), 400
+
+@app.route('/api/backend/select', methods=['POST'])
+def select_backend():
+    """API endpoint to select a backend."""
+    try:
+        data = request.json
+        backend = data.get('backend')
+
+        config = load_config()
+        if backend not in config['backends']:
+            return jsonify({"success": False, "message": "Invalid backend"}), 400
+        config['selected_backend'] = backend
+        if save_config(config):
+            return jsonify({"success": True, "message": f"Backend switched to {backend}"})
+        return jsonify({"success": False, "message": "Failed to save configuration"}), 500
+    except Exception as e:
+        return jsonify({"success": False, "message": str(e)}), 400
+
+if __name__ == '__main__':
+    app.run(debug=True, host='0.0.0.0', port=5000)
\ No newline at end of file
diff --git a/src/frontend/requirements.txt b/src/frontend/requirements.txt
new file mode 100644
index 00000000..5beea3ba
--- /dev/null
+++ b/src/frontend/requirements.txt
@@ -0,0 +1,2 @@
+Flask==3.0.0
+Werkzeug==3.0.1
\ No newline at end of file
diff --git a/src/frontend/templates/index.html b/src/frontend/templates/index.html
new file mode 100644
index 00000000..8edfbeb6
--- /dev/null
+++ b/src/frontend/templates/index.html
@@ -0,0 +1,144 @@
+
+
+
+
+
+    SDLC Core - LLM Infrastructure
+
+
+
+
+
+
+
+
+
+

LLM Infrastructure Dashboard

+ + +
+
+
+ Current Backend +
+
+

{{ config.backends[config.selected_backend].name }}

+ + Active + +
+

{{ config.backends[config.selected_backend].description }}

+
+
+ + +
+
+
+ Available Backends +
+
+ {% for backend_id, backend in config.backends.items() %} +
+
+
+
+
+
{{ backend.name }}
+

{{ backend.description }}

+ {% if backend.model %} + + {{ backend.model }} + + {% endif %} +
+
+ {% if backend_id == config.selected_backend %} + Current + {% else %} + + {% endif %} +
+
+
+
+
+ {% endfor %} +
+ + +
+
+
+
+
+ + + + + \ No newline at end of file diff --git a/src/frontend/templates/settings.html b/src/frontend/templates/settings.html new file mode 100644 index 00000000..42402438 --- /dev/null +++ b/src/frontend/templates/settings.html @@ -0,0 +1,262 @@ + + + + + + Settings - SDLC Core + + + + + + + +
+
+
+
+

LLM Backend Settings

+ + Back to Dashboard + +
+ +
+ +
+
+
+ Backend Selection +
+

Choose your preferred LLM backend provider.

+ +
+ {% for backend_id, backend in config.backends.items() %} +
+
+ + + {{ backend.description }} +
+
+ {% endfor %} +
+
+
+ + + {% for backend_id, backend in config.backends.items() %} +
+
+ {{ backend.name }} Configuration +
+ +
+
+
+ + +
+ Your API key is stored locally and used for authentication. +
+
+
+ +
+
+ + +
+ Specify the model to use for this backend. +
+
+
+
+ +
+ + +
+
+ {% endfor %} + + +
+ + +
+
+
+
+
+ + + + + + + + + + + \ No newline at end of file diff --git a/src/frontend/test_app.py b/src/frontend/test_app.py new file mode 100644 index 00000000..eb71424d --- /dev/null +++ b/src/frontend/test_app.py @@ -0,0 +1,182 @@ +""" +Tests for the SDLC Core frontend application. +""" + +import pytest +import json +import os +import tempfile +from unittest.mock import patch + +# Add the parent directory to sys.path for imports +import sys +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from app import app, load_config, save_config, DEFAULT_CONFIG + + +@pytest.fixture +def client(): + """Create a test client for the Flask app.""" + app.config['TESTING'] = True + with app.test_client() as client: + yield client + + +@pytest.fixture +def temp_config_file(): + """Create a temporary config file for testing.""" + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: + json.dump(DEFAULT_CONFIG, f) + temp_file = f.name + + yield temp_file + + # Cleanup + if os.path.exists(temp_file): + os.unlink(temp_file) + + +class TestConfigManagement: + """Test configuration loading and saving.""" + + def test_load_default_config(self): + """Test that default config is returned when no file exists.""" + with patch('app.CONFIG_FILE', '/nonexistent/file.json'): + config = load_config() + assert config == DEFAULT_CONFIG + + def test_save_and_load_config(self, temp_config_file): + """Test saving and loading configuration.""" + test_config = { + "selected_backend": "anthropic", + "backends": { + "openai": {"name": "OpenAI", "api_key": "test-key", "enabled": True}, + "anthropic": {"name": "Anthropic", "api_key": "test-key-2", "enabled": False} + } + } + + with patch('app.CONFIG_FILE', temp_config_file): + assert save_config(test_config) + loaded_config = load_config() + assert loaded_config == test_config + + +class TestRoutes: + """Test Flask routes.""" + + def test_index_route(self, client): + """Test the main dashboard route.""" + response = 
client.get('/') + assert response.status_code == 200 + assert b'LLM Infrastructure Dashboard' in response.data + + def test_settings_route(self, client): + """Test the settings page route.""" + response = client.get('/settings') + assert response.status_code == 200 + assert b'LLM Backend Settings' in response.data + + def test_get_config_api(self, client): + """Test the GET /api/config endpoint.""" + response = client.get('/api/config') + assert response.status_code == 200 + data = json.loads(response.data) + assert 'selected_backend' in data + assert 'backends' in data + + def test_update_config_api(self, client, temp_config_file): + """Test the POST /api/config endpoint.""" + test_config = { + "selected_backend": "anthropic", + "backends": {"test": {"name": "Test", "enabled": True}} + } + + with patch('app.CONFIG_FILE', temp_config_file): + response = client.post('/api/config', + data=json.dumps(test_config), + content_type='application/json') + assert response.status_code == 200 + data = json.loads(response.data) + assert data['success'] is True + + def test_select_backend_api(self, client, temp_config_file): + """Test the POST /api/backend/select endpoint.""" + with patch('app.CONFIG_FILE', temp_config_file): + # Test valid backend selection + response = client.post('/api/backend/select', + data=json.dumps({"backend": "anthropic"}), + content_type='application/json') + assert response.status_code == 200 + data = json.loads(response.data) + assert data['success'] is True + + # Test invalid backend selection + response = client.post('/api/backend/select', + data=json.dumps({"backend": "invalid"}), + content_type='application/json') + assert response.status_code == 400 + data = json.loads(response.data) + assert data['success'] is False + + +class TestFrontendIntegration: + """Test frontend integration scenarios.""" + + def test_backend_switching_workflow(self, client, temp_config_file): + """Test the complete backend switching workflow.""" + with 
patch('app.CONFIG_FILE', temp_config_file): + # Get initial config + response = client.get('/api/config') + initial_config = json.loads(response.data) + initial_backend = initial_config['selected_backend'] + + # Switch to a different backend + new_backend = 'anthropic' if initial_backend == 'openai' else 'openai' + response = client.post('/api/backend/select', + data=json.dumps({"backend": new_backend}), + content_type='application/json') + assert response.status_code == 200 + + # Verify the switch took effect + response = client.get('/api/config') + updated_config = json.loads(response.data) + assert updated_config['selected_backend'] == new_backend + + def test_settings_persistence(self, client, temp_config_file): + """Test that settings persist across requests.""" + test_config = { + "selected_backend": "anthropic", + "backends": { + "openai": { + "name": "OpenAI", + "description": "OpenAI's GPT models", + "api_key": "test-openai-key", + "model": "gpt-4", + "enabled": True + }, + "anthropic": { + "name": "Anthropic", + "description": "Anthropic's Claude models", + "api_key": "test-anthropic-key", + "model": "claude-3-opus", + "enabled": True + } + } + } + + with patch('app.CONFIG_FILE', temp_config_file): + # Save configuration + response = client.post('/api/config', + data=json.dumps(test_config), + content_type='application/json') + assert response.status_code == 200 + + # Verify persistence by getting config again + response = client.get('/api/config') + retrieved_config = json.loads(response.data) + assert retrieved_config == test_config + + +if __name__ == '__main__': + pytest.main([__file__, '-v']) \ No newline at end of file