diff --git a/.github/workflows/nextjs.yml b/.github/workflows/astro.yml
similarity index 63%
rename from .github/workflows/nextjs.yml
rename to .github/workflows/astro.yml
index ed74736..54d4be4 100644
--- a/.github/workflows/nextjs.yml
+++ b/.github/workflows/astro.yml
@@ -1,8 +1,8 @@
-# Sample workflow for building and deploying a Next.js site to GitHub Pages
+# Sample workflow for building and deploying an Astro site to GitHub Pages
#
-# To get started with Next.js see: https://nextjs.org/docs/getting-started
+# To get started with Astro see: https://docs.astro.build/en/getting-started/
#
-name: Deploy Next.js site to Pages
+name: Deploy Astro site to Pages
on:
# Runs on pushes targeting the default branch
@@ -24,9 +24,13 @@ concurrency:
group: "pages"
cancel-in-progress: false
+env:
+ BUILD_PATH: "." # default value when not using subfolders
+ # BUILD_PATH: subfolder
+
jobs:
- # Build job
build:
+ name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -38,11 +42,13 @@ jobs:
echo "manager=yarn" >> $GITHUB_OUTPUT
echo "command=install" >> $GITHUB_OUTPUT
echo "runner=yarn" >> $GITHUB_OUTPUT
+ echo "lockfile=yarn.lock" >> $GITHUB_OUTPUT
exit 0
elif [ -f "${{ github.workspace }}/package.json" ]; then
echo "manager=npm" >> $GITHUB_OUTPUT
echo "command=ci" >> $GITHUB_OUTPUT
echo "runner=npx --no-install" >> $GITHUB_OUTPUT
+ echo "lockfile=package-lock.json" >> $GITHUB_OUTPUT
exit 0
else
echo "Unable to determine package manager"
@@ -53,40 +59,31 @@ jobs:
with:
node-version: "20"
cache: ${{ steps.detect-package-manager.outputs.manager }}
+ cache-dependency-path: ${{ env.BUILD_PATH }}/${{ steps.detect-package-manager.outputs.lockfile }}
- name: Setup Pages
+ id: pages
uses: actions/configure-pages@v5
- with:
- # Automatically inject basePath in your Next.js configuration file and disable
- # server side image optimization (https://nextjs.org/docs/api-reference/next/image#unoptimized).
- #
- # You may remove this line if you want to manage the configuration yourself.
- static_site_generator: next
- - name: Restore cache
- uses: actions/cache@v4
- with:
- path: |
- .next/cache
- # Generate a new cache whenever packages or source files change.
- key: ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json', '**/yarn.lock') }}-${{ hashFiles('**.[jt]s', '**.[jt]sx') }}
- # If source files changed but packages didn't, rebuild from a prior cache.
- restore-keys: |
- ${{ runner.os }}-nextjs-${{ hashFiles('**/package-lock.json', '**/yarn.lock') }}-
- name: Install dependencies
run: ${{ steps.detect-package-manager.outputs.manager }} ${{ steps.detect-package-manager.outputs.command }}
- - name: Build with Next.js
- run: ${{ steps.detect-package-manager.outputs.runner }} next build
+ working-directory: ${{ env.BUILD_PATH }}
+ - name: Build with Astro
+ run: |
+ ${{ steps.detect-package-manager.outputs.runner }} astro build \
+ --site "${{ steps.pages.outputs.origin }}" \
+ --base "${{ steps.pages.outputs.base_path }}"
+ working-directory: ${{ env.BUILD_PATH }}
- name: Upload artifact
uses: actions/upload-pages-artifact@v3
with:
- path: ./out
+ path: ${{ env.BUILD_PATH }}/dist
- # Deployment job
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
- runs-on: ubuntu-latest
needs: build
+ runs-on: ubuntu-latest
+ name: Deploy
steps:
- name: Deploy to GitHub Pages
id: deployment
diff --git a/gen-man-publish.yml b/.github/workflows/gen-man-publish.yml
similarity index 100%
rename from gen-man-publish.yml
rename to .github/workflows/gen-man-publish.yml
diff --git a/.github/workflows/gen-man.yml b/.github/workflows/gen-man.yml
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/.github/workflows/gen-man.yml
@@ -0,0 +1 @@
+
diff --git a/.github/workflows/ai-code-review.yml b/ai-code-review.yaml
similarity index 100%
rename from .github/workflows/ai-code-review.yml
rename to ai-code-review.yaml
diff --git a/api/submit b/api/submit
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/api/submit
@@ -0,0 +1 @@
+
diff --git a/app/ai_review.py b/app/ai_review.py
new file mode 100644
index 0000000..c92befa
--- /dev/null
+++ b/app/ai_review.py
@@ -0,0 +1,12 @@
+import random
+
+def analyze_pr(repo_url: str, pr_number: int):
+ # Real logic would clone repo, checkout PR diff, run LLM, etc.
+ mock_comments = [
+ f"โ
Efficient use of data structures in PR #{pr_number}.",
+ "๐งน Consider removing unused imports.",
+ "๐ Security tip: mask sensitive keys in logs.",
+ "๐ฆ Use semantic versioning in your package updates.",
+ "๐ง Consider adding docstrings to helper functions."
+ ]
+ return random.sample(mock_comments, k=3)
diff --git a/app/main.py b/app/main.py
index a1af575..d5268fc 100644
--- a/app/main.py
+++ b/app/main.py
@@ -1,28 +1,28 @@
-from fastapi import FastAPI, HTTPException
+from fastapi import FastAPI, Request
+from fastapi.staticfiles import StaticFiles
+from fastapi.responses import JSONResponse, FileResponse
from pydantic import BaseModel
-from celery.result import AsyncResult
-from app.tasks import analyze_pull_request
import os
+import logging
+from app.ai_review import analyze_pr
app = FastAPI()
+app.mount("/static", StaticFiles(directory="static"), name="static")
+
+@app.get("/")
+async def serve_index():
+ return FileResponse("static/index.html")
+
class PRRequest(BaseModel):
repo_url: str
pr_number: int
@app.post("/api/submit")
-async def submit_pr(pr: PRRequest):
- task = analyze_pull_request.delay(pr.repo_url, pr.pr_number)
- return {"job_id": task.id}
-
-@app.get("/api/status/{job_id}")
-async def get_status(job_id: str):
- task_result = AsyncResult(job_id)
- if task_result.state == 'PENDING':
- return {"status": "pending"}
- elif task_result.state == 'SUCCESS':
- return {"status": "completed", "result": task_result.result}
- elif task_result.state == 'FAILURE':
- return {"status": "failed", "error": str(task_result.result)}
- else:
- return {"status": task_result.state}
+async def submit_review(pr: PRRequest):
+ try:
+ result = analyze_pr(pr.repo_url, pr.pr_number)
+ return JSONResponse(content={"success": True, "comments": result})
+    except Exception:
+        logging.exception("Error while analyzing PR %s#%s", pr.repo_url, pr.pr_number)
+        return JSONResponse(status_code=500, content={"success": False, "error": "An internal error has occurred."})
diff --git a/app/run.sh b/app/run.sh
new file mode 100644
index 0000000..ab8a9bd
--- /dev/null
+++ b/app/run.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Make this script executable once with: chmod +x app/run.sh
+uvicorn app.main:app --host 0.0.0.0 --port 8000 --reload
diff --git a/app/tasks.py b/app/tasks.py
index cc192c8..4f9eb4f 100644
--- a/app/tasks.py
+++ b/app/tasks.py
@@ -1,8 +1,36 @@
from celery import Celery
+from typing import List
+from celery import shared_task
+from .utils.github_client import fetch_pull_request_diff
+from .utils.llm_client import analyze_code_with_llm
+from .models import save_review_comments
import httpx
import os
import difflib
-from typing import List
+
+@shared_task
+def analyze_pull_request(repo_url: str, pr_number: int):
+ try:
+ # 1. Fetch PR diff
+ diff_text = fetch_pull_request_diff(repo_url, pr_number)
+ if not diff_text:
+ return {"success": False, "error": "Unable to fetch diff"}
+
+ # 2. Use LLM to analyze PR diff
+ review_comments = analyze_code_with_llm(diff_text)
+
+ # 3. Persist and/or post comments
+ save_review_comments(repo_url, pr_number, review_comments)
+
+ return {
+ "success": True,
+ "comments": review_comments,
+ }
+ except Exception as e:
+ return {
+ "success": False,
+ "error": str(e),
+ }
celery_app = Celery(
'tasks',
diff --git a/backend/app.py b/backend/app/main.py
similarity index 81%
rename from backend/app.py
rename to backend/app/main.py
index 1399ebc..06ea15a 100644
--- a/backend/app.py
+++ b/backend/app/main.py
@@ -38,6 +38,15 @@ async def predict(request: PredictRequest):
@app.get("/health")
async def health_check():
return {"status": "healthy"}
+# Enqueue an async PR review (assumes the review_pr Celery task is imported in this module)
+@app.post("/api/submit")
+async def submit_pr(payload: dict):
+ repo_url = payload.get("repo_url")
+ pr_number = payload.get("pr_number")
+ # enqueue task to Celery
+ task = review_pr.delay(repo_url, pr_number)
+ return {"success": True, "task_id": task.id}
+
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/database.py b/database.py
new file mode 100644
index 0000000..fc73192
--- /dev/null
+++ b/database.py
@@ -0,0 +1,6 @@
+# database.py (or models.py): stub persistence layer for review comments
+
+def save_review_comments(repo_url: str, pr_number: int, comments: list):
+ print(f"Saving review for {repo_url} PR #{pr_number}")
+ for comment in comments:
+ print(f"- {comment}")
diff --git a/index.html b/index.html
deleted file mode 100644
index 0de8708..0000000
--- a/index.html
+++ /dev/null
@@ -1,301 +0,0 @@
-enclov-AI – GitHub PR Code Reviewer
-enclov-AI
-Supercharge your GitHub PR workflow with real-time AI-driven review automation.
-Powered by OpenAI, FastAPI, Redis, and Celery.
-View on GitHub
-Submit GitHub PR for Review
-About QUBUHUB
-QUBUHUB is the heartbeat of next-gen open innovation. From soulful companions like Lola to decentralized infrastructure with Fadaka, we've been crafting the future, one stack at a time. Built by dreamers, hackers, and pragmatists.
-Projects from QUBUHUB
-Lola: Emotionally intelligent AI companion. GitHub
-RODAAI: Open-source AI + data science orchestration. GitHub
-Fadaka Blockchain: Secure and extensible blockchain network. GitHub
-AgbakoAI: Adaptable modular AI for real-world industries. GitHub
-Swiftbot: Fast orchestration bot for modern workflows. GitHub
-Kubu-Hai: Full-stack generator with FastAPI + Dart. GitHub
-Enclov-AI: The AI code reviewer you're reading right now. GitHub
diff --git a/main.py b/main.py
index e824c69..f568919 100644
--- a/main.py
+++ b/main.py
@@ -8,6 +8,14 @@
from submit_pr import router as submit_pr_router
from providers.enclovai_provider import call_enclovai
+from fastapi.middleware.cors import CORSMiddleware  # needed for the middleware below (if not already imported)
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # tighten to specific origins in production
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
def process_prompt(prompt, model="auto"):
return call_enclovai(prompt, model=model)
diff --git a/requirements.txt b/requirements.txt
index 1bce781..57e016d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,7 +18,7 @@ fastapi==0.95.1
uvicorn[standard]==0.22.0
kubernetes==26.1.0
cpufeature==0.9.0
-torch==2.0.1
+torch==2.7.1
petals==0.1
Flask==2.2.5
Flask-Cors==6.0.0
@@ -26,20 +26,20 @@ Flask-SQLAlchemy==3.0.0
Flask-RESTful==0.3.9
Flask-Migrate==3.1.0
Flask-WTF==1.0.1
-gunicorn==20.1.0
-requests==2.31.0
+gunicorn==23.0.0
+requests==2.32.2
numpy==1.24.3
pandas==1.5.3
-scikit-learn==1.2.2
-tensorflow==2.11.1
-torch==2.0.1
+scikit-learn==1.5.1
+tensorflow==2.12.1
+torch==2.7.1
torchvision==0.15.2
-transformers==4.28.1
+transformers==4.50.0
spacy==3.5.0
-nltk==3.7
+nltk==3.9.1
python-dotenv==0.21.1
celery==5.2.6
-redis==4.4.0
+redis==4.4.4
Flask-Login==0.6.2
Flask-Mail==0.9.1
Flask-Admin==1.5.8
@@ -53,25 +53,25 @@ codecov==2.1.13
pytest-cov==4.1.0
build==0.10.0
wheel==0.42.0
-black==23.3.0
+black==24.3.0
mypy==1.12.0
pytest==7.4.4
pytest-pyodide==0.54.0
pyodide-build==0.24.1
webdriver-manager==4.0.1
-keras==2.12.0
+keras==3.9.0
imgaug==0.4.0
nlpaug==1.1.3
tsaug==0.2.1
boto3==1.18.69
-sagemaker==2.59.3
-Pillow==9.5.0
+sagemaker==2.237.3
+Pillow==10.3.0
sentencepiece==0.1.98
matplotlib==3.7.1
onnxruntime==1.14.1
librosa==0.9.2
scipy==1.10.1
-aiohttp==3.8.4
+aiohttp==3.11.0b0
google-cloud-speech==2.22.2
fastapi==0.95.0
uvicorn==0.22.0
diff --git a/scripts/generate_man_pages.py b/scripts/gen_man_pages.py
similarity index 100%
rename from scripts/generate_man_pages.py
rename to scripts/gen_man_pages.py
diff --git a/static/index.html b/static/index.html
new file mode 100644
index 0000000..bb99cd0
--- /dev/null
+++ b/static/index.html
@@ -0,0 +1,200 @@
+enclovAI – GitHub PR Code Reviewer
+© 2025 enclovAI – Built with ❤️ for developers
diff --git a/utils/github_client.py b/utils/github_client.py
new file mode 100644
index 0000000..972e496
--- /dev/null
+++ b/utils/github_client.py
@@ -0,0 +1,13 @@
+# utils/github_client.py
+
+import os
+
+import requests
+
+def fetch_pull_request_diff(repo_url: str, pr_number: int) -> str:
+    """Return the unified diff for a pull request, or an empty string on failure.
+
+    Reads the GitHub token from the GITHUB_TOKEN environment variable.
+    """
+    owner_repo = repo_url.replace("https://github.com/", "").rstrip("/")
+    headers = {
+        "Accept": "application/vnd.github.v3.diff",
+        "Authorization": f"Bearer {os.getenv('GITHUB_TOKEN', '')}",
+    }
+    url = f"https://api.github.com/repos/{owner_repo}/pulls/{pr_number}"
+    response = requests.get(url, headers=headers, timeout=30)
+    return response.text if response.status_code == 200 else ""
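+
+# Illustrative usage (hypothetical repository and PR number), assuming GITHUB_TOKEN is set:
+#   diff_text = fetch_pull_request_diff("https://github.com/octocat/Hello-World", 1)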
diff --git a/utils/llm_client.py b/utils/llm_client.py
new file mode 100644
index 0000000..3840efe
--- /dev/null
+++ b/utils/llm_client.py
@@ -0,0 +1,21 @@
+# utils/llm_client.py
+
+import os
+
+from openai import OpenAI
+
+# The API key is read from the OPENAI_API_KEY environment variable rather than hardcoded.
+openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+def analyze_code_with_llm(diff: str) -> list:
+ system_prompt = (
+ "You are a senior code reviewer. Analyze this GitHub PR diff and return improvement suggestions, "
+ "style issues, refactor ideas, and security concerns."
+ )
+ response = openai_client.chat.completions.create(
+ model="gpt-4", # or gpt-3.5-turbo, llama3, etc.
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": diff}
+ ],
+ temperature=0.3
+ )
+    suggestions = response.choices[0].message.content.strip()
+    # Return one suggestion per non-empty line.
+    return [line.strip() for line in suggestions.split("\n") if line.strip()]
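+
+# Illustrative usage, chaining the GitHub client above (names assumed from this PR):
+#   comments = analyze_code_with_llm(fetch_pull_request_diff(repo_url, pr_number))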