codebase_analyzer.py
327 lines (275 loc) · 11.4 KB
#!/usr/bin/env python3
"""
Codebase Analyzer -- scan any project directory for code health intelligence.

Produces: language breakdown, largest files, TODO/FIXME/HACK audit,
documentation coverage, dependency summary, and health indicators.
No git required.

Usage:
    py codebase_analyzer.py /path/to/project
    py codebase_analyzer.py /path/to/project --verbose
    py codebase_analyzer.py /path/to/project --json
    py codebase_analyzer.py .    # Current directory
"""
import argparse
import json
import os
import re
import sys
from collections import Counter, defaultdict
from pathlib import Path
# Language detection by extension
LANG_MAP = {
    ".py": "Python", ".js": "JavaScript", ".ts": "TypeScript", ".tsx": "TypeScript (JSX)",
    ".jsx": "JavaScript (JSX)", ".java": "Java", ".go": "Go", ".rs": "Rust",
    ".rb": "Ruby", ".php": "PHP", ".c": "C", ".cpp": "C++", ".h": "C/C++ Header",
    ".cs": "C#", ".swift": "Swift", ".kt": "Kotlin", ".scala": "Scala",
    ".r": "R", ".sql": "SQL", ".sh": "Shell", ".bash": "Shell",
    ".html": "HTML", ".css": "CSS", ".scss": "SCSS", ".less": "LESS",
    ".json": "JSON", ".yaml": "YAML", ".yml": "YAML", ".toml": "TOML",
    ".xml": "XML", ".md": "Markdown", ".txt": "Text", ".csv": "CSV",
    ".dockerfile": "Dockerfile", ".tf": "Terraform", ".hcl": "HCL",
    ".vue": "Vue", ".svelte": "Svelte", ".dart": "Dart",
}

# Directories to skip
SKIP_DIRS = {
    "node_modules", ".git", "__pycache__", ".venv", "venv", "env",
    "dist", "build", ".next", ".nuxt", "target", "bin", "obj",
    ".cache", ".tox", "vendor", "bower_components", ".idea", ".vscode",
    ".claude", "coverage", ".nyc_output",
}

# TODO patterns
TODO_PATTERNS = [
    (r'\b(TODO)\b[:\s]*(.*)', "TODO"),
    (r'\b(FIXME)\b[:\s]*(.*)', "FIXME"),
    (r'\b(HACK)\b[:\s]*(.*)', "HACK"),
    (r'\b(XXX)\b[:\s]*(.*)', "XXX"),
    (r'\b(BUG)\b[:\s]*(.*)', "BUG"),
    (r'\b(DEPRECATED)\b[:\s]*(.*)', "DEPRECATED"),
]

# Import patterns by language
IMPORT_PATTERNS = {
    "Python": r'^\s*(?:import|from)\s+([a-zA-Z0-9_\.]+)',
    "JavaScript": r'(?:import\s+.*from\s+|require\s*\(\s*)["\']([^"\']+)["\']',
    "TypeScript": r'(?:import\s+.*from\s+|require\s*\(\s*)["\']([^"\']+)["\']',
    "Go": r'^\s*"([^"]+)"',
    "Rust": r'^\s*(?:use|extern crate)\s+([a-zA-Z0-9_:]+)',
    "Java": r'^\s*import\s+([a-zA-Z0-9_.]+)',
}
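
# Illustrative pattern behavior (example inputs, not from any real project):
#   Python:     "from collections import Counter"  -> captures "collections"
#   JavaScript: "import fs from 'node:fs'"         -> captures "node:fs"
#   Go:         matches any quoted string at line start, so it can over-capture
#               outside import blocks; treat Go counts as approximate.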

def should_skip(path: Path) -> bool:
    """Check if path should be skipped."""
    return any(skip in path.parts for skip in SKIP_DIRS)
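
# e.g. should_skip(Path("repo/node_modules/lodash/index.js")) -> True,
# because "node_modules" appears among the path's components.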

def detect_language(filepath: Path) -> str | None:
    """Detect language from file extension."""
    # Special cases: files identified by name rather than suffix
    if filepath.name == "Dockerfile":
        return "Dockerfile"
    if filepath.name == "Makefile":
        return "Makefile"
    return LANG_MAP.get(filepath.suffix.lower())
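
# e.g. detect_language(Path("app.tsx")) -> "TypeScript (JSX)"; unknown
# extensions return None, and analyze_project() skips those files entirely.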

def count_lines(filepath: Path) -> dict:
    """Count total lines, code lines, comment lines, blank lines."""
    try:
        content = filepath.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        return {"total": 0, "code": 0, "comment": 0, "blank": 0}
    lines = content.splitlines()
    total = len(lines)
    blank = sum(1 for l in lines if not l.strip())
    comment = sum(1 for l in lines if l.strip().startswith(("#", "//", "/*", "*", "<!--")))
    code = total - blank - comment
    return {"total": total, "code": code, "comment": comment, "blank": blank}

def find_todos(filepath: Path) -> list[dict]:
    """Find TODO/FIXME/HACK comments in a file."""
    todos = []
    try:
        lines = filepath.read_text(encoding="utf-8", errors="ignore").splitlines()
    except Exception:
        return todos
    for i, line in enumerate(lines, 1):
        for pattern, tag in TODO_PATTERNS:
            match = re.search(pattern, line, re.IGNORECASE)
            if match:
                todos.append({
                    "file": str(filepath),
                    "line": i,
                    "tag": tag,
                    "text": match.group(2).strip()[:80] if match.group(2) else "",
                })
                break
    return todos
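
# Illustrative record for a hypothetical file app.py containing
# "# TODO: cache results" on line 12:
#   {"file": "app.py", "line": 12, "tag": "TODO", "text": "cache results"}
# The inner break means each source line yields at most one marker.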

def find_imports(filepath: Path, language: str) -> list[str]:
    """Extract import/dependency names from a file."""
    pattern = IMPORT_PATTERNS.get(language)
    if not pattern:
        return []
    try:
        content = filepath.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        return []
    return re.findall(pattern, content, re.MULTILINE)
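
# Languages without an IMPORT_PATTERNS entry simply return no imports;
# analyze_project() later collapses dotted names ("os.path" -> "os").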

def check_documentation(filepath: Path, language: str) -> bool:
    """Check if a file has documentation (docstrings, header comments)."""
    try:
        content = filepath.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        return False
    if language == "Python":
        return '"""' in content[:500] or "'''" in content[:500]
    elif language in ("JavaScript", "TypeScript", "TypeScript (JSX)", "JavaScript (JSX)"):
        return "/**" in content[:500] or "* @" in content
    elif language in ("Java", "C#", "Go", "Rust"):
        return "/**" in content[:500] or "///" in content[:500]
    else:
        return content.strip().startswith(("#", "//", "/*"))
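
# Presence check only: except for the "* @" JSDoc probe, just the first 500
# characters are inspected, so a docstring buried mid-file will not count.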

def analyze_project(project_dir: Path) -> dict:
    """Full codebase analysis."""
    lang_stats = defaultdict(lambda: {"files": 0, "lines": 0, "code_lines": 0})
    all_files = []
    todos = []
    imports = Counter()
    documented = 0
    total_source = 0
    largest_files = []
    for filepath in project_dir.rglob("*"):
        if not filepath.is_file() or should_skip(filepath):
            continue
        language = detect_language(filepath)
        if not language:
            continue
        file_size = filepath.stat().st_size
        line_counts = count_lines(filepath)
        all_files.append({
            "path": str(filepath.relative_to(project_dir)),
            "language": language,
            "size": file_size,
            "lines": line_counts["total"],
            "code_lines": line_counts["code"],
        })
        lang_stats[language]["files"] += 1
        lang_stats[language]["lines"] += line_counts["total"]
        lang_stats[language]["code_lines"] += line_counts["code"]
        # Track largest files
        largest_files.append((str(filepath.relative_to(project_dir)), file_size, line_counts["total"]))
        # TODOs
        todos.extend(find_todos(filepath))
        # Imports
        file_imports = find_imports(filepath, language)
        for imp in file_imports:
            imports[imp.split(".")[0]] += 1
        # Documentation coverage (source files only)
        if language not in ("JSON", "YAML", "TOML", "XML", "Markdown", "Text", "CSV", "HTML", "CSS"):
            total_source += 1
            if check_documentation(filepath, language):
                documented += 1
    # Sort largest files
    largest_files.sort(key=lambda x: x[1], reverse=True)
    # Compute totals
    total_files = sum(s["files"] for s in lang_stats.values())
    total_lines = sum(s["lines"] for s in lang_stats.values())
    total_code = sum(s["code_lines"] for s in lang_stats.values())
    return {
        "project": str(project_dir),
        "summary": {
            "total_files": total_files,
            "total_lines": total_lines,
            "total_code_lines": total_code,
            "languages": len(lang_stats),
            "doc_coverage": round(documented / total_source * 100, 1) if total_source else 0,
            "todo_count": len(todos),
        },
        "languages": dict(sorted(lang_stats.items(), key=lambda x: x[1]["code_lines"], reverse=True)),
        "largest_files": largest_files[:10],
        "todos": todos,
        "top_imports": imports.most_common(15),
        "documented_files": documented,
        "total_source_files": total_source,
    }
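
# The returned dict is consumed two ways: format_report() renders it as text,
# and --json dumps it directly. "doc_coverage" is a percentage over source
# files only; config/markup formats are excluded by the tuple check above.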

def format_report(result: dict, verbose: bool = False) -> str:
    """Format analysis as readable report."""
    lines = []
    s = result["summary"]
    lines.append("=" * 60)
    lines.append(" CODEBASE ANALYSIS REPORT")
    lines.append("=" * 60)
    lines.append(f" Project: {result['project']}")
    lines.append(f" Files: {s['total_files']:,}")
    lines.append(f" Lines: {s['total_lines']:,} ({s['total_code_lines']:,} code)")
    lines.append(f" Languages: {s['languages']}")
    lines.append(f" Doc coverage: {s['doc_coverage']}%")
    lines.append(f" TODOs/FIXMEs: {s['todo_count']}")
    lines.append("")
    # Language breakdown
    lines.append("-" * 60)
    lines.append(" LANGUAGE BREAKDOWN")
    lines.append("-" * 60)
    for lang, stats in result["languages"].items():
        pct = round(stats["code_lines"] / s["total_code_lines"] * 100, 1) if s["total_code_lines"] else 0
        bar = "#" * int(pct / 2)
        lines.append(f" {lang:<20} {stats['files']:>4} files {stats['code_lines']:>6} lines ({pct:>5.1f}%) {bar}")
    # Largest files
    if result["largest_files"]:
        lines.append("")
        lines.append("-" * 60)
        lines.append(" LARGEST FILES")
        lines.append("-" * 60)
        for path, size, line_count in result["largest_files"][:10]:
            size_kb = size / 1024
            lines.append(f" {size_kb:>8.1f} KB {line_count:>5} lines {path}")
    # TODOs
    if result["todos"]:
        lines.append("")
        lines.append("-" * 60)
        lines.append(f" TECHNICAL DEBT MARKERS ({len(result['todos'])} items)")
        lines.append("-" * 60)
        tag_counts = Counter(t["tag"] for t in result["todos"])
        for tag, count in tag_counts.most_common():
            lines.append(f" {tag}: {count}")
        if verbose:
            lines.append("")
            for todo in result["todos"][:20]:
                lines.append(f" [{todo['tag']}] {todo['file']}:{todo['line']} -- {todo['text']}")
    # Top dependencies
    if result["top_imports"]:
        lines.append("")
        lines.append("-" * 60)
        lines.append(" TOP DEPENDENCIES (by import frequency)")
        lines.append("-" * 60)
        for imp, count in result["top_imports"]:
            lines.append(f" {count:>4}x {imp}")
    # Health indicators
    lines.append("")
    lines.append("-" * 60)
    lines.append(" HEALTH INDICATORS")
    lines.append("-" * 60)
    if s["doc_coverage"] < 20:
        lines.append(" [!!] Low documentation coverage (<20%)")
    elif s["doc_coverage"] < 50:
        lines.append(" [!] Moderate documentation coverage (<50%)")
    else:
        lines.append(" [ok] Good documentation coverage")
    if s["todo_count"] > 50:
        lines.append(f" [!!] High tech debt markers ({s['todo_count']} TODOs/FIXMEs)")
    elif s["todo_count"] > 10:
        lines.append(f" [!] Some tech debt markers ({s['todo_count']})")
    else:
        lines.append(f" [ok] Low tech debt markers ({s['todo_count']})")
    lines.append("")
    lines.append("=" * 60)
    return "\n".join(lines)

def main():
    parser = argparse.ArgumentParser(description="Codebase Analyzer -- scan any project for code health")
    parser.add_argument("path", nargs="?", default=".", help="Project directory to analyze")
    parser.add_argument("--verbose", "-v", action="store_true", help="Show detailed TODO list")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    args = parser.parse_args()
    project = Path(args.path)
    if not project.exists():
        print(f"Error: {project} not found", file=sys.stderr)
        sys.exit(1)
    result = analyze_project(project)
    if args.json:
        print(json.dumps(result, indent=2, default=str))
    else:
        print(format_report(result, verbose=args.verbose))

if __name__ == "__main__":
    main()
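
# Example invocation (illustrative; real numbers depend on the project):
#   py codebase_analyzer.py .
# prints the text report; add --json for machine-readable output, or
# --verbose to list the first 20 TODO/FIXME entries individually.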