-
Notifications
You must be signed in to change notification settings - Fork 20
Expand file tree
/
Copy pathserver.py
More file actions
1839 lines (1593 loc) · 74.7 KB
/
server.py
File metadata and controls
1839 lines (1593 loc) · 74.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
"""
BitDive MCP Server
Proxies tool calls to BitDive monitoring API endpoints (/mcp/*)
using X-BitDive-MCP-Token for authentication.
Each tool accepts optional ``mcp_token``; if omitted, ``BITDIVE_MCP_TOKEN`` env is used.
"""
import os
import json
import httpx
import re
import asyncio
import inspect
from collections import Counter
from typing import Annotated
from urllib.parse import parse_qsl, quote, unquote, urlparse, urlunparse
from mcp.server.fastmcp import FastMCP
from mcp.server.transport_security import TransportSecuritySettings
from pydantic import Field
# ── Configuration ───────────────────────────────────────────────
# Base URL of the BitDive monitoring API; override with BITDIVE_API_URL.
BITDIVE_API_URL = os.getenv(
    "BITDIVE_API_URL",
    "https://cloud.bitdive.io/monitoring-api"
)
# Default token when a tool call does not pass ``mcp_token``.
BITDIVE_MCP_TOKEN = os.getenv("BITDIVE_MCP_TOKEN", "")
# When the env var is the literal string "true", TLS certificate
# verification is disabled on all outgoing API requests.
BITDIVE_SKIP_VERIFY = os.getenv("BITDIVE_SKIP_VERIFY", "false").lower() == "true"
# Per-request timeout in seconds for every HTTP call to the API.
TIMEOUT = 30.0
# ── MCP Server ──────────────────────────────────────────────────
# FastMCP server instance. The instructions string is surfaced to LLM
# clients to steer them through the discover → find → reproduce → cache
# workflow described below.
mcp = FastMCP(
    "BitDive",
    instructions=(
        "BitDive monitoring and tracing MCP server.\n\n"
        "CRITICAL WORKFLOW FOR DISCOVERING AND REPRODUCING TRACES:\n"
        "1. DISCOVERY: If you need a method signature, ALWAYS use get_heatmap_all_system or get_heatmap_for_module. It returns all methods (even with 0 calls).\n"
        "2. FIND TRACE: Once you know the exact className and methodName from the heatmap, use find_trace_between_time to fetch historical call_ids.\n"
        "3. REPRODUCE: Pass the call_id to get_reproduction_command to get a CURL/PowerShell command to manually trigger the endpoint.\n"
        "4. UPDATE CACHE: Execute the reproduction command, wait 45s, and the trace will be in the hot cache (get_last_calls) ready for test generation."
    ),
    # DNS-rebinding protection is disabled so the server stays reachable
    # under arbitrary Host headers (e.g. from inside Docker networks).
    transport_security=TransportSecuritySettings(
        enable_dns_rebinding_protection=False
    ),
)
# ── HTTP helpers ────────────────────────────────────────────────
def _resolve_mcp_token(mcp_token: str | None) -> str:
"""Per-request token overrides ``BITDIVE_MCP_TOKEN`` when non-empty."""
t = (mcp_token or "").strip() or BITDIVE_MCP_TOKEN
if not t:
raise RuntimeError(
"BitDive MCP token missing: pass `mcp_token` on the tool call "
"or set the BITDIVE_MCP_TOKEN environment variable."
)
return t
def _auth_headers(mcp_token: str | None = None) -> dict[str, str]:
    """Build the header map BitDive expects for API authentication."""
    token = _resolve_mcp_token(mcp_token)
    return {"X-BitDive-MCP-Token": token}
async def _get(path: str, params: dict | None = None, mcp_token: str | None = None):
    """Make an authenticated GET request to BitDive API."""
    # Token resolution happens first so auth errors surface before any I/O.
    headers = _auth_headers(mcp_token)
    async with httpx.AsyncClient(
        timeout=TIMEOUT, verify=not BITDIVE_SKIP_VERIFY
    ) as client:
        response = await client.get(
            f"{BITDIVE_API_URL}{path}",
            headers=headers,
            params=params,
        )
        response.raise_for_status()
        return response.json()
async def _post_json(
    path: str, body: dict, params: dict | None = None, mcp_token: str | None = None
):
    """Make an authenticated POST request with JSON body to BitDive API."""
    headers = _auth_headers(mcp_token)
    async with httpx.AsyncClient(
        timeout=TIMEOUT, verify=not BITDIVE_SKIP_VERIFY
    ) as client:
        response = await client.post(
            f"{BITDIVE_API_URL}{path}",
            headers=headers,
            json=body,
            params=params,
        )
        response.raise_for_status()
        # 204 (No Content) and blank bodies decode to an empty dict.
        if response.status_code == 204 or not response.text.strip():
            return {}
        return response.json()
async def _delete(path: str, params: dict | None = None, mcp_token: str | None = None):
    """Make an authenticated DELETE request to BitDive API."""
    headers = _auth_headers(mcp_token)
    async with httpx.AsyncClient(
        timeout=TIMEOUT, verify=not BITDIVE_SKIP_VERIFY
    ) as client:
        response = await client.delete(
            f"{BITDIVE_API_URL}{path}",
            headers=headers,
            params=params,
        )
        response.raise_for_status()
        # 204 (No Content) and blank bodies decode to an empty dict.
        if response.status_code == 204 or not response.text.strip():
            return {}
        return response.json()
def _decode_repeatedly(value: str, max_rounds: int = 3) -> str:
"""Decode URL-encoded text until it stabilizes or the guard limit is reached."""
decoded = value
for _ in range(max_rounds):
next_value = unquote(decoded)
if next_value == decoded:
break
decoded = next_value
return decoded
def _normalize_reproduction_url(raw_url: str) -> str:
"""Make captured URLs usable from the host machine."""
try:
parsed = urlparse(raw_url)
except Exception:
return raw_url
hostname = parsed.hostname or ""
port = parsed.port
# Internal Docker DNS names are not usable from the host shell.
if hostname.endswith("-ms"):
netloc = f"localhost:{port}" if port else "localhost"
else:
netloc = parsed.netloc
if parsed.query:
query_pairs = [
(key, _decode_repeatedly(value))
for key, value in parse_qsl(parsed.query, keep_blank_values=True)
]
query = "&".join(
f"{quote(key, safe='')}={quote(value, safe='')}"
for key, value in query_pairs
)
else:
query = parsed.query
return urlunparse(parsed._replace(netloc=netloc, query=query))
def _should_skip_reproduction_header(header_name: str) -> bool:
normalized = header_name.lower()
if normalized in {
"@class",
"host",
"content-length",
"connection",
"accept-encoding",
"expect",
}:
return True
return normalized.startswith("x-bitdiv-")
def _escape_single_quotes(value: str) -> str:
return value.replace("'", "'\"'\"'")
def _escape_powershell_single_quotes(value: str) -> str:
return value.replace("'", "''")
def _normalize_sql(sql: str) -> str:
"""Removes specific IDs and values from SQL to group similar queries.
Example: 'where id=123' -> 'where id=?'
"""
if not sql:
return ""
# Replace numeric values in quotes: '123' -> '?'
sql = re.sub(r"'\d+'", "'?'", sql)
# Replace standalone numeric values: = 123 -> = ?
sql = re.sub(r"=\s*\d+", "=?", sql)
# Replace values in IN clauses: IN (1, 2, 3) -> IN (?)
sql = re.sub(r"IN\s*\([^)]+\)", "IN(?)", sql, flags=re.IGNORECASE)
# Replace UUIDs
sql = re.sub(r"'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'", "?", sql)
# Handle PostgreSQL type casting: ('1'::int4) -> (?)
sql = re.sub(r"\('\?'::\w+\)", "(?)", sql)
# Remove extra spaces
sql = re.sub(r"\s+", " ", sql)
return sql.strip()
# Key paths whose value changes are expected churn (ids, trace ids,
# timestamps) and can be suppressed when diffing traces.
_VOLATILE_KEY_RE = re.compile(
    r"(^|\.)(id|.*Id|traceId|spanId|messageId|callId|uuid|timestamp|date|createdAt|updatedAt)$",
    re.IGNORECASE,
)
# Canonical 8-4-4-4-12 hex UUID anywhere inside a string.
_UUID_RE = re.compile(
    r"\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b",
    re.IGNORECASE,
)
# ISO-8601-ish date/datetime prefix, e.g. "2024-01-15T10:30:00+03:00".
_ISO_DATE_RE = re.compile(r"^\d{4}-\d{2}-\d{2}[T ][0-9:\.\-+Z]*$")
def _normalize_typed_key(key: str) -> str:
"""Collapse BitDive's typed map keys into plain field names."""
if key.startswith("string:") and key.count(":") >= 2:
return key.split(":", 2)[2]
return key
def _looks_like_json_blob(value: str) -> bool:
if not isinstance(value, str):
return False
trimmed = value.strip()
return (
trimmed.startswith("{")
or trimmed.startswith("[")
or trimmed.startswith("\"{")
or trimmed.startswith("\"[")
)
def _safe_json_loads(raw: str):
"""Parse JSON when possible, including quoted JSON payloads."""
if not isinstance(raw, str):
return raw
text = raw.strip()
if not text:
return raw
try:
parsed = json.loads(text)
except Exception:
return raw
if isinstance(parsed, str) and _looks_like_json_blob(parsed):
return _safe_json_loads(parsed)
return parsed
def _normalize_payload(value):
    """Generic normalization for BitDive typed JSON structures.

    Recursively converts Java-serialized capture data into plain Python:
    parses embedded JSON strings, unwraps ``["java.Type", value]`` pairs,
    records non-collection ``@class`` names as a short ``__class__`` tag,
    collapses typed map keys, and flattens BitDive parameter triples
    (parIndex/paramType/val) into index/type/value dicts.
    """
    if isinstance(value, str):
        parsed = _safe_json_loads(value)
        # Identity check: _safe_json_loads returns the same object when the
        # text is not parseable JSON, so only genuine parses recurse.
        if parsed is value:
            return value.strip()
        return _normalize_payload(parsed)
    if isinstance(value, list):
        # Jackson-style type wrapper, e.g. ["java.util.ArrayList", [...]].
        if len(value) == 2 and isinstance(value[0], str) and value[0].startswith("java."):
            return _normalize_payload(value[1])
        return [_normalize_payload(item) for item in value]
    if isinstance(value, dict):
        normalized = {}
        class_name = value.get("@class")
        # Keep domain class names (shortened); drop java.util.* noise.
        if class_name and not class_name.startswith("java.util."):
            normalized["__class__"] = class_name.rsplit(".", 1)[-1]
        for key, item in value.items():
            if key == "@class":
                continue
            normalized[_normalize_typed_key(key)] = _normalize_payload(item)
        # BitDive argument triple -> friendlier field names.
        if set(normalized.keys()) == {"parIndex", "paramType", "val"}:
            return {
                "index": normalized.get("parIndex"),
                "type": normalized.get("paramType"),
                "value": normalized.get("val"),
            }
        return normalized
    # Numbers, booleans, None pass through untouched.
    return value
def _summarize_scalar(value) -> str:
text = str(value)
if len(text) > 180:
return f"{text[:177]}..."
return text
def _is_volatile_change(path: str, before, after) -> bool:
    """True when a diff at *path* is expected churn (ids, UUIDs, timestamps)."""
    if _VOLATILE_KEY_RE.search(path):
        return True
    # Either side containing a UUID or an ISO-style date is volatile too.
    return any(
        isinstance(side, str)
        and (_UUID_RE.search(side) or _ISO_DATE_RE.match(side))
        for side in (before, after)
    )
def _diff_values(before, after, path: str = "", changes: list | None = None, *, ignore_volatile: bool = False):
    """Recursively diff normalized payloads.

    Appends human-readable change descriptions to *changes* (created on
    first call) and returns it. With ``ignore_volatile=True``, changes on
    id/uuid/timestamp-like keys or values are suppressed.
    """
    if changes is None:
        changes = []
    if before == after:
        return changes
    # Type mismatch ends recursion for this subtree.
    if type(before) != type(after):
        if not (ignore_volatile and _is_volatile_change(path, before, after)):
            changes.append(f"{path or '$'}: type {type(before).__name__} -> {type(after).__name__}")
        return changes
    if isinstance(before, dict):
        before_keys = set(before.keys())
        after_keys = set(after.keys())
        for key in sorted(before_keys - after_keys):
            sub_path = f"{path}.{key}" if path else key
            if ignore_volatile and _is_volatile_change(sub_path, before.get(key), None):
                continue
            changes.append(f"{sub_path}: removed")
        for key in sorted(after_keys - before_keys):
            sub_path = f"{path}.{key}" if path else key
            if ignore_volatile and _is_volatile_change(sub_path, None, after.get(key)):
                continue
            changes.append(f"{sub_path}: added={_summarize_scalar(after[key])}")
        for key in sorted(before_keys & after_keys):
            sub_path = f"{path}.{key}" if path else key
            _diff_values(before[key], after[key], sub_path, changes, ignore_volatile=ignore_volatile)
        return changes
    if isinstance(before, list):
        if len(before) != len(after):
            if not (ignore_volatile and _is_volatile_change(path, len(before), len(after))):
                changes.append(f"{path or '$'}: list length {len(before)} -> {len(after)}")
        # Only the first 5 elements are compared pairwise to bound output.
        for index, (before_item, after_item) in enumerate(zip(before[:5], after[:5])):
            _diff_values(before_item, after_item, f"{path}[{index}]", changes, ignore_volatile=ignore_volatile)
        return changes
    if isinstance(before, str) and isinstance(after, str):
        # Long strings get a summarized one-line report instead of the
        # generic "before -> after" rendering below.
        if len(before) > 240 or len(after) > 240:
            if before != after and not (ignore_volatile and _is_volatile_change(path, before, after)):
                changes.append(
                    f"{path or '$'}: text changed "
                    f"(len {len(before)} -> {len(after)}, before={_summarize_scalar(before)}, after={_summarize_scalar(after)})"
                )
            return changes
    # Scalar fallback (numbers, short strings, booleans, ...).
    if not (ignore_volatile and _is_volatile_change(path, before, after)):
        changes.append(
            f"{path or '$'}: {_summarize_scalar(before)} -> {_summarize_scalar(after)}"
        )
    return changes
def _signature(node: dict) -> str:
    """Short ``Class.method()`` label for a trace node.

    NOTE(review): relies on ``_short_class``, which is not defined in this
    portion of the file — presumably a helper declared further down; verify.
    """
    return f"{_short_class(node.get('className', '?'))}.{node.get('methodName', '?')}()"
def _build_contract_entries(trace: dict) -> list[dict]:
    """Extract generic request/response contracts for every node in the trace tree.

    Walks the trace depth-first and emits one entry per node with a unique
    hierarchical path like ``/Svc.method()[1]/Repo.find()[2]`` (the ordinal
    disambiguates repeated calls under the same parent).
    """
    entries = []
    # Counts children seen per parent path to assign ordinals.
    path_counts: Counter[str] = Counter()
    def _walk(node: dict, parent_path: str = ""):
        signature = _signature(node)
        path_counts[parent_path] += 1
        ordinal = path_counts[parent_path]
        path = f"{parent_path}/{signature}[{ordinal}]"
        # Request side: args, REST body/headers, URL — empty values omitted.
        request_contract = {}
        args_payload = _normalize_payload(node.get("args"))
        if args_payload not in (None, "", [], {}):
            request_contract["args"] = args_payload
        body_payload = _normalize_payload(node.get("bodyRest"))
        if body_payload not in (None, "", [], {}):
            request_contract["body"] = body_payload
        header_payload = _normalize_payload(node.get("headerRest"))
        if header_payload not in (None, "", [], {}):
            request_contract["headers"] = header_payload
        url_payload = node.get("urlRest") or node.get("url")
        if url_payload:
            request_contract["url"] = url_payload
        # Response side: return value, HTTP status, error message.
        response_contract = {}
        return_payload = _normalize_payload(node.get("methodReturn"))
        if return_payload not in (None, "", [], {}):
            response_contract["return"] = return_payload
        status = node.get("codeResponse")
        if status:
            response_contract["status"] = status
        error_message = node.get("errorCallMessage")
        if error_message:
            response_contract["error"] = error_message
        # Downstream HTTP calls recorded on this node.
        rest_contracts = []
        for rest in node.get("restCalls", []):
            rest_contracts.append(
                {
                    "method": rest.get("methodRest") or rest.get("method"),
                    "uri": rest.get("uri"),
                    "status": rest.get("statusCode"),
                    "requestHeaders": _normalize_payload(rest.get("headers")),
                    "requestBody": _normalize_payload(rest.get("body")),
                    "responseHeaders": _normalize_payload(rest.get("responseHeaders")),
                    "responseBody": _normalize_payload(rest.get("responseBody")),
                    "error": rest.get("errorCallMessage"),
                }
            )
        entry = {
            "path": path,
            "signature": signature,
            "operationType": node.get("operationType"),
            "request": request_contract,
            "response": response_contract,
            "restCalls": rest_contracts,
            "delta": node.get("callTimeDelta") or 0,
        }
        entries.append(entry)
        for child in node.get("childCalls", []):
            _walk(child, path)
    _walk(trace)
    return entries
def _index_contract_entries(entries: list[dict]) -> dict[str, dict]:
return {entry["path"]: entry for entry in entries}
def _format_contract_section(before: dict, after: dict) -> list[str]:
    """Render root-level contract differences between two trace entries.

    Produces up to three sections (combined, request-only, response-only),
    each truncated to keep the report readable. Volatile fields (ids,
    timestamps) are ignored.
    """
    lines = []
    root_before = {
        "request": before.get("request", {}),
        "response": before.get("response", {}),
    }
    root_after = {
        "request": after.get("request", {}),
        "response": after.get("response", {}),
    }
    root_changes = _diff_values(root_before, root_after, ignore_volatile=True)
    if root_changes:
        lines.append("ROOT CONTRACT CHANGES:")
        lines.extend(f" - {change}" for change in root_changes[:15])
        if len(root_changes) > 15:
            lines.append(f" ... and {len(root_changes) - 15} more root changes")
        lines.append("")
    request_changes = _diff_values(before.get("request", {}), after.get("request", {}), ignore_volatile=True)
    if request_changes:
        lines.append("ROOT REQUEST DIFF:")
        lines.extend(f" - {change}" for change in request_changes[:10])
        lines.append("")
    response_changes = _diff_values(before.get("response", {}), after.get("response", {}), ignore_volatile=True)
    if response_changes:
        lines.append("ROOT RESPONSE DIFF:")
        lines.extend(f" - {change}" for change in response_changes[:10])
        lines.append("")
    return lines
def _format_path_contract_changes(before_entries: list[dict], after_entries: list[dict]) -> list[str]:
    """Render per-node contract drift between two trace snapshots.

    Reports three sections: request/response drift on shared paths, paths
    added/removed between snapshots, and downstream HTTP contract changes.
    All sections are truncated to keep the report compact.
    """
    lines = []
    before_index = _index_contract_entries(before_entries)
    after_index = _index_contract_entries(after_entries)
    # 1) Contract drift on paths present in both snapshots.
    changed_nodes = []
    for path in sorted(set(before_index.keys()) & set(after_index.keys())):
        before_entry = before_index[path]
        after_entry = after_index[path]
        changes = _diff_values(
            {
                "request": before_entry.get("request", {}),
                "response": before_entry.get("response", {}),
                "restCalls": before_entry.get("restCalls", []),
            },
            {
                "request": after_entry.get("request", {}),
                "response": after_entry.get("response", {}),
                "restCalls": after_entry.get("restCalls", []),
            },
            ignore_volatile=True,
        )
        if changes:
            changed_nodes.append((path, before_entry["signature"], changes))
    if changed_nodes:
        lines.append("PAYLOAD / CONTRACT DRIFT:")
        for path, signature, changes in changed_nodes[:8]:
            lines.append(f" {signature} @ {path}")
            for change in changes[:4]:
                lines.append(f" - {change}")
            if len(changes) > 4:
                lines.append(f" ... and {len(changes) - 4} more changes")
        if len(changed_nodes) > 8:
            lines.append(f" ... and {len(changed_nodes) - 8} more changed nodes")
        lines.append("")
    # 2) Structural changes: call paths that appeared or disappeared.
    added_paths = sorted(set(after_index.keys()) - set(before_index.keys()))
    removed_paths = sorted(set(before_index.keys()) - set(after_index.keys()))
    if added_paths or removed_paths:
        lines.append("TRACE PATH CHANGES:")
        for path in added_paths[:6]:
            lines.append(f" + {path}")
        for path in removed_paths[:6]:
            lines.append(f" - {path}")
        if len(added_paths) > 6 or len(removed_paths) > 6:
            lines.append(" ... additional path changes omitted")
        lines.append("")
    # 3) Drift limited to recorded downstream HTTP calls.
    downstream_changes = []
    for path in sorted(set(before_index.keys()) & set(after_index.keys())):
        before_rest = before_index[path].get("restCalls", [])
        after_rest = after_index[path].get("restCalls", [])
        changes = _diff_values(before_rest, after_rest, ignore_volatile=True)
        if changes:
            downstream_changes.append((path, before_index[path]["signature"], changes))
    if downstream_changes:
        lines.append("DOWNSTREAM HTTP CONTRACT CHANGES:")
        for path, signature, changes in downstream_changes[:6]:
            lines.append(f" {signature} @ {path}")
            for change in changes[:4]:
                lines.append(f" - {change}")
        if len(downstream_changes) > 6:
            lines.append(f" ... and {len(downstream_changes) - 6} more downstream changes")
        lines.append("")
    return lines
# ═══════════════════════════════════════════════════════════════
# Dashboard / HeatMap (from HeadMapTools.java)
# ═══════════════════════════════════════════════════════════════
def _format_heatmap(modules: list) -> str:
    """Convert raw heatmap JSON into a compact human-readable summary.

    Strips history[], alert fields, and other verbose data. Output is a
    module → service → method outline; returns "No heatmap data" for an
    empty input list.
    """
    lines = []
    for mod in modules:
        mod_name = mod.get("moduleName", "?")
        lines.append(f"\n📦 Module: {mod_name}")
        for svc in mod.get("services", []):
            svc_name = svc.get("serviceName", "?")
            svc_calls = svc.get("callCountWeb", 0)
            svc_errs = svc.get("errorCount", 0)
            svc_avg = svc.get("avgCallTimeWeb", 0)
            svc_summary = f"{svc_calls} calls, {svc_avg:.0f}ms avg"
            if svc_errs:
                svc_summary += f", ⚠ {svc_errs} errors"
            lines.append(f" 🔹 {svc_name} ({svc_summary})")
            for cls in svc.get("classes", []):
                # Short class name only (package path dropped).
                cls_name = (cls.get("className") or "").rsplit(".", 1)[-1]
                for ip in cls.get("inPoints", []):
                    m_name = ip.get("inPointName", "?")
                    # Web metrics preferred; scheduler metrics as fallback.
                    calls = ip.get("callCountWeb", 0) or ip.get("callCountScheduler", 0)
                    avg = ip.get("avgCallTimeWeb", 0) or ip.get("avgCallTimeScheduler", 0)
                    errs = ip.get("errorCount", 0)
                    sql_count = ip.get("sqlCallCount", 0)
                    rest_count = ip.get("restCallCount", 0)
                    c4xx = ip.get("count4xx", 0)
                    c5xx = ip.get("count5xx", 0)
                    q_send = ip.get("queueSendCount", 0)
                    q_consume = ip.get("queueConsumerCount", 0)
                    # Only non-zero counters are shown to keep lines short.
                    parts = [f"{calls} calls", f"{avg:.0f}ms"]
                    if errs:
                        parts.append(f"⚠ {errs} err")
                    if c4xx:
                        parts.append(f"{c4xx}×4xx")
                    if c5xx:
                        parts.append(f"{c5xx}×5xx")
                    if sql_count:
                        parts.append(f"{sql_count} SQL")
                    if rest_count:
                        parts.append(f"{rest_count} REST")
                    if q_send or q_consume:
                        parts.append(f"Q:{q_send}↑{q_consume}↓")
                    lines.append(f" {cls_name}.{m_name}(): {' | '.join(parts)}")
    return "\n".join(lines) if lines else "No heatmap data"
@mcp.tool(description="Show the system heatmap for all modules, services, and entry methods.")
async def get_heatmap_all_system(
    last_minutes: Annotated[int, Field(description="How many recent minutes to include in the heatmap, capped at 30.")] = 10,
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """System-wide performance heatmap.

    Covers every module → service → class → method with error counts,
    call counts, average response times, and SQL/REST/Queue metrics.
    """
    window = min(last_minutes, 30)
    heatmap = await _get(
        "/mcp/Dashboard/HeatMap", {"LastMinutes": window}, mcp_token=mcp_token
    )
    return _format_heatmap(heatmap)
@mcp.tool(description="Show the heatmap for one module.")
async def get_heatmap_for_module(
    module_name: Annotated[str, Field(description="Exact module name to filter by.")],
    last_minutes: Annotated[int, Field(description="How many recent minutes to include in the heatmap, capped at 30.")] = 10,
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Performance heatmap restricted to a single module.

    Fetches the full heatmap and keeps only the requested module.
    """
    window = min(last_minutes, 30)
    heatmap = await _get(
        "/mcp/Dashboard/HeatMap", {"LastMinutes": window}, mcp_token=mcp_token
    )
    matching = []
    for module in heatmap:
        if module.get("moduleName") == module_name:
            matching.append(module)
    return _format_heatmap(matching)
@mcp.tool(description="Show the heatmap for one service inside a module.")
async def get_heatmap_for_service(
    module_name: Annotated[str, Field(description="Exact module name that owns the service.")],
    service_name: Annotated[str, Field(description="Exact service name to filter by.")],
    last_minutes: Annotated[int, Field(description="How many recent minutes to include in the heatmap, capped at 30.")] = 10,
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Performance heatmap narrowed to one service of one module."""
    window = min(last_minutes, 30)
    heatmap = await _get(
        "/mcp/Dashboard/HeatMap", {"LastMinutes": window}, mcp_token=mcp_token
    )
    matching = []
    for module in heatmap:
        if module.get("moduleName") != module_name:
            continue
        services = [
            svc for svc in module.get("services", [])
            if svc.get("serviceName") == service_name
        ]
        # Modules without the requested service are dropped entirely.
        if services:
            matching.append({**module, "services": services})
    return _format_heatmap(matching)
# ═══════════════════════════════════════════════════════════════
# Last Call Service (from LastCallTools.java)
# ═══════════════════════════════════════════════════════════════
@mcp.tool(description="List recent call IDs for a module and service.")
async def get_last_calls(
    module_name: Annotated[str, Field(description="Exact module name to inspect.")],
    service_name: Annotated[str, Field(description="Exact service name to inspect.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Recent method executions with their trace IDs for one module/service.

    Use the returned IDs for deeper trace investigation.
    """
    data = await _get(
        "/mcp/LastCallService/getData",
        {
            "moduleName": module_name,
            "serviceName": service_name,
        },
        mcp_token=mcp_token,
    )
    if not data or not isinstance(data, list):
        return "No recent calls found."
    def _row(item: dict) -> str:
        trace_id = item.get("traceId") or item.get("messageId") or "?"
        short_cls = (item.get("className") or "").rsplit(".", 1)[-1]
        method = item.get("methodName", "?")
        # "2024-01-15T10:30:00.123" -> "2024-01-15 10:30:00"
        when = (item.get("callDateTime") or "")[:19].replace("T", " ")
        return f" {trace_id} {short_cls}.{method}() {when}"
    header = f"Recent calls for {module_name}/{service_name}:\n"
    return "\n".join([header] + [_row(item) for item in data])
# ═══════════════════════════════════════════════════════════════
# Find Trace (from TraceTools.java)
# ═══════════════════════════════════════════════════════════════
@mcp.tool(description="Fetch the full raw trace JSON for a call ID.")
async def find_trace_all(
    call_id: Annotated[str, Field(description="Trace or call ID to load.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Full call trace tree for one call ID.

    Contains the complete hierarchy of method calls, SQL queries,
    REST calls, and queue operations within a single request.
    """
    params = {"callId": call_id}
    trace = await _get("/mcp/FindTrace/findTraceAll", params, mcp_token=mcp_token)
    return json.dumps(trace, ensure_ascii=False, default=str)
@mcp.tool(description="Fetch the trace subtree for one class and method inside a call.")
async def find_trace_for_method(
    call_id: Annotated[str, Field(description="Trace or call ID that contains the target method call.")],
    class_name: Annotated[str, Field(description="Fully qualified class name to extract from the trace.")],
    method_name: Annotated[str, Field(description="Method name to extract from the trace.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Trace subtree for a specific method within the given call ID.

    Drills into one method's execution details inside a larger trace.
    """
    params = {
        "callId": call_id,
        "className": class_name,
        "methodName": method_name,
    }
    subtree = await _get(
        "/mcp/FindTrace/findTraceForMethod", params, mcp_token=mcp_token
    )
    return json.dumps(subtree, ensure_ascii=False, default=str)
@mcp.tool(description="Find call IDs for a class and method within a time range.")
async def find_trace_between_time(
    class_name: Annotated[str, Field(description="Fully qualified class name to search for.")],
    method_name: Annotated[str, Field(description="Method name to search for.")],
    begin_date: Annotated[str, Field(description="Start of the search window in ISO-8601 format with timezone offset.")],
    end_date: Annotated[str, Field(description="End of the search window in ISO-8601 format with timezone offset.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Method call traces between two timestamps.

    Both dates must be ISO-8601 with timezone offset, e.g.
    '2024-01-15T10:30:00+03:00'.
    """
    params = {
        "className": class_name,
        "methodName": method_name,
        "beginDate": begin_date,
        "endDate": end_date,
    }
    traces = await _get(
        "/mcp/FindTrace/findTraceForMethodBetweenTime", params, mcp_token=mcp_token
    )
    return json.dumps(traces, ensure_ascii=False, default=str)
@mcp.tool(description="Resolve call IDs into short Class.method names.")
async def get_trace_names_batch(
    call_ids: Annotated[list[str], Field(description="List of trace or call IDs to resolve. Only the first 35 IDs are processed.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Map each call ID to a short ``className.methodName`` label.

    Useful for quickly identifying unknown call IDs (e.g. from test
    script groups). Only the first 35 IDs are resolved to bound the
    number of upstream requests.
    """
    if not call_ids:
        return "No call IDs provided."
    lines = []
    for call_id in call_ids[:35]:
        try:
            trace = await _get(
                "/mcp/FindTrace/findTraceAll",
                {"callId": call_id},
                mcp_token=mcp_token,
            )
        except Exception as exc:
            lines.append(f"{call_id} -> Error: {str(exc)}")
            continue
        if not trace:
            lines.append(f"{call_id} -> Not Found")
            continue
        short_cls = trace.get("className", "?").split('.')[-1]
        short_method = trace.get("methodName", "?")
        lines.append(f"{call_id} -> {short_cls}.{short_method}")
    return "\n".join(lines)
def _extract_reproduction_headers(raw_headers) -> dict:
    """Parse BitDive's Java-serialized header map into a replayable dict.

    Keys like "string:java.lang.String:user-agent" are shortened to their
    final segment; internal/hop-by-hop headers are dropped; values wrapped
    as ["java.util.ArrayList", [...]] are unwrapped to their first element.
    Returns {} when the input is absent or unparseable.
    """
    if not raw_headers:
        return {}
    if isinstance(raw_headers, str):
        try:
            raw_headers = json.loads(raw_headers)
        except ValueError:
            # Fix: was a bare ``except:`` that swallowed every exception
            # (including KeyboardInterrupt). json.loads on a str raises
            # ValueError (JSONDecodeError) only.
            return {}
    if not isinstance(raw_headers, dict):
        return {}
    headers: dict = {}
    for k, v in raw_headers.items():
        # Clean up Java-serialized keys like "string:java.lang.String:user-agent"
        key = k.split(":")[-1] if ":" in k else k
        if _should_skip_reproduction_header(key):
            continue
        # Handle BitDive/Java list format
        if isinstance(v, list) and len(v) == 2 and v[0] == "java.util.ArrayList":
            inner = v[1]
            if inner and isinstance(inner, list):
                headers[key] = inner[0]
        elif isinstance(v, list) and v:
            headers[key] = v[0]
        else:
            headers[key] = str(v)
    return headers
@mcp.tool(description="Build curl and PowerShell commands to replay a captured web request.")
async def get_reproduction_command(
    call_id: Annotated[str, Field(description="Trace or call ID of the captured web request to replay.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Returns a curl command and structured info to reproduce the web request from a trace.

    Extracts URL, method, headers, and body from the recorded BitDive trace.
    """
    trace = await _get(
        "/mcp/FindTrace/findTraceAll", {"callId": call_id}, mcp_token=mcp_token
    )
    op_type = trace.get("operationType", "WEB_GET")
    # "WEB_POST" -> "POST"; unexpected operation types fall back to GET.
    method = op_type.replace("WEB_", "") if op_type.startswith("WEB_") else "GET"
    url = _normalize_reproduction_url(
        trace.get("urlRest") or trace.get("url") or "http://localhost:8080/???"
    )
    headers = _extract_reproduction_headers(trace.get("headerRest"))
    body = trace.get("bodyRest")
    # Fix: compute once, up front — previously defined inside the curl
    # branch and silently reused by the PowerShell branch below.
    body_str = body if isinstance(body, str) else (json.dumps(body) if body else "")
    # Generate CURL command
    curl = f"curl -X {method} '{url}'"
    for k, v in headers.items():
        curl += f" -H '{_escape_single_quotes(k)}: {_escape_single_quotes(str(v))}'"
    if body:
        curl += f" -d '{_escape_single_quotes(body_str)}'"
    # Generate PowerShell Invoke-RestMethod
    ps_headers = "@{" + "; ".join(
        [f"'{_escape_powershell_single_quotes(k)}'='{_escape_powershell_single_quotes(str(v))}'" for k, v in headers.items()]
    ) + "}"
    ps = (
        f"Invoke-RestMethod -Method {method} "
        f"-Uri '{_escape_powershell_single_quotes(url)}' -Headers {ps_headers}"
    )
    if body:
        ps += (
            f" -Body '{_escape_powershell_single_quotes(body_str)}'"
            f" -ContentType 'application/json'"
        )
    return (
        f"REPRODUCTION COMMANDS for Call {call_id}:\n\n"
        f"--- BASH / CURL ---\n{curl}\n\n"
        f"--- POWERSHELL ---\n{ps}\n\n"
        f"--- DETAILS ---\n"
        f"Method: {method}\n"
        f"URL: {url}\n"
        f"Body: {body or '(empty)'}"
    )
# ═══════════════════════════════════════════════════════════════
# Method Documentation (from api-docs.json /mcp/MethodDoc/*)
# ═══════════════════════════════════════════════════════════════
@mcp.tool(description="Search method documentation and return short matches.")
async def search_methods_short(
    query: Annotated[str, Field(description="Keyword, business term, or partial method name to search for.")],
    limit: Annotated[int, Field(description="Maximum number of results to return.")] = 10,
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Look up method documentation matching *query*.

    Returns a JSON string containing short summaries of each matching method.
    """
    params = {"q": query, "limit": limit}
    result = await _get("/mcp/MethodDoc/searchShort", params, mcp_token=mcp_token)
    return json.dumps(result, ensure_ascii=False, default=str)
@mcp.tool(description="Search method documentation and return detailed matches.")
async def search_methods_full(
    query: Annotated[str, Field(description="Keyword, business term, or partial method name to search for.")],
    limit: Annotated[int, Field(description="Maximum number of detailed results to return.")] = 3,
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Look up method documentation matching *query*.

    Returns a JSON string with full details for each match, including call
    statistics and trace information.
    """
    params = {"q": query, "limit": limit}
    result = await _get("/mcp/MethodDoc/searchFull", params, mcp_token=mcp_token)
    return json.dumps(result, ensure_ascii=False, default=str)
# ═══════════════════════════════════════════════════════════════
# Test Management (mirrors frontend QA flow via /mcp/Testing/*)
# ═══════════════════════════════════════════════════════════════
@mcp.tool(description="Create a new BitDive test group from trace call IDs.")
async def create_test_group(
    name: Annotated[str, Field(description="Human-readable name for the new test group.")],
    test_type: Annotated[str, Field(description="Test type to create: UNIT, COMPONENT, or INTEGRATION.")],
    call_id_list: Annotated[list[str], Field(description="Trace call IDs that will be used to generate the new test group.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Create a NEW test group in BitDive from a list of call (trace) IDs.

    ⚠️ WARNING: This creates a BRAND NEW test group with a new UUID.
    Check TestControllerTestAbstract.java for existing test group UUIDs before
    creating new ones. New groups will NOT be executed by Maven unless their
    UUID is added to the Java test file.

    Args:
        name: Human-readable test name (e.g., "Faculty Service Unit Tests")
        test_type: One of "UNIT", "COMPONENT", or "INTEGRATION"
        call_id_list: List of trace/call IDs to include in the test

    Returns: JSON string with the created test group info (id, name, type).
    """
    # The backend expects the call IDs nested under testDataRules.
    payload = {
        "name": name,
        "type": test_type.upper(),
        "testDataRules": {"callIdList": call_id_list},
    }
    created = await _post_json(
        "/mcp/Testing/createTestGroup", payload, mcp_token=mcp_token
    )
    return json.dumps(created, ensure_ascii=False, default=str)
@mcp.tool(description="List all BitDive test groups.")
async def get_all_test_scripts(
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Return a plain-text table of all test scripts (test groups).

    Columns: ID, name (truncated to 35 chars), type, last-run status
    (✅ success, ❌ failure, — never run), and the number of class-level
    entries. Rows for disabled groups are prefixed with 🚫.
    """
    data = await _get("/mcp/Testing/getAllTestScript", mcp_token=mcp_token)
    if not data or not isinstance(data, list):
        return "No test scripts found."
    lines = [f"Test Scripts ({len(data)} total):\n"]
    lines.append(f"{'ID':>38} | {'Name':<35} | {'Type':<6} | {'Status':<4} | Classes")
    lines.append("-" * 100)
    for script in data:
        sid = script.get("id", "?")
        name = (script.get("name") or "?")[:35]
        stype = (script.get("type") or "?")[:6]
        rs = script.get("resultSuccess")
        status = "✅" if rs and rs.get("success") else "❌" if rs else "—"
        # `or []` guards against an explicit null value: dict.get's default is
        # only used when the key is absent, so len(None) would raise TypeError.
        n_classes = len(script.get("scriptDataDTOList") or [])
        enabled = script.get("enabled", True)
        prefix = " " if enabled else "🚫"
        lines.append(f"{prefix}{sid} | {name:<35} | {stype:<6} | {status:<4} | {n_classes}")
    return "\n".join(lines)
@mcp.tool(description="List class-level entries inside a test group.")
async def get_script_data(
    test_script_id: Annotated[str, Field(description="Test group ID to inspect.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Render the class-level script data entries of one test group.

    Each line shows the entry ID, simple class name, owning service,
    enabled flag, last result, and the trace call IDs it was built from.
    """
    entries = await _get(
        "/mcp/Testing/getScriptData",
        {"testScriptId": test_script_id},
        mcp_token=mcp_token,
    )
    if not entries or not isinstance(entries, list):
        return "No script data found."
    out = [f"Script data for {test_script_id} ({len(entries)} entries):\n"]
    for item in entries:
        entry_id = item.get("id", "?")
        # Keep only the simple class name from the fully-qualified one.
        simple_cls = (item.get("className") or "").rsplit(".", 1)[-1]
        service = item.get("serviceName", "?")
        enabled_mark = "✅" if item.get("enabled") else "❌"
        outcome = item.get("resultSuccess")
        result_mark = "✅" if outcome and outcome.get("success") else "❌" if outcome else "—"
        # callIdData records which trace IDs seeded this entry.
        trace_ids = item.get("callIdData") or "—"
        out.append(
            f"  {entry_id} {simple_cls} ({service}) "
            f"enabled={enabled_mark} result={result_mark} calls={trace_ids}"
        )
    return "\n".join(out)
@mcp.tool(description="List method-level tests under one script data entry.")
async def get_script_data_test(
    test_script_data_id: Annotated[str, Field(description="Class-level script data entry ID to inspect.")],
    mcp_token: Annotated[str | None, Field(description="Optional BitDive MCP token. Uses BITDIVE_MCP_TOKEN from the environment when omitted.")] = None,
) -> str:
    """Return the method-level tests under one script data record as JSON."""
    params = {"testScriptDataId": test_script_data_id}
    response = await _get(
        "/mcp/Testing/getScriptDataTest", params, mcp_token=mcp_token
    )
    return json.dumps(response, ensure_ascii=False, default=str)
async def _find_test_context(
script_data_test_id: str, mcp_token: str | None = None
):
"""Resolve a method-level test ID to its parent script data and group via MCP-only APIs."""
all_groups = await _get("/mcp/Testing/getAllTestScript", mcp_token=mcp_token)
for group in all_groups or []:
group_id = group.get("id")