-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathadvanced.py
More file actions
126 lines (106 loc) · 3.12 KB
/
advanced.py
File metadata and controls
126 lines (106 loc) · 3.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
"""Advanced features example for LogTide Python SDK."""
from datetime import datetime, timedelta
from logtide_sdk import (
AggregatedStatsOptions,
ClientOptions,
LogLevel,
LogTideClient,
QueryOptions,
)
# Full configuration.
# Build the options object first, then hand it to the client — identical in
# behaviour to constructing ClientOptions inline, but easier to read and reuse.
_options = ClientOptions(
    api_url="http://localhost:8080",
    api_key="lp_your_api_key_here",
    # Batching: send up to 100 logs per request, flushing every 5000 ms
    batch_size=100,
    flush_interval=5000,
    # Buffer management: cap the in-memory queue at 10k entries
    max_buffer_size=10000,
    # Retry with exponential backoff starting at 1000 ms
    max_retries=3,
    retry_delay_ms=1000,
    # Circuit breaker: open after 5 failures, retry after 30 s
    circuit_breaker_threshold=5,
    circuit_breaker_reset_ms=30000,
    # Metrics & debugging
    enable_metrics=True,
    debug=True,
    # Global context merged into every log entry
    global_metadata={
        "env": "production",
        "version": "1.0.0",
        "region": "us-east-1",
    },
    # Auto trace IDs disabled; trace IDs are set explicitly below
    auto_trace_id=False,
)
client = LogTideClient(_options)
# Logging methods — one call per severity level; optional metadata dict
# is attached to the individual entry.
client.debug("service", "Debug message")
client.info("service", "Info message", {"userId": 123})
client.warn("service", "Warning message")
client.error("service", "Error message", {"custom": "data"})
client.critical("service", "Critical message")
# Error serialization — an exception instance can be passed where metadata
# goes; presumably the SDK serializes it (type/message/traceback) — confirm
# against the SDK docs.
try:
    raise RuntimeError("Database timeout")
except Exception as e:
    client.error("database", "Query failed", e)
# Trace ID context — once set, the ID is attached to subsequent log calls
# until cleared.
client.set_trace_id("request-456")
client.info("api", "Request received")
client.set_trace_id(None)  # Clear
# Query API.
# Capture a single reference time so from_time/to_time bound an exact
# 24-hour window; the original called datetime.now() twice, skewing the
# window by however long elapsed between the two calls.
now = datetime.now()
result = client.query(
    QueryOptions(
        service="api-gateway",
        level=LogLevel.ERROR,
        from_time=now - timedelta(hours=24),
        to_time=now,
        limit=100,
        offset=0,
    )
)
print(f"Found {result.total} logs")
for log in result.logs:
    print(log)
# Full-text search — q matches against log message content
result = client.query(QueryOptions(q="timeout", limit=50))
print(f"Search results: {len(result.logs)}")
# Get logs by trace ID — all entries recorded under "request-456" above
logs = client.get_by_trace_id("request-456")
print(f"Trace has {len(logs)} logs")
# Aggregated statistics.
# Use one captured timestamp for both ends so the window is exactly seven
# days; two separate datetime.now() calls would make it slightly longer.
stats_now = datetime.now()
stats = client.get_aggregated_stats(
    AggregatedStatsOptions(
        from_time=stats_now - timedelta(days=7),
        to_time=stats_now,
        interval="1h",  # bucket size for the aggregation
    )
)
print("Top services:", stats.top_services)
print("Top errors:", stats.top_errors)
# Live streaming callbacks.
def handle_log(log: dict) -> None:
    """Print one streamed log entry.

    Assumes *log* is a mapping with 'time', 'level' and 'message' keys —
    TODO confirm against the SDK's stream payload schema.
    """
    print(f"[{log['time']}] {log['level']}: {log['message']}")

def handle_error(error) -> None:
    """Print a stream-level error (connection drops, protocol errors, …)."""
    print(f"Stream error: {error}")

# Note: This blocks. Run in separate thread for production
# client.stream(on_log=handle_log, on_error=handle_error, filters={'level': 'error'})
# Metrics — client-side counters accumulated since startup (or since the
# last reset_metrics() call).
metrics = client.get_metrics()
print(f"Logs sent: {metrics.logs_sent}")
print(f"Logs dropped: {metrics.logs_dropped}")
print(f"Errors: {metrics.errors}")
print(f"Retries: {metrics.retries}")
print(f"Avg latency: {metrics.avg_latency_ms}ms")
print(f"Circuit breaker trips: {metrics.circuit_breaker_trips}")
# Circuit breaker state
print(f"Circuit state: {client.get_circuit_breaker_state()}")
# Reset metrics — zero the counters read above
client.reset_metrics()
# Manual flush — presumably pushes buffered logs immediately instead of
# waiting for flush_interval; confirm against the SDK docs
client.flush()
# Close — shut the client down; further logging calls are not expected
client.close()