From 5547b59a023dce399e37261a5a850a4379fef9b5 Mon Sep 17 00:00:00 2001 From: ReenigneArcher <42013603+ReenigneArcher@users.noreply.github.com> Date: Mon, 16 Feb 2026 18:01:21 -0500 Subject: [PATCH] fix(logging): implement proper logging system --- README.md | 1 + pyproject.toml | 1 + sample.env | 1 + src/__main__.py | 13 +- src/common/crypto.py | 11 +- src/common/database.py | 36 +- src/common/logging_config.py | 320 +++++++++++++++++ src/common/rank.py | 53 +-- src/common/sponsors.py | 7 +- src/common/webapp.py | 8 +- src/discord_bot/bot.py | 20 +- src/discord_bot/cogs/rank.py | 14 +- src/reddit_bot/bot.py | 38 +- src/reddit_bot/cogs/rank.py | 14 +- tests/unit/common/test_logging_config.py | 439 +++++++++++++++++++++++ 15 files changed, 892 insertions(+), 84 deletions(-) create mode 100644 src/common/logging_config.py create mode 100644 tests/unit/common/test_logging_config.py diff --git a/README.md b/README.md index f67632e..6ff858c 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ platforms such as GitHub discussions/issues might be added in the future. | DISCORD_CLIENT_SECRET | True | `None` | Discord OAuth2 client secret. | | DISCORD_GITHUB_STATUS_CHANNEL_ID | True | `None` | Channel ID to send GitHub status updates to. | | DISCORD_LEVEL_UP_CHANNEL_ID | False | `None` | Channel ID to send user level up messages to. | +| DISCORD_LOG_CHANNEL_ID | False | `None` | Channel ID to send bot log messages to. This should be a private channel. | | DISCORD_REDDIT_CHANNEL_ID | True | `None` | Channel ID to send Reddit post updates to. | | DISCORD_REDIRECT_URI | False | `https://localhost:8080/discord/callback` | The redirect uri for OAuth2. Must be publicly accessible. | | DISCORD_SPONSORS_CHANNEL_ID | True | `None` | Channel ID to send sponsorship updates to. 
| diff --git a/pyproject.toml b/pyproject.toml index 5f6e1c1..5f0a165 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,7 @@ classifiers = [ ] dependencies = [ + "colorlog==6.10.1", "cryptography==46.0.5", "Flask==3.1.2", "Flask-WTF==1.2.2", diff --git a/sample.env b/sample.env index a54edc1..49f8a38 100644 --- a/sample.env +++ b/sample.env @@ -8,6 +8,7 @@ SUPPORT_COMMANDS_BRANCH=master # Secret settings DISCORD_BOT_TOKEN= DISCORD_LEVEL_UP_CHANNEL_ID= +DISCORD_LOG_CHANNEL_ID= GRAVATAR_EMAIL= # discord/github oauth2 diff --git a/src/__main__.py b/src/__main__.py index 6b9f950..a68d75e 100644 --- a/src/__main__.py +++ b/src/__main__.py @@ -1,4 +1,5 @@ # standard imports +import logging import time # development imports @@ -7,17 +8,27 @@ # local imports, import after env loaded from src.common import globals # noqa: E402 +from src.common import logging_config # noqa: E402 from src.discord_bot import bot as d_bot # noqa: E402 from src.common import webapp # noqa: E402 from src.reddit_bot import bot as r_bot # noqa: E402 +# Get logger for this module +logger = logging.getLogger(__name__) + def main(): + # Initialize logging system + discord_handler = logging_config.setup_logging() + webapp.start() # Start the web server globals.DISCORD_BOT = d_bot.Bot() globals.DISCORD_BOT.start_threaded() # Start the discord bot + # Configure Discord handler after bot is initialized + discord_handler.setup(globals.DISCORD_BOT) + globals.REDDIT_BOT = r_bot.Bot() globals.REDDIT_BOT.start_threaded() # Start the reddit bot @@ -25,7 +36,7 @@ def main(): while globals.DISCORD_BOT.bot_thread.is_alive() or globals.REDDIT_BOT.bot_thread.is_alive(): time.sleep(0.5) except KeyboardInterrupt: - print("Keyboard Interrupt Detected") + logger.info("Keyboard Interrupt Detected") globals.DISCORD_BOT.stop() globals.REDDIT_BOT.stop() diff --git a/src/common/crypto.py b/src/common/crypto.py index a59cf77..6e21a83 100644 --- a/src/common/crypto.py +++ b/src/common/crypto.py @@ -1,4 +1,5 @@ # standard imports +import logging import os # lib imports @@ -12,6 +13,9 @@ # local imports from src.common import common +# Get logger for this module +logger = logging.getLogger(__name__) + CERT_FILE = os.path.join(common.data_dir, "cert.pem") KEY_FILE = os.path.join(common.data_dir, "key.pem") @@ -58,12 +62,13 @@ def generate_certificate(): def initialize_certificate() -> tuple[str, str]: - print("Initializing SSL certificate") + logger.info("Initializing SSL certificate") if os.path.exists(CERT_FILE) and os.path.exists(KEY_FILE): cert_expires_in = check_expiration(CERT_FILE) - print(f"Certificate expires in {cert_expires_in} days.") + logger.info(f"Certificate expires in {cert_expires_in} days.") if cert_expires_in >= 90: return CERT_FILE, KEY_FILE - print("Generating new certificate") + + logger.info("Generating new certificate") generate_certificate() return CERT_FILE, KEY_FILE diff --git a/src/common/database.py b/src/common/database.py index 4f7d5c9..79c017b 100644 --- a/src/common/database.py +++ b/src/common/database.py @@ -1,9 +1,9 @@ # standard imports +import logging import os from pathlib import Path import shelve import threading -import traceback from typing import Union # lib imports @@ -15,6 +15,9 @@ # local imports from src.common.common import data_dir +# Get logger for this module +logger = logging.getLogger(__name__) + # Constants DATA_REPO_LOCK = threading.Lock() GIT_ENABLED = True # disable to pause pushing to git, useful for heavy db operations @@ -52,7 +55,7 @@ def __init__(self, db_name: str, db_dir: 
Union[str, Path] = data_dir, use_git: b if not os.path.exists(self.db_dir): # Clone repo if it doesn't exist - print(f"Cloning repository {self.repo_url} to {self.db_dir}") + logger.info(f"Cloning repository {self.repo_url} to {self.db_dir}") try: # Try cloning with the specified branch self.repo = git.Repo.clone_from(clone_url, self.db_dir, branch=self.repo_branch) @@ -61,7 +64,7 @@ def __init__(self, db_name: str, db_dir: Union[str, Path] = data_dir, use_git: b except git.exc.GitCommandError as e: # Check if the error is due to branch not found if "Remote branch" in str(e) and "not found in upstream origin" in str(e): - print(f"Branch '{self.repo_branch}' not found in remote. Creating a new empty branch.") + logger.info(f"Branch '{self.repo_branch}' not found in remote. Creating a new empty branch.") # Clone with default branch first self.repo = git.Repo.clone_from(clone_url, self.db_dir) # Configure the repo @@ -99,9 +102,9 @@ def __init__(self, db_name: str, db_dir: Union[str, Path] = data_dir, use_git: b # Push the new branch to remote try: self.repo.git.push('--set-upstream', 'origin', self.repo_branch) - print(f"Created and pushed new empty branch '{self.repo_branch}'") + logger.info(f"Created and pushed new empty branch '{self.repo_branch}'") except git.exc.GitCommandError as e: - print(f"Failed to push new branch: {str(e)}") + logger.error(f"Failed to push new branch: {str(e)}") # Continue anyway - we might not have push permissions else: # Re-raise if it's a different error @@ -135,9 +138,10 @@ def __init__(self, db_name: str, db_dir: Union[str, Path] = data_dir, use_git: b self.repo.git.add(gitkeep_path) self.repo.git.commit('-m', f"Initialize empty branch '{self.repo_branch}'") self.repo.git.push('--set-upstream', 'origin', self.repo_branch) - print(f"Created and pushed new empty branch '{self.repo_branch}'") + logger.info(f"Created and pushed new empty branch '{self.repo_branch}'") except git.exc.GitCommandError: - print(f"Failed to work with branch '{self.repo_branch}'. Using current branch instead.") + logger.warning( + f"Failed to work with branch '{self.repo_branch}'. 
Using current branch instead.") else: # Branch exists locally, make sure it's checked out self.repo.git.checkout(self.repo_branch) @@ -181,7 +185,7 @@ def _configure_repo(self): origin = self.repo.remote('origin') origin.set_url(new_url) except git.exc.GitCommandError as e: - print(f"Failed to update remote URL: {str(e)}") + logger.error(f"Failed to update remote URL: {str(e)}") # Continue anyway, might work with stored credentials def _check_for_migration(self): @@ -191,7 +195,7 @@ json_exists = os.path.exists(self.json_path) if shelve_exists and not json_exists: - print(f"Migrating database from shelve to TinyDB: {self.shelve_path}") + logger.info(f"Migrating database from shelve to TinyDB: {self.shelve_path}") self._migrate_from_shelve() def _migrate_from_shelve(self): @@ -265,10 +269,9 @@ migration_db.storage.flush() migration_db.close() - print(f"Migration completed successfully: {self.json_path}") + logger.info(f"Migration completed successfully: {self.json_path}") except Exception as e: - print(f"Migration failed: {str(e)}") - traceback.print_exc() + logger.error(f"Migration failed: {str(e)}", exc_info=True) def __enter__(self): self.lock.acquire() @@ -315,7 +318,7 @@ def sync(self): # Commit all changes at once with a general message commit_message = "Update database files" self.repo.git.commit('-m', commit_message) - print("Committed changes to git data repository") + logger.info("Committed changes to git data repository") # Push to remote with credentials try: @@ -323,13 +326,12 @@ protocol, repo_path = self.repo_url.split("://", 1) push_url = f"{protocol}://{self.git_user_name}:{self.git_token}@{repo_path}" self.repo.git.push(push_url, self.repo_branch) - print("Pushed changes to remote git data repository") + logger.info("Pushed changes to remote git data repository") except git.exc.GitCommandError as e: - print(f"Failed to push changes: {str(e)}") + logger.error(f"Failed to push changes: {str(e)}") except Exception as e: - print(f"Git operation failed: {str(e)}") - traceback.print_exc() + logger.error(f"Git operation failed: {str(e)}", exc_info=True) finally: # Ensure database is ready for next use if self.tinydb is None: diff --git a/src/common/logging_config.py b/src/common/logging_config.py new file mode 100644 index 0000000..c087b67 --- /dev/null +++ b/src/common/logging_config.py @@ -0,0 +1,320 @@ +""" +Logging configuration module for the support bot. + +This module provides a centralized logging configuration with multiple handlers: +- Console output with color support +- Rotating file handler (rotates when file reaches max size, keeps 5 files) +- Discord channel handler for high-severity logs (WARNING and above) +""" + +# standard imports +import logging +from logging.handlers import RotatingFileHandler +import os +import sys +from typing import Optional + +# lib imports +try: + import colorlog +except ImportError: + colorlog = None + + +class DiscordHandler(logging.Handler): + """ + Custom logging handler that sends log messages to a Discord channel. + + Messages logged before the bot is ready are queued and sent once the bot is initialized.
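+
+    A minimal usage sketch of the intended two-phase initialization (``bot`` is
+    assumed to expose the ``async_send_message`` coroutine used elsewhere in
+    this patch)::
+
+        handler = DiscordHandler(level=logging.WARNING)
+        logging.getLogger().addHandler(handler)
+        # records emitted here are queued (up to a small limit) ...
+        handler.setup(bot, channel_id=123456789)  # ... then flushed once the bot is ready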
+ """ + + def __init__(self, level=logging.WARNING): + super().__init__(level) + self.bot = None + self.channel_id = None + self._setup_complete = False + self._message_queue = [] # Queue for messages before bot is ready + self._max_queue_size = 50 # Limit queue size to prevent memory issues + self._pending_tasks = set() # Track active tasks to prevent garbage collection + + def setup(self, bot, channel_id: Optional[int] = None): + """ + Setup the Discord handler with a bot instance and channel ID. + + After setup, any queued messages will be sent to Discord. + + Parameters + ---------- + bot : discord.Bot or Bot + The Discord bot instance. + channel_id : int, optional + The Discord channel ID to send logs to. If not provided, will use environment variable. + """ + self.bot = bot + self.channel_id = channel_id or os.getenv("DISCORD_LOG_CHANNEL_ID") + if self.channel_id: + self.channel_id = int(self.channel_id) + self._setup_complete = True + + # Send any queued messages + self._flush_queue() + + def emit(self, record: logging.LogRecord): + """ + Emit a log record to Discord. + + If the bot is not yet setup, the message is queued and will be sent once setup is complete. + + Parameters + ---------- + record : logging.LogRecord + The log record to emit. + """ + # If not setup yet, queue the message + if not self._setup_complete or not self.channel_id: + if len(self._message_queue) < self._max_queue_size: + self._message_queue.append(record) + return + + try: + self._send_to_discord(record) + except Exception: + self.handleError(record) + + def _send_to_discord(self, record: logging.LogRecord): + """ + Send a log record to Discord channel. + + Parameters + ---------- + record : logging.LogRecord + The log record to send. + """ + msg = self.format(record) + + # Create embed based on log level + import discord + import asyncio + from datetime import datetime, UTC + + # Define colors here to avoid import issues + color_map = { + logging.DEBUG: 0x0000FF, # Blue + logging.INFO: 0x00FF00, # Green + logging.WARNING: 0xFFFF00, # Yellow + logging.ERROR: 0xFFA500, # Orange + logging.CRITICAL: 0xFF0000, # Red + } + + embed = discord.Embed( + title=f"{record.levelname}", + description=f"```\n{msg}\n```", + color=color_map.get(record.levelno, 0x808080), + timestamp=datetime.fromtimestamp(record.created, tz=UTC) # Discord auto-formats as "X minutes ago" + ) + + # Add fields with better formatting + embed.add_field(name="Logger", value=record.name, inline=True) + # Format module field as "filename.py:lineno" + module_info = f"{record.filename}:{record.lineno}" + embed.add_field(name="Module", value=module_info, inline=True) + embed.add_field(name="Function", value=record.funcName, inline=True) + + # Send message to Discord asynchronously without blocking + # Use asyncio.create_task if we're in an event loop, otherwise use run_coroutine_threadsafe + if self.bot and hasattr(self.bot, 'loop'): + try: + # Try to get the running loop + loop = asyncio.get_running_loop() + # If we're in the bot's event loop, schedule the task without waiting + if loop == self.bot.loop: + # Create a task but don't wait for it + # Store the task to prevent garbage collection + task = asyncio.create_task(self.bot.async_send_message( + channel_id=self.channel_id, + embed=embed, + )) + self._pending_tasks.add(task) + # Remove task from set when completed to prevent memory leak + task.add_done_callback(self._pending_tasks.discard) + else: + # We're in a different thread, use run_coroutine_threadsafe but don't wait + future = 
asyncio.run_coroutine_threadsafe( + self.bot.async_send_message( + channel_id=self.channel_id, + embed=embed, + ), + self.bot.loop + ) + # Store future reference to prevent garbage collection + self._pending_tasks.add(future) + # Remove future from set when completed + future.add_done_callback(self._pending_tasks.discard) + except RuntimeError: + # No event loop running in this thread, use run_coroutine_threadsafe + future = asyncio.run_coroutine_threadsafe( + self.bot.async_send_message( + channel_id=self.channel_id, + embed=embed, + ), + self.bot.loop + ) + # Store future reference to prevent garbage collection + self._pending_tasks.add(future) + # Remove future from set when completed + future.add_done_callback(self._pending_tasks.discard) + + def _flush_queue(self): + """ + Flush any queued messages to Discord. + + Called automatically when the bot is setup. + """ + if not self._message_queue: + return + + # Send all queued messages + queued_count = len(self._message_queue) + for record in self._message_queue: + try: + self._send_to_discord(record) + except Exception: + # Silently fail for queued messages to avoid spam + pass + + # Clear the queue + self._message_queue.clear() + + # Log that we flushed queued messages (but don't send this to Discord to avoid recursion) + import logging + logger = logging.getLogger(__name__) + # Temporarily remove this handler to avoid recursion + root_logger = logging.getLogger() + if self in root_logger.handlers: + root_logger.removeHandler(self) + logger.info(f"Flushed {queued_count} queued log messages to Discord") + root_logger.addHandler(self) + + +def setup_logging( + log_level: int = logging.INFO, + log_file: str = "support-bot.log", + max_bytes: int = 10 * 1024 * 1024, # 10 MB + backup_count: int = 5, +) -> DiscordHandler: + """ + Setup logging configuration with multiple handlers. + + Parameters + ---------- + log_level : int, optional + The logging level (default: logging.INFO). + log_file : str, optional + The log file name (default: "support-bot.log"). + max_bytes : int, optional + Maximum size of each log file before rotation (default: 10 MB). + backup_count : int, optional + Number of backup log files to keep (default: 5). + + Returns + ------- + DiscordHandler + The Discord handler instance that needs to be configured later. + """ + # Compute data_dir locally to avoid circular imports + # Get the application directory + app_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + data_dir = os.path.join(app_dir, "data") + + # Create logs directory + logs_dir = os.path.join(data_dir, "logs") + os.makedirs(logs_dir, exist_ok=True) + + # Get root logger + root_logger = logging.getLogger() + root_logger.setLevel(log_level) + + # Remove any existing handlers + for handler in root_logger.handlers[:]: + root_logger.removeHandler(handler) + + # Define log format with file, module, and line number + log_format = "%(asctime)s - %(name)s - [%(filename)s:%(lineno)d] - %(levelname)s - %(message)s" + date_format = "%Y-%m-%d %H:%M:%S" + + # 1. 
Console Handler with color support + if colorlog and sys.stdout.isatty(): + # Use colorlog if available and stdout is a TTY + console_handler = colorlog.StreamHandler() + console_formatter = colorlog.ColoredFormatter( + fmt="%(log_color)s%(asctime)s - %(name)s - [%(filename)s:%(lineno)d] - %(levelname)s - %(message)s", + datefmt=date_format, + log_colors={ + 'DEBUG': 'cyan', + 'INFO': 'green', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'red,bg_white', + }, + ) + console_handler.setFormatter(console_formatter) + else: + # Fallback to standard console handler + console_handler = logging.StreamHandler(sys.stdout) + console_formatter = logging.Formatter(log_format, datefmt=date_format) + console_handler.setFormatter(console_formatter) + + console_handler.setLevel(log_level) + root_logger.addHandler(console_handler) + + # 2. Rotating File Handler (rotates automatically when file reaches max_bytes) + log_file_path = os.path.join(logs_dir, log_file) + file_handler = RotatingFileHandler( + filename=log_file_path, + maxBytes=max_bytes, + backupCount=backup_count, + encoding='utf-8', + ) + file_formatter = logging.Formatter(log_format, datefmt=date_format) + file_handler.setFormatter(file_formatter) + file_handler.setLevel(log_level) + root_logger.addHandler(file_handler) + + # 3. Discord Handler (will be configured later) + discord_handler = DiscordHandler(level=logging.WARNING) # Only send WARNING and above to Discord + discord_formatter = logging.Formatter("%(message)s") + discord_handler.setFormatter(discord_formatter) + root_logger.addHandler(discord_handler) + + # Set logging levels for noisy libraries + logging.getLogger('discord').setLevel(logging.WARNING) + logging.getLogger('discord.http').setLevel(logging.WARNING) + logging.getLogger('discord.gateway').setLevel(logging.WARNING) + logging.getLogger('discord.client').setLevel(logging.INFO) + logging.getLogger('urllib3').setLevel(logging.WARNING) + logging.getLogger('werkzeug').setLevel(logging.INFO) + logging.getLogger('prawcore').setLevel(logging.WARNING) + logging.getLogger('praw').setLevel(logging.WARNING) + + root_logger.info("Logging system initialized") + root_logger.info(f"Log file: {log_file_path}") + root_logger.info(f"Console color support: {'enabled' if colorlog and sys.stdout.isatty() else 'disabled'}") + + return discord_handler + + +def get_logger(name: str) -> logging.Logger: + """ + Get a logger instance with the specified name. + + Parameters + ---------- + name : str + The name of the logger. + + Returns + ------- + logging.Logger + The logger instance. 
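+
+    Examples
+    --------
+    A thin convenience wrapper around ``logging.getLogger``::
+
+        logger = get_logger(__name__)
+        logger.info("module initialized")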
+ """ + return logging.getLogger(name) diff --git a/src/common/rank.py b/src/common/rank.py index e2b505b..023aa34 100644 --- a/src/common/rank.py +++ b/src/common/rank.py @@ -1,5 +1,6 @@ # standard imports from datetime import datetime, UTC +import logging import math import random import threading @@ -17,6 +18,9 @@ from src.common.rank_database import RankDatabase from src.discord_bot.bot import Bot +# Get logger for this module +logger = logging.getLogger(__name__) + # Global migration lock to prevent concurrent migrations MIGRATION_LOCK = threading.Lock() @@ -160,7 +164,7 @@ def get_rank_data( ) user_id = user.id - print(f"Getting rank data for user {user_id} in community {community_id} on platform {platform}") + logger.info(f"Getting rank data for user {user_id} in community {community_id} on platform {platform}") user_data = self.db.get_user_data( platform=platform, @@ -494,12 +498,12 @@ def _process_reddit_item( try: author = reddit_bot.fetch_user(name=author_name) except Exception as e: - print(f"Error fetching author '{author_name}' for {item_type}: {type(e).__name__}: {e}") + logger.warning(f"Error fetching author '{author_name}' for {item_type}: {type(e).__name__}: {e}") return False, True # Skip items without valid author if not author or not hasattr(author, 'id'): - print(f"Invalid author object for '{author_name}', skipping {item_type}") + logger.warning(f"Invalid author object for '{author_name}', skipping {item_type}") return False, True # Award random XP in range @@ -540,18 +544,18 @@ def _update_reddit_rank_database( updated_users = 0 total_users = len(user_xp_map) - print(f"Updating {total_users} users in rank database") + logger.info(f"Updating {total_users} users in rank database") with self.db as db: table = db.table('reddit_users') # Build a lookup map of existing users for faster access - print("Building lookup map of existing users...") + logger.info("Building lookup map of existing users...") existing_users_map = {} for item in table.all(): key = (item.get('user_id'), item.get('community_id')) existing_users_map[key] = item - print(f"Found {len(existing_users_map)} existing users") + logger.info(f"Found {len(existing_users_map)} existing users") # Process each user processed = 0 @@ -585,9 +589,9 @@ def _update_reddit_rank_database( processed += 1 # Print progress every 50 users if processed % 50 == 0: - print(f"Progress: {processed}/{total_users} users processed") + logger.info(f"Progress: {processed}/{total_users} users processed") - print(f"Finished updating {total_users} users") + logger.info(f"Finished updating {total_users} users") return new_users, updated_users @@ -607,14 +611,14 @@ def _do_reddit_migration( original_git_enabled = database.GIT_ENABLED database.GIT_ENABLED = False # Disable Git for this operation - print("Starting Reddit ranks migration") + logger.info("Starting Reddit ranks migration") try: # Process submissions with reddit_db as db: submissions_table = db.table('submissions') - print(f"Processing {len(submissions_table.all())} submissions") + logger.info(f"Processing {len(submissions_table.all())} submissions") for submission in submissions_table.all(): try: success, skipped = self._process_reddit_item( @@ -626,13 +630,14 @@ def _do_reddit_migration( skipped_submissions += 1 except Exception as e: author_name = submission.get('author', 'unknown') - print(f"Unexpected error processing submission by '{author_name}': {type(e).__name__}: {e}") + logger.error( + f"Unexpected error processing submission by '{author_name}': {type(e).__name__}: 
{e}") skipped_submissions += 1 # Process comments comments_table = db.table('comments') - print(f"Processing {len(comments_table.all())} comments") + logger.info(f"Processing {len(comments_table.all())} comments") for comment in comments_table.all(): try: success, skipped = self._process_reddit_item( @@ -644,14 +649,14 @@ def _do_reddit_migration( skipped_comments += 1 except Exception as e: author_name = comment.get('author', 'unknown') - print(f"Unexpected error processing comment by '{author_name}': {type(e).__name__}: {e}") + logger.error(f"Unexpected error processing comment by '{author_name}': {type(e).__name__}: {e}") skipped_comments += 1 # Update the rank database new_users, updated_users = self._update_reddit_rank_database(community_id, user_xp_map) except Exception as e: - print(f"Error during Reddit migration: {type(e).__name__}: {e}") + logger.error(f"Error during Reddit migration: {type(e).__name__}: {e}", exc_info=True) database.GIT_ENABLED = original_git_enabled raise @@ -672,7 +677,7 @@ def _do_reddit_migration( 'date': datetime.now(UTC).isoformat(), } - print(f"Reddit migration completed with stats: {stats}") + logger.info(f"Reddit migration completed with stats: {stats}") return stats async def migrate_from_mee6(self, guild_id: int) -> Dict[str, Union[int, str]]: @@ -726,29 +731,29 @@ async def _fetch_mee6_page( List of player data, or None if fetch failed """ url = f"https://mee6.xyz/api/plugins/levels/leaderboard/{guild_id}?page={page}" - print(f"Fetching Mee6 data from: {url}") + logger.info(f"Fetching Mee6 data from: {url}") try: async with session.get(url, timeout=10) as response: if response.status != 200: - print(f"Received status code {response.status}, stopping migration") + logger.warning(f"Received status code {response.status}, stopping migration") return None data = await response.json() if not data.get('players') or len(data['players']) == 0: - print("No more players found, stopping migration") + logger.info("No more players found, stopping migration") return None player_count = len(data['players']) - print(f"Processing {player_count} players from page {page}") + logger.info(f"Processing {player_count} players from page {page}") return data['players'] except aiohttp.ClientError as e: - print(f"HTTP error during migration: {e}") + logger.error(f"HTTP error during migration: {e}", exc_info=True) return None except Exception as e: - print(f"Unexpected error during migration: {e}") + logger.error(f"Unexpected error during migration: {e}", exc_info=True) import traceback traceback.print_exc() return None @@ -846,7 +851,7 @@ async def _do_mee6_migration(self, guild_id: int) -> Dict[str, Union[int, str]]: page += 1 # Now apply all updates in batches - print(f"Applying {len(batch_updates)} user updates in batches") + logger.info(f"Applying {len(batch_updates)} user updates in batches") BATCH_SIZE = 100 for i in range(0, len(batch_updates), BATCH_SIZE): @@ -854,7 +859,7 @@ async def _do_mee6_migration(self, guild_id: int) -> Dict[str, Union[int, str]]: batch_new, batch_updated = self._process_mee6_batch(guild_id, batch) new_users += batch_new updated_users += batch_updated - print(f"Completed batch {i//BATCH_SIZE + 1}/{(len(batch_updates)-1)//BATCH_SIZE + 1}") + logger.info(f"Completed batch {i//BATCH_SIZE + 1}/{(len(batch_updates)-1)//BATCH_SIZE + 1}") # Restore original Git state and force one final sync database.GIT_ENABLED = original_git_enabled @@ -869,5 +874,5 @@ async def _do_mee6_migration(self, guild_id: int) -> Dict[str, Union[int, str]]: 'date': 
datetime.now(UTC).isoformat(), } - print(f"Migration completed with stats: {stats}") + logger.info(f"Migration completed with stats: {stats}") return stats diff --git a/src/common/sponsors.py b/src/common/sponsors.py index 56b342a..793c5d8 100644 --- a/src/common/sponsors.py +++ b/src/common/sponsors.py @@ -1,10 +1,14 @@ # standard imports +import logging import os from typing import Union # lib imports import requests +# Get logger for this module +logger = logging.getLogger(__name__) + tier_map = { 't4-sponsors': 15, @@ -66,8 +70,7 @@ def get_github_sponsors() -> Union[dict, False]: data = response.json() if 'errors' in data or 'message' in data: - print(data) - print('::error::An error occurred while fetching sponsors.') + logger.error(f"Error fetching sponsors: {data}") return False return data diff --git a/src/common/webapp.py b/src/common/webapp.py index 55a4732..b941a94 100644 --- a/src/common/webapp.py +++ b/src/common/webapp.py @@ -1,6 +1,7 @@ # standard imports import asyncio import html +import logging import os from threading import Thread from typing import Tuple @@ -18,6 +19,9 @@ from src.common import globals from src.common import time +# Get logger for this module +logger = logging.getLogger(__name__) + DISCORD_CLIENT_ID = os.getenv("DISCORD_CLIENT_ID") DISCORD_CLIENT_SECRET = os.getenv("DISCORD_CLIENT_SECRET") @@ -290,9 +294,9 @@ def webhook(source: str, key: str) -> Tuple[Response, int]: if key != os.getenv("GITHUB_WEBHOOK_SECRET_KEY"): return jsonify({"status": "error", "message": "Invalid key"}), 400 - print(f"received webhook from {source}") + logger.info(f"received webhook from {source}") data = request.json - print(f"received webhook data: \n{data}") + logger.info(f"received webhook data: \n{data}") # process the webhook data if source == "github_sponsors": diff --git a/src/discord_bot/bot.py b/src/discord_bot/bot.py index fc9e3f5..1f2d1b2 100644 --- a/src/discord_bot/bot.py +++ b/src/discord_bot/bot.py @@ -1,5 +1,6 @@ # standard imports import asyncio +import logging import os import threading from typing import Literal, Optional @@ -12,6 +13,9 @@ from src.common.database import Database from src.discord_bot.views import DonateCommandView +# Get logger for this module +logger = logging.getLogger(__name__) + class Bot(discord.Bot): """ @@ -55,9 +59,9 @@ async def on_ready(self): This function runs when the discord bot is ready. The function will update the bot presence, update the username and avatar, and start tasks. 
""" - print(f'py-cord version: {discord.__version__}') - print(f'Logged in as {self.user.name} (ID: {self.user.id})') - print(f'Servers connected to: {self.guilds}') + logger.info(f'py-cord version: {discord.__version__}') + logger.info(f'Logged in as {self.user.name} (ID: {self.user.id})') + logger.info(f'Servers connected to: {self.guilds}') # update the username and avatar avatar_img = get_avatar_bytes() @@ -115,7 +119,7 @@ async def async_send_message( try: return await channel.send(content=message, embed=embed) except Exception as e: - print(f"Error sending message: {e}") + logger.error(f"Error sending message: {e}", exc_info=True) self.DEGRADED = True def send_message( @@ -266,17 +270,17 @@ def start_threaded(self): ) self.bot_thread.start() except KeyboardInterrupt: - print("Keyboard Interrupt Detected") + logger.info("Keyboard Interrupt Detected") self.DEGRADED = True self.stop() def stop(self, future: asyncio.Future = None): - print("Attempting to stop tasks") + logger.info("Attempting to stop tasks") self.DEGRADED = True self.role_update_task.stop() self.clean_ephemeral_cache.stop() - print("Attempting to close bot connection") + logger.info("Attempting to close bot connection") if self.bot_thread is not None and self.bot_thread.is_alive(): asyncio.run_coroutine_threadsafe(self.close(), self.loop) self.bot_thread.join() - print("Closed bot") + logger.info("Closed bot") diff --git a/src/discord_bot/cogs/rank.py b/src/discord_bot/cogs/rank.py index d2b1765..6e1f0df 100644 --- a/src/discord_bot/cogs/rank.py +++ b/src/discord_bot/cogs/rank.py @@ -1,4 +1,5 @@ # standard imports +import logging import os from typing import Union @@ -11,6 +12,9 @@ from src.common.rank import RankSystem from src.common import globals +# Get logger for this module +logger = logging.getLogger(__name__) + class RankCog(discord.Cog): """ @@ -58,7 +62,7 @@ async def xp_award_task(self): embed.set_thumbnail(url=user.display_avatar.url) await channel.send(embed=embed) except Exception as e: - print(f"Error handling level up notification: {e}") + logger.error(f"Error handling level up notification: {e}", exc_info=True) # Clear the set for the next minute self.active_users.clear() @@ -513,11 +517,11 @@ async def auto_migrate_mee6(self): source_id=guild.id, ) if migration_status: - print(f"Mee6 migration already completed for guild: {guild.name} ({guild.id})") + logger.info(f"Mee6 migration already completed for guild: {guild.name} ({guild.id})") continue # Skip if already migrated try: - print(f"Starting automatic Mee6 migration for guild: {guild.name} ({guild.id})") + logger.info(f"Starting automatic Mee6 migration for guild: {guild.name} ({guild.id})") result = await self.rank_system.migrate_from_mee6(guild_id=guild.id) # Save migration status @@ -528,7 +532,7 @@ async def auto_migrate_mee6(self): stats=result, ) - print(f"Completed Mee6 migration for {guild.name}: {result['total_processed']} users processed") + logger.info(f"Completed Mee6 migration for {guild.name}: {result['total_processed']} users processed") # Optional: Notify in a system channel or log channel system_channel = guild.system_channel @@ -548,7 +552,7 @@ async def auto_migrate_mee6(self): pass # Silently fail if can't send except Exception as e: - print(f"Error during automatic Mee6 migration for guild {guild.id}: {e}") + logger.error(f"Error during automatic Mee6 migration for guild {guild.id}: {e}", exc_info=True) def setup(bot): diff --git a/src/reddit_bot/bot.py b/src/reddit_bot/bot.py index c2a1d25..a2a28d1 100644 --- 
a/src/reddit_bot/bot.py +++ b/src/reddit_bot/bot.py @@ -1,5 +1,6 @@ # standard imports from datetime import datetime +import logging import os import sys import threading @@ -19,6 +20,9 @@ from src.common.database import Database from src.common.rank import RankSystem +# Get logger for this module +logger = logging.getLogger(__name__) + class Bot: def __init__(self, **kwargs): @@ -112,7 +116,7 @@ def fetch_user(self, name: str) -> Optional[models.Redditor]: redditor.id except prawcore.NotFound: # If the user is suspended or deleted, return None - print(f"User {name} not found or suspended.") + logger.warning(f"User {name} not found or suspended.") return None return redditor @@ -141,9 +145,9 @@ def process_comment(self, comment: models.Comment): # Pass the actual Reddit user object to award_xp xp_result = self.award_reddit_xp(comment.author) if xp_result and xp_result.get('level_up'): - print(f"User {comment.author.name} leveled up to {xp_result.get('level')}!") + logger.info(f"User {comment.author.name} leveled up to {xp_result.get('level')}!") except Exception as e: - print(f"Error awarding XP: {e}") + logger.error(f"Error awarding XP: {e}", exc_info=True) comment_data = self.slash_commands(comment=comment, comment_data=comment_data) comment_data['processed'] = True @@ -204,9 +208,7 @@ def process_submission(self, submission: models.Submission): 'bot_discord': {'sent': False, 'sent_utc': None}, } - print(f'submission id: {submission.id}') - print(f'submission title: {submission.title}') - print('---------') + logger.info(f"New submission: {submission_data}") # Award XP for new submissions try: @@ -214,9 +216,9 @@ def process_submission(self, submission: models.Submission): # Pass the actual Reddit user object xp_result = self.award_reddit_xp(submission.author) if xp_result and xp_result.get('level_up'): - print(f"User {submission.author.name} leveled up to {xp_result.get('level')}!") + logger.info(f"User {submission.author.name} leveled up to {xp_result.get('level')}!") except Exception as e: - print(f"Error awarding XP: {e}") + logger.error(f"Error awarding XP: {e}", exc_info=True) if os.getenv('DISCORD_REDDIT_CHANNEL_ID'): submission_data = self.discord(submission=submission, submission_data=submission_data) @@ -264,10 +266,10 @@ def migrate_reddit_ranks(self): ) if migration_status: - print(f"Reddit ranks migration already completed on {migration_status.get('timestamp')}") + logger.info(f"Reddit ranks migration already completed on {migration_status.get('timestamp')}") return - print("Starting Reddit ranks migration...") + logger.info("Starting Reddit ranks migration...") # Start the migration process stats = self.rank_system.migrate_from_reddit_database( @@ -284,10 +286,10 @@ def migrate_reddit_ranks(self): stats=stats, ) - print(f"Reddit ranks migration completed: {stats}") + logger.info(f"Reddit ranks migration completed: {stats}") except Exception as e: - print(f"Error during Reddit ranks migration: {e}") + logger.error(f"Error during Reddit ranks migration: {e}", exc_info=True) self.DEGRADED = True reason = inspector.current_name() self.DEGRADED_REASONS.append(reason) if reason not in self.DEGRADED_REASONS else None @@ -362,7 +364,7 @@ def slash_commands(self, comment: models.Comment, comment_data: dict) -> dict: if not comment.body.startswith("/"): return comment_data - print(f"Processing slash command: {comment.body}") + logger.info(f"Processing slash command: {comment.body}") # Split the comment into project and command parts = comment.body[1:].split() project = parts[0] @@ 
-406,7 +408,7 @@ def _comment_loop(self, test: bool = False): if test: return comment except prawcore.exceptions.ServerError as e: - print(f"Server Error: {e}") + logger.error(f"Server Error: {e}", exc_info=True) self.DEGRADED = True self.DEGRADED_REASONS.append(reason) if reason not in self.DEGRADED_REASONS else None time.sleep(60) @@ -429,7 +431,7 @@ def _submission_loop(self, test: bool = False): if test: return submission except prawcore.exceptions.ServerError as e: - print(f"Server Error: {e}") + logger.error(f"Server Error: {e}", exc_info=True) self.DEGRADED = True self.DEGRADED_REASONS.append(reason) if reason not in self.DEGRADED_REASONS else None time.sleep(60) @@ -448,14 +450,14 @@ def start_threaded(self): self.bot_thread = threading.Thread(target=self.start, daemon=True) self.bot_thread.start() except KeyboardInterrupt: - print("Keyboard Interrupt Detected") + logger.info("Keyboard Interrupt Detected") self.DEGRADED = True reason = inspector.current_name() self.DEGRADED_REASONS.append(reason) if reason not in self.DEGRADED_REASONS else None self.stop() def stop(self): - print("Attempting to stop reddit bot") + logger.info("Attempting to stop reddit bot") self.STOP_SIGNAL = True self.DEGRADED = True reason = inspector.current_name() @@ -464,4 +466,4 @@ def stop(self): self.comment_thread.join() self.submission_thread.join() self.bot_thread.join() - print("Reddit bot stopped") + logger.info("Reddit bot stopped") diff --git a/src/reddit_bot/cogs/rank.py b/src/reddit_bot/cogs/rank.py index 32a4ab0..34b39ba 100644 --- a/src/reddit_bot/cogs/rank.py +++ b/src/reddit_bot/cogs/rank.py @@ -1,6 +1,12 @@ +# standard imports +import logging + # lib imports from praw import models +# Get logger for this module +logger = logging.getLogger(__name__) + class RedditRankManager: """ @@ -74,7 +80,7 @@ def __init__(self, name, subreddit_id): 'xp_for_next_level': next_level_xp - user_data['xp'] } except Exception as e: - print(f"Error getting rank for Reddit user {username}: {e}") + logger.error(f"Error getting rank for Reddit user {username}: {e}", exc_info=True) return None def get_leaderboard(self, limit: int = 10, offset: int = 0): @@ -107,7 +113,7 @@ def get_leaderboard(self, limit: int = 10, offset: int = 0): return leaderboard except Exception as e: - print(f"Error getting Reddit leaderboard: {e}") + logger.error(f"Error getting Reddit leaderboard: {e}", exc_info=True) return [] def respond_to_rank_command(self, comment: models.Comment): @@ -169,7 +175,7 @@ def respond_to_rank_command(self, comment: models.Comment): return True except Exception as e: - print(f"Error responding to rank command: {e}") + logger.error(f"Error responding to rank command: {e}", exc_info=True) return False def respond_to_leaderboard_command(self, comment: models.Comment): @@ -231,5 +237,5 @@ def respond_to_leaderboard_command(self, comment: models.Comment): return True except Exception as e: - print(f"Error responding to leaderboard command: {e}") + logger.error(f"Error responding to leaderboard command: {e}", exc_info=True) return False diff --git a/tests/unit/common/test_logging_config.py b/tests/unit/common/test_logging_config.py new file mode 100644 index 0000000..f91c3fb --- /dev/null +++ b/tests/unit/common/test_logging_config.py @@ -0,0 +1,439 @@ +""" +Unit tests for the logging configuration module. 
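+
+Run these tests in isolation with, for example::
+
+    pytest tests/unit/common/test_logging_config.py -v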
+""" + +# standard imports +import logging +import os +from unittest.mock import Mock, patch + +# lib imports +import pytest + +# local imports +from src.common import logging_config + + +class TestDiscordHandler: + """Test the DiscordHandler class.""" + + def test_discord_handler_initialization(self): + """Test that Discord handler initializes correctly.""" + handler = logging_config.DiscordHandler(level=logging.WARNING) + + assert handler.bot is None + assert handler.channel_id is None + assert handler._setup_complete is False + + def test_discord_handler_setup_with_channel_id(self): + """Test Discord handler setup with explicit channel ID.""" + handler = logging_config.DiscordHandler() + mock_bot = Mock() + + handler.setup(mock_bot, channel_id=123456789) + + assert handler.bot == mock_bot + assert handler.channel_id == 123456789 + assert handler._setup_complete is True + + def test_discord_handler_setup_with_env_variable(self): + """Test Discord handler setup with environment variable.""" + handler = logging_config.DiscordHandler() + mock_bot = Mock() + + with patch.dict(os.environ, {'DISCORD_LOG_CHANNEL_ID': '987654321'}): + handler.setup(mock_bot) + + assert handler.bot == mock_bot + assert handler.channel_id == 987654321 + assert handler._setup_complete is True + + def test_discord_handler_emit_not_setup(self): + """Test that emit does nothing if handler is not setup.""" + handler = logging_config.DiscordHandler() + record = logging.LogRecord( + name="test", + level=logging.ERROR, + pathname="test.py", + lineno=1, + msg="Test message", + args=(), + exc_info=None + ) + + # Should not raise an error + handler.emit(record) + + def test_discord_handler_emit_sends_message(self): + """Test that emit sends a message to Discord.""" + handler = logging_config.DiscordHandler() + mock_bot = Mock() + mock_bot.loop = None # No event loop in test + mock_bot.async_send_message = Mock() + + handler.setup(mock_bot, channel_id=123456789) + handler.setFormatter(logging.Formatter("%(message)s")) + + record = logging.LogRecord( + name="test", + level=logging.ERROR, + pathname="test.py", + lineno=42, + msg="Test error message", + args=(), + exc_info=None + ) + + # The handler now uses asyncio, so we just verify it doesn't crash + # In actual usage, it will send the message asynchronously + with patch('discord.Embed'): + handler.emit(record) + + # The emit should not raise an exception even if bot.loop is None + # This is expected behavior - it will fail gracefully + + def test_discord_handler_emit_handles_exceptions(self): + """Test that emit handles exceptions gracefully.""" + handler = logging_config.DiscordHandler() + mock_bot = Mock() + mock_bot.send_message = Mock(side_effect=Exception("Discord error")) + + handler.setup(mock_bot, channel_id=123456789) + handler.setFormatter(logging.Formatter("%(message)s")) + + record = logging.LogRecord( + name="test", + level=logging.ERROR, + pathname="test.py", + lineno=1, + msg="Test message", + args=(), + exc_info=None + ) + + # Should handle the exception internally + with patch('discord.Embed'): + handler.emit(record) + + def test_discord_handler_queues_messages_before_setup(self): + """Test that messages are queued before bot is ready.""" + handler = logging_config.DiscordHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + + # Create some log records before setup + record1 = logging.LogRecord( + name="test", + level=logging.WARNING, + pathname="test.py", + lineno=1, + msg="Message 1", + args=(), + exc_info=None + ) + record2 = 
logging.LogRecord( + name="test", + level=logging.ERROR, + pathname="test.py", + lineno=2, + msg="Message 2", + args=(), + exc_info=None + ) + + # Emit before setup - should be queued + handler.emit(record1) + handler.emit(record2) + + # Verify messages were queued + assert len(handler._message_queue) == 2 + assert handler._message_queue[0] == record1 + assert handler._message_queue[1] == record2 + + def test_discord_handler_flushes_queue_on_setup(self): + """Test that queued messages are sent when bot is setup.""" + handler = logging_config.DiscordHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + + # Queue some messages + record = logging.LogRecord( + name="test", + level=logging.WARNING, + pathname="test.py", + lineno=1, + msg="Queued message", + args=(), + exc_info=None + ) + handler.emit(record) + + assert len(handler._message_queue) == 1 + + # Setup the handler + mock_bot = Mock() + mock_bot.loop = None + mock_bot.async_send_message = Mock() + + with patch('discord.Embed'): + handler.setup(mock_bot, channel_id=123456789) + + # Queue should be cleared after setup + assert len(handler._message_queue) == 0 + + def test_discord_handler_queue_size_limit(self): + """Test that queue size is limited to prevent memory issues.""" + handler = logging_config.DiscordHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + + # Try to queue more than max_queue_size messages + for i in range(handler._max_queue_size + 10): + record = logging.LogRecord( + name="test", + level=logging.WARNING, + pathname="test.py", + lineno=i, + msg=f"Message {i}", + args=(), + exc_info=None + ) + handler.emit(record) + + # Should not exceed max_queue_size + assert len(handler._message_queue) == handler._max_queue_size + + +class TestSetupLogging: + """Test the setup_logging function.""" + + @pytest.fixture(autouse=True) + def cleanup_handlers(self): + """Fixture to clean up logging handlers after each test.""" + yield + # Clean up all handlers after test + root_logger = logging.getLogger() + for h in root_logger.handlers: + try: + h.close() + except Exception: + pass + try: + root_logger.removeHandler(h) + except Exception: + pass + + def test_setup_logging_returns_discord_handler(self, tmp_path): + """Test that setup_logging returns a DiscordHandler.""" + # Create proper directory structure + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + handler = logging_config.setup_logging(log_file="test-bot.log") + + assert isinstance(handler, logging_config.DiscordHandler) + + def test_setup_logging_configures_root_logger(self, tmp_path): + """Test that setup_logging configures the root logger.""" + # Create proper directory structure + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + logging_config.setup_logging(log_level=logging.DEBUG, log_file="test-bot2.log") + + # Check root logger level + root_logger = logging.getLogger() + assert root_logger.level == logging.DEBUG + + # Check handlers were added + assert len(root_logger.handlers) >= 3 # Console, File, Discord + + def test_setup_logging_sets_third_party_log_levels(self, tmp_path): + """Test that setup_logging sets appropriate levels for third-party loggers.""" + # Create proper directory structure + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + with 
patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + logging_config.setup_logging(log_file="test-bot3.log") + + # Check third-party library log levels + assert logging.getLogger('discord').level == logging.WARNING + assert logging.getLogger('urllib3').level == logging.WARNING + assert logging.getLogger('werkzeug').level == logging.INFO + + def test_setup_logging_without_colorlog(self, tmp_path): + """Test that setup_logging works without colorlog.""" + # Create proper directory structure + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + with patch('src.common.logging_config.colorlog', None): + with patch('sys.stdout.isatty', return_value=True): + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + handler = logging_config.setup_logging(log_file="test-bot4.log") + + # Should still work without colorlog + assert handler is not None + + def test_setup_logging_creates_rotating_file_handler(self, tmp_path): + """Test that setup_logging creates a rotating file handler.""" + # Create logs directory + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True, exist_ok=True) + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + logging_config.setup_logging(log_file="test-rotation.log", max_bytes=1024, backup_count=3) + + # Check that log file was created + log_file = logs_dir / "test-rotation.log" + assert log_file.exists() + + +class TestGetLogger: + """Test the get_logger function.""" + + def test_get_logger_returns_logger_instance(self): + """Test that get_logger returns a Logger instance.""" + logger = logging_config.get_logger("test_logger") + + assert isinstance(logger, logging.Logger) + assert logger.name == "test_logger" + + def test_get_logger_returns_same_instance(self): + """Test that get_logger returns the same instance for same name.""" + logger1 = logging_config.get_logger("test_logger") + logger2 = logging_config.get_logger("test_logger") + + assert logger1 is logger2 + + +class TestLogFormatting: + """Test log message formatting.""" + + @pytest.fixture(autouse=True) + def cleanup_handlers(self): + """Fixture to clean up logging handlers after each test.""" + yield + # Clean up all handlers after test + root_logger = logging.getLogger() + for h in root_logger.handlers: + try: + h.close() + except Exception: + pass + try: + root_logger.removeHandler(h) + except Exception: + pass + + def test_log_format_includes_file_and_line_number(self, tmp_path): + """Test that log messages include file and line number.""" + # Create logs directory + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True, exist_ok=True) + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + # Setup logging + logging_config.setup_logging(log_file="format-test.log") + + # Log a message + logger = logging.getLogger("test_format") + logger.info("Test message") + + # Flush handlers + root_logger = logging.getLogger() + for h in root_logger.handlers: + if hasattr(h, 'flush'): + h.flush() + + # Read log file + log_file = logs_dir / "format-test.log" + log_content = log_file.read_text() + + # Check format includes file and line number + assert "[" in log_content and "]" in log_content + assert "test_format" in log_content + assert "Test message" in log_content + + +class TestRotatingFileHandler: + """Test the rotating file handler functionality.""" + + @pytest.fixture(autouse=True) + def cleanup_handlers(self): + """Fixture to clean up logging 
handlers after each test.""" + yield + # Clean up all handlers after test + root_logger = logging.getLogger() + for h in root_logger.handlers: + try: + h.close() + except Exception: + pass + try: + root_logger.removeHandler(h) + except Exception: + pass + + def test_file_rotation_when_size_exceeded(self, tmp_path): + """Test that log files rotate when they exceed max size.""" + # Create logs directory + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + # Set very small max size to trigger rotation + max_bytes = 500 # 500 bytes + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + logging_config.setup_logging(log_file="rotation-test.log", max_bytes=max_bytes, backup_count=2) + + logger = logging.getLogger("rotation_test") + + # Write enough messages to exceed max_bytes + for i in range(50): + logger.info(f"This is test message number {i} with some additional text to make it longer") + + # Force flush + root_logger = logging.getLogger() + for h in root_logger.handlers: + if hasattr(h, 'flush'): + h.flush() + + # Check that log file exists + log_file = logs_dir / "rotation-test.log" + assert log_file.exists() + + def test_backup_count_limit(self, tmp_path): + """Test that only the specified number of backups are kept.""" + # Create logs directory + logs_dir = tmp_path / "data" / "logs" + logs_dir.mkdir(parents=True) + + # Set very small max size and only 2 backups + max_bytes = 100 + backup_count = 2 + + with patch('src.common.logging_config.os.path.dirname', return_value=str(tmp_path)): + logging_config.setup_logging( + log_file="backup-limit-test.log", + max_bytes=max_bytes, + backup_count=backup_count + ) + + logger = logging.getLogger("backup_test") + + # Write many messages to trigger multiple rotations + for i in range(100): + logger.error(f"Long error message number {i} to fill up the log file quickly") + + # Force flush + root_logger = logging.getLogger() + for h in root_logger.handlers: + if hasattr(h, 'flush'): + h.flush() + + # Count backup files + backup_files = list(logs_dir.glob("backup-limit-test.log.*")) + + # Should not exceed backup_count + assert len(backup_files) <= backup_count
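
Usage note: the console and rotating-file handlers installed by setup_logging() are active immediately, but Discord delivery only begins once DiscordHandler.setup() receives the live bot; WARNING-and-above records emitted in between are queued and flushed on setup. A minimal sketch of the two-phase wiring, mirroring src/__main__.py above (make_bot() is a hypothetical stand-in for constructing the real src.discord_bot.bot.Bot):

    import logging

    from src.common import logging_config

    # Phase 1: console, rotating-file, and Discord handlers are installed;
    # the returned DiscordHandler queues records until setup() is called.
    discord_handler = logging_config.setup_logging(log_level=logging.INFO)

    logger = logging.getLogger(__name__)
    logger.warning("queued until the Discord handler is configured")

    # Phase 2: hand the live bot to the handler; queued records are flushed.
    bot = make_bot()  # hypothetical; the entry point uses src.discord_bot.bot.Bot()
    discord_handler.setup(bot)  # channel_id falls back to DISCORD_LOG_CHANNEL_ID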