Skip to content

Commit cc8ec35

Browse files
committed
Enhance Python CI: Add comprehensive formatting and import sorting checks
- Added ruff format checking to python-test.yml workflow
- Created new python-format.yml workflow for dedicated formatting checks
- Added pyproject.toml with comprehensive ruff configuration
- Configured import sorting (isort replacement) and formatting (black replacement)
- Auto-formatted all Python code with ruff format
- Fixed exception chaining issues (B904) in parser files
- Enhanced CI now covers: linting, formatting, import sorting, and type checking

This replaces the need for separate black and isort tools by using ruff's built-in capabilities, providing faster and more consistent code quality checks.
1 parent c9c9d08 commit cc8ec35

File tree

17 files changed

+850
-635
lines changed

17 files changed

+850
-635
lines changed
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# Python Code Formatting
2+
# This workflow checks and can auto-fix Python code formatting
3+
name: Python Code Formatting
4+
permissions:
5+
contents: write
6+
pull-requests: write
7+
8+
on:
9+
push:
10+
paths:
11+
- '**.py'
12+
pull_request:
13+
paths:
14+
- '**.py'
15+
workflow_dispatch:
16+
17+
jobs:
18+
format-check:
19+
name: Check Python formatting
20+
runs-on: ubuntu-latest
21+
steps:
22+
- name: Checkout code
23+
uses: actions/checkout@v4
24+
- name: Setup Python Environment
25+
uses: ./.github/actions/setup-python-env
26+
with:
27+
python-version: '3.11'
28+
install-dev-reqs: 'true'
29+
30+
- name: Check import sorting with ruff
31+
run: |
32+
python -m ruff check src/ --select I --diff
33+
echo "Import sorting check completed"
34+
35+
- name: Check code formatting with ruff
36+
run: |
37+
python -m ruff format --check --diff src/
38+
echo "Code formatting check completed"
39+
40+
- name: Run ruff --fix (dry-run) to show potential fixes
41+
continue-on-error: true
42+
run: |
43+
echo "Preview of potential fixes:"
44+
python -m ruff check src/ --fix --diff || true

.github/workflows/python-test.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,9 @@ jobs:
2929
- name: Run ruff (lint)
3030
run: |
3131
python -m ruff check src/
32+
- name: Run ruff (format check)
33+
run: |
34+
python -m ruff format --check src/
3235
- name: Run unit tests with coverage
3336
run: |
3437
PYTHONPATH=. pytest --cov=src/ --cov-report=xml

pyproject.toml

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
# Ruff configuration for unstructuredDataHandler
2+
[tool.ruff]
3+
target-version = "py310"
4+
line-length = 88
5+
exclude = [
6+
".git",
7+
".mypy_cache",
8+
".pytest_cache",
9+
".ruff_cache",
10+
".venv",
11+
".venv_ci",
12+
"__pycache__",
13+
"build",
14+
"dist",
15+
]
16+
17+
[tool.ruff.lint]
18+
# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
19+
select = [
20+
"E", # pycodestyle errors
21+
"W", # pycodestyle warnings
22+
"F", # Pyflakes
23+
"I", # isort
24+
"B", # flake8-bugbear
25+
"C4", # flake8-comprehensions
26+
"UP", # pyupgrade
27+
]
28+
ignore = [
29+
"E501", # line too long, handled by black
30+
"B008", # do not perform function calls in argument defaults
31+
"C901", # too complex
32+
]
33+
34+
[tool.ruff.lint.per-file-ignores]
35+
"__init__.py" = ["F401"] # Allow unused imports in __init__.py
36+
"test/**/*.py" = ["B018"] # Allow useless expressions in tests
37+
38+
[tool.ruff.lint.isort]
39+
known-first-party = ["src"]
40+
force-single-line = true
41+
force-sort-within-sections = true
42+
43+
[tool.ruff.format]
44+
quote-style = "double"
45+
indent-style = "space"
46+
skip-magic-trailing-comma = false
47+
line-ending = "auto"

src/agents/deepagent.py

Lines changed: 33 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,16 @@
33
import argparse
44
import os
55
import re
6-
import yaml
7-
from typing import Any, Optional, List
6+
from typing import Any
87

9-
from langchain.agents import AgentExecutor, create_tool_calling_agent
10-
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
8+
from langchain.agents import AgentExecutor
9+
from langchain.agents import create_tool_calling_agent
10+
from langchain.prompts import ChatPromptTemplate
11+
from langchain.prompts import MessagesPlaceholder
1112
from langchain.tools import BaseTool
1213
from langchain_community.chat_message_histories import ChatMessageHistory
1314
from langchain_core.runnables.history import RunnableWithMessageHistory
15+
import yaml
1416

1517
# LLM names declared as Any so mypy accepts fallback to None if imports fail
1618
GoogleGenerativeAI: Any
@@ -24,12 +26,14 @@
2426

2527
try:
2628
from langchain_openai import ChatOpenAI # type: ignore
29+
2730
OpenAI = ChatOpenAI
2831
except Exception:
2932
OpenAI = None
3033

3134
try:
3235
from langchain_ollama import ChatOllama # type: ignore
36+
3337
Ollama = ChatOllama
3438
except Exception:
3539
Ollama = None
@@ -53,12 +57,13 @@ class FlexibleAgent:
5357
Flexible agent supporting multiple LLM providers (Gemini, OpenAI,
5458
Ollama, etc.) for unstructuredDataHandler.
5559
"""
60+
5661
def __init__(
5762
self,
58-
provider: Optional[str] = None,
59-
api_key: Optional[str] = None,
60-
model: Optional[str] = None,
61-
tools: Optional[List[Any]] = None,
63+
provider: str | None = None,
64+
api_key: str | None = None,
65+
model: str | None = None,
66+
tools: list[Any] | None = None,
6267
dry_run: bool = False,
6368
config_path: str = "config/model_config.yaml",
6469
**kwargs,
@@ -72,15 +77,14 @@ def __init__(
7277
config_path: Path to the model configuration file.
7378
kwargs: Additional LLM-specific arguments
7479
"""
75-
with open(config_path, 'r', encoding='utf-8') as f:
80+
with open(config_path, encoding="utf-8") as f:
7681
config = yaml.safe_load(f)
7782

7883
# Normalize provider and validate
79-
provider = (provider or config.get('default_provider') or "").lower()
84+
provider = (provider or config.get("default_provider") or "").lower()
8085
if not provider:
8186
raise ValueError(
82-
"Provider not specified and no default_provider found in "
83-
"config."
87+
"Provider not specified and no default_provider found in " "config."
8488
)
8589

8690
self.dry_run = bool(dry_run)
@@ -94,28 +98,24 @@ def __init__(
9498
return
9599

96100
# Configure agent from YAML
97-
agent_config = config.get('agent', {})
98-
verbose = agent_config.get('verbose', True)
101+
agent_config = config.get("agent", {})
102+
verbose = agent_config.get("verbose", True)
99103

100104
# Configure provider
101-
provider_config = config.get('providers', {}).get(provider, {})
102-
model = model or provider_config.get('default_model')
105+
provider_config = config.get("providers", {}).get(provider, {})
106+
model = model or provider_config.get("default_model")
103107

104108
try:
105-
if provider == 'gemini':
109+
if provider == "gemini":
106110
self.llm = GoogleGenerativeAI(
107111
google_api_key=api_key, model=model, **kwargs
108112
)
109-
elif provider == 'openai':
110-
self.llm = OpenAI(
111-
openai_api_key=api_key, model=model, **kwargs
112-
)
113+
elif provider == "openai":
114+
self.llm = OpenAI(openai_api_key=api_key, model=model, **kwargs)
113115
elif provider == "ollama" and Ollama is not None:
114116
self.llm = Ollama(model=model, **kwargs)
115117
else:
116-
raise ValueError(
117-
f"Unsupported or unavailable provider: {provider}"
118-
)
118+
raise ValueError(f"Unsupported or unavailable provider: {provider}")
119119
except Exception as e:
120120
raise RuntimeError(
121121
f"Failed to initialize LLM provider '{provider}': {e}"
@@ -133,9 +133,7 @@ def __init__(
133133
)
134134

135135
agent = create_tool_calling_agent(self.llm, self.tools, prompt)
136-
agent_executor = AgentExecutor(
137-
agent=agent, tools=self.tools, verbose=verbose
138-
)
136+
agent_executor = AgentExecutor(agent=agent, tools=self.tools, verbose=verbose)
139137

140138
def get_session_history(session_id: str) -> ChatMessageHistory:
141139
if session_id not in self.store:
@@ -154,15 +152,15 @@ def run(self, input_data: str, session_id: str = "default"):
154152
Run the agent on the provided input data (prompt).
155153
"""
156154
result = self.agent.invoke(
157-
{"input": input_data},
158-
config={"configurable": {"session_id": session_id}}
155+
{"input": input_data}, config={"configurable": {"session_id": session_id}}
159156
)
160157
return result["output"]
161158

162159

163160
class MockAgent:
164161
"""A mock agent for dry-run and CI that can echo or use tools."""
165-
def __init__(self, tools: Optional[List[BaseTool]] = None):
162+
163+
def __init__(self, tools: list[BaseTool] | None = None):
166164
self.last_input = None
167165
self.tools = tools or []
168166

@@ -188,21 +186,21 @@ def main():
188186
try:
189187
from dotenv import load_dotenv # type: ignore
190188
except Exception: # pragma: no cover - fallback if dotenv is unavailable
189+
191190
def load_dotenv(*_args, **_kwargs): # type: ignore
192191
return False
192+
193193
parser = argparse.ArgumentParser()
194194
parser.add_argument(
195195
"--dry-run", action="store_true", help="Run agent in dry-run mode"
196196
)
197197
parser.add_argument("--provider", help="LLM provider to use")
198198
parser.add_argument("--model", help="Model name to use")
199199
parser.add_argument(
200-
"--prompt", default="What is the capital of France?",
201-
help="The prompt to run"
200+
"--prompt", default="What is the capital of France?", help="The prompt to run"
202201
)
203202
parser.add_argument(
204-
"--session-id", default="default",
205-
help="The session ID for the conversation"
203+
"--session-id", default="default", help="The session ID for the conversation"
206204
)
207205
args = parser.parse_args()
208206

@@ -212,8 +210,7 @@ def load_dotenv(*_args, **_kwargs): # type: ignore
212210
api_key = None
213211
if not args.dry_run:
214212
if args.provider and (
215-
args.provider.lower() == "gemini" or
216-
args.provider.lower() == "google"
213+
args.provider.lower() == "gemini" or args.provider.lower() == "google"
217214
):
218215
api_key = os.getenv("GOOGLE_GEMINI_API_KEY")
219216
elif args.provider and args.provider.lower() == "openai":

src/llm/__init__.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,7 @@
55
Model providers.
66
"""
77

8-
from .base import BaseLLMClient, MockLLMClient
8+
from .base import BaseLLMClient
9+
from .base import MockLLMClient
910

10-
__all__ = [
11-
'BaseLLMClient',
12-
'MockLLMClient'
13-
]
11+
__all__ = ["BaseLLMClient", "MockLLMClient"]

0 commit comments

Comments (0)