Skip to content

Commit 5bf7198

Browse files
authored
Merge pull request #1028 from ScrapeGraphAI/copilot/fix-whitespace-formatting-errors
Fix whitespace formatting errors (W291, W292, W293)
2 parents 518d1b0 + 6deac76 commit 5bf7198

File tree

7 files changed

+275
-237
lines changed

7 files changed

+275
-237
lines changed

scrapegraphai/graphs/markdownify_graph.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,9 +64,7 @@ def __init__(
6464
graph_name="Markdownify",
6565
)
6666

67-
def execute(
68-
self, initial_state: Dict
69-
) -> Tuple[Dict, List[Dict]]:
67+
def execute(self, initial_state: Dict) -> Tuple[Dict, List[Dict]]:
7068
"""
7169
Execute the markdownify graph.
7270
@@ -80,4 +78,4 @@ def execute(
8078
- Dictionary with the markdown result in the "markdown" key
8179
- List of execution logs
8280
"""
83-
return super().execute(initial_state)
81+
return super().execute(initial_state)

scrapegraphai/helpers/models_tokens.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
"o1-preview": 128000,
3333
"o1-mini": 128000,
3434
"o1": 128000,
35-
"gpt-4.5-preview": 128000,
3635
"o3-mini": 200000,
3736
},
3837
"azure_openai": {

scrapegraphai/models/xai.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
"""
22
xAI Grok Module
33
"""
4+
45
from langchain_openai import ChatOpenAI
56

67

@@ -19,4 +20,4 @@ def __init__(self, **llm_config):
1920
llm_config["openai_api_key"] = llm_config.pop("api_key")
2021
llm_config["openai_api_base"] = "https://api.x.ai/v1"
2122

22-
super().__init__(**llm_config)
23+
super().__init__(**llm_config)

scrapegraphai/nodes/markdownify_node.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,4 +64,4 @@ def execute(self, state: dict) -> dict:
6464
# Update state with markdown content
6565
state.update({self.output[0]: markdown_content})
6666

67-
return state
67+
return state

scrapegraphai/utils/code_error_analysis.py

Lines changed: 80 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@
1414
import json
1515
from typing import Any, Dict, Optional
1616

17-
from pydantic import BaseModel, Field, validator
18-
from langchain_core.prompts import PromptTemplate
1917
from langchain_core.output_parsers import StrOutputParser
18+
from langchain_core.prompts import PromptTemplate
19+
from pydantic import BaseModel, Field, validator
2020

2121
from ..prompts import (
2222
TEMPLATE_EXECUTION_ANALYSIS,
@@ -28,20 +28,25 @@
2828

2929
class AnalysisError(Exception):
3030
"""Base exception for code analysis errors."""
31+
3132
pass
3233

3334

3435
class InvalidStateError(AnalysisError):
3536
"""Exception raised when state dictionary is missing required keys."""
37+
3638
pass
3739

3840

3941
class CodeAnalysisState(BaseModel):
4042
"""Base model for code analysis state validation."""
43+
4144
generated_code: str = Field(..., description="The generated code to analyze")
42-
errors: Dict[str, Any] = Field(..., description="Dictionary containing error information")
45+
errors: Dict[str, Any] = Field(
46+
..., description="Dictionary containing error information"
47+
)
4348

44-
@validator('errors')
49+
@validator("errors")
4550
def validate_errors(cls, v):
4651
"""Ensure errors dictionary has expected structure."""
4752
if not isinstance(v, dict):
@@ -51,39 +56,41 @@ def validate_errors(cls, v):
5156

5257
class ExecutionAnalysisState(CodeAnalysisState):
5358
"""Model for execution analysis state validation."""
59+
5460
html_code: Optional[str] = Field(None, description="HTML code if available")
5561
html_analysis: Optional[str] = Field(None, description="Analysis of HTML code")
5662

57-
@validator('errors')
63+
@validator("errors")
5864
def validate_execution_errors(cls, v):
5965
"""Ensure errors dictionary contains execution key."""
6066
super().validate_errors(v)
61-
if 'execution' not in v:
67+
if "execution" not in v:
6268
raise ValueError("errors dictionary must contain 'execution' key")
6369
return v
6470

6571

6672
class ValidationAnalysisState(CodeAnalysisState):
6773
"""Model for validation analysis state validation."""
74+
6875
json_schema: Dict[str, Any] = Field(..., description="JSON schema for validation")
6976
execution_result: Any = Field(..., description="Result of code execution")
7077

71-
@validator('errors')
78+
@validator("errors")
7279
def validate_validation_errors(cls, v):
7380
"""Ensure errors dictionary contains validation key."""
7481
super().validate_errors(v)
75-
if 'validation' not in v:
82+
if "validation" not in v:
7683
raise ValueError("errors dictionary must contain 'validation' key")
7784
return v
7885

7986

8087
def get_optimal_analysis_template(error_type: str) -> str:
8188
"""
8289
Returns the optimal prompt template based on the error type.
83-
90+
8491
Args:
8592
error_type (str): Type of error to analyze.
86-
93+
8794
Returns:
8895
str: The prompt template text.
8996
"""
@@ -106,10 +113,10 @@ def syntax_focused_analysis(state: Dict[str, Any], llm_model) -> str:
106113
107114
Returns:
108115
str: The result of the syntax error analysis.
109-
116+
110117
Raises:
111118
InvalidStateError: If state is missing required keys.
112-
119+
113120
Example:
114121
>>> state = {
115122
'generated_code': 'print("Hello World")',
@@ -121,26 +128,28 @@ def syntax_focused_analysis(state: Dict[str, Any], llm_model) -> str:
121128
# Validate state using Pydantic model
122129
validated_state = CodeAnalysisState(
123130
generated_code=state.get("generated_code", ""),
124-
errors=state.get("errors", {})
131+
errors=state.get("errors", {}),
125132
)
126-
133+
127134
# Check if syntax errors exist
128135
if "syntax" not in validated_state.errors:
129136
raise InvalidStateError("No syntax errors found in state dictionary")
130-
137+
131138
# Create prompt template and chain
132139
prompt = PromptTemplate(
133140
template=get_optimal_analysis_template("syntax"),
134-
input_variables=["generated_code", "errors"]
141+
input_variables=["generated_code", "errors"],
135142
)
136143
chain = prompt | llm_model | StrOutputParser()
137-
144+
138145
# Execute chain with validated state
139-
return chain.invoke({
140-
"generated_code": validated_state.generated_code,
141-
"errors": validated_state.errors["syntax"]
142-
})
143-
146+
return chain.invoke(
147+
{
148+
"generated_code": validated_state.generated_code,
149+
"errors": validated_state.errors["syntax"],
150+
}
151+
)
152+
144153
except KeyError as e:
145154
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
146155
except Exception as e:
@@ -157,10 +166,10 @@ def execution_focused_analysis(state: Dict[str, Any], llm_model) -> str:
157166
158167
Returns:
159168
str: The result of the execution error analysis.
160-
169+
161170
Raises:
162171
InvalidStateError: If state is missing required keys.
163-
172+
164173
Example:
165174
>>> state = {
166175
'generated_code': 'print(x)',
@@ -176,24 +185,26 @@ def execution_focused_analysis(state: Dict[str, Any], llm_model) -> str:
176185
generated_code=state.get("generated_code", ""),
177186
errors=state.get("errors", {}),
178187
html_code=state.get("html_code", ""),
179-
html_analysis=state.get("html_analysis", "")
188+
html_analysis=state.get("html_analysis", ""),
180189
)
181-
190+
182191
# Create prompt template and chain
183192
prompt = PromptTemplate(
184193
template=get_optimal_analysis_template("execution"),
185194
input_variables=["generated_code", "errors", "html_code", "html_analysis"],
186195
)
187196
chain = prompt | llm_model | StrOutputParser()
188-
197+
189198
# Execute chain with validated state
190-
return chain.invoke({
191-
"generated_code": validated_state.generated_code,
192-
"errors": validated_state.errors["execution"],
193-
"html_code": validated_state.html_code,
194-
"html_analysis": validated_state.html_analysis,
195-
})
196-
199+
return chain.invoke(
200+
{
201+
"generated_code": validated_state.generated_code,
202+
"errors": validated_state.errors["execution"],
203+
"html_code": validated_state.html_code,
204+
"html_analysis": validated_state.html_analysis,
205+
}
206+
)
207+
197208
except KeyError as e:
198209
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
199210
except Exception as e:
@@ -211,10 +222,10 @@ def validation_focused_analysis(state: Dict[str, Any], llm_model) -> str:
211222
212223
Returns:
213224
str: The result of the validation error analysis.
214-
225+
215226
Raises:
216227
InvalidStateError: If state is missing required keys.
217-
228+
218229
Example:
219230
>>> state = {
220231
'generated_code': 'return {"name": "John"}',
@@ -230,24 +241,31 @@ def validation_focused_analysis(state: Dict[str, Any], llm_model) -> str:
230241
generated_code=state.get("generated_code", ""),
231242
errors=state.get("errors", {}),
232243
json_schema=state.get("json_schema", {}),
233-
execution_result=state.get("execution_result", {})
244+
execution_result=state.get("execution_result", {}),
234245
)
235-
246+
236247
# Create prompt template and chain
237248
prompt = PromptTemplate(
238249
template=get_optimal_analysis_template("validation"),
239-
input_variables=["generated_code", "errors", "json_schema", "execution_result"],
250+
input_variables=[
251+
"generated_code",
252+
"errors",
253+
"json_schema",
254+
"execution_result",
255+
],
240256
)
241257
chain = prompt | llm_model | StrOutputParser()
242-
258+
243259
# Execute chain with validated state
244-
return chain.invoke({
245-
"generated_code": validated_state.generated_code,
246-
"errors": validated_state.errors["validation"],
247-
"json_schema": validated_state.json_schema,
248-
"execution_result": validated_state.execution_result,
249-
})
250-
260+
return chain.invoke(
261+
{
262+
"generated_code": validated_state.generated_code,
263+
"errors": validated_state.errors["validation"],
264+
"json_schema": validated_state.json_schema,
265+
"execution_result": validated_state.execution_result,
266+
}
267+
)
268+
251269
except KeyError as e:
252270
raise InvalidStateError(f"Missing required key in state dictionary: {e}")
253271
except Exception as e:
@@ -268,10 +286,10 @@ def semantic_focused_analysis(
268286
269287
Returns:
270288
str: The result of the semantic error analysis.
271-
289+
272290
Raises:
273291
InvalidStateError: If state or comparison_result is missing required keys.
274-
292+
275293
Example:
276294
>>> state = {
277295
'generated_code': 'def add(a, b): return a + b'
@@ -286,30 +304,32 @@ def semantic_focused_analysis(
286304
# Validate state using Pydantic model
287305
validated_state = CodeAnalysisState(
288306
generated_code=state.get("generated_code", ""),
289-
errors=state.get("errors", {})
307+
errors=state.get("errors", {}),
290308
)
291-
309+
292310
# Validate comparison_result
293311
if "differences" not in comparison_result:
294312
raise InvalidStateError("comparison_result missing 'differences' key")
295313
if "explanation" not in comparison_result:
296314
raise InvalidStateError("comparison_result missing 'explanation' key")
297-
315+
298316
# Create prompt template and chain
299317
prompt = PromptTemplate(
300318
template=get_optimal_analysis_template("semantic"),
301319
input_variables=["generated_code", "differences", "explanation"],
302320
)
303321
chain = prompt | llm_model | StrOutputParser()
304-
322+
305323
# Execute chain with validated inputs
306-
return chain.invoke({
307-
"generated_code": validated_state.generated_code,
308-
"differences": json.dumps(comparison_result["differences"], indent=2),
309-
"explanation": comparison_result["explanation"],
310-
})
311-
324+
return chain.invoke(
325+
{
326+
"generated_code": validated_state.generated_code,
327+
"differences": json.dumps(comparison_result["differences"], indent=2),
328+
"explanation": comparison_result["explanation"],
329+
}
330+
)
331+
312332
except KeyError as e:
313333
raise InvalidStateError(f"Missing required key: {e}")
314334
except Exception as e:
315-
raise AnalysisError(f"Semantic analysis failed: {str(e)}")
335+
raise AnalysisError(f"Semantic analysis failed: {str(e)}")

0 commit comments

Comments (0)