Commit 1ad1d1b

minor linting fixes and code cleanup
1 parent ca31888 commit 1ad1d1b

File tree

  sources/pipedrive/settings.py
  sources/pipedrive_pipeline.py

2 files changed: +18 −8 lines changed

sources/pipedrive/settings.py

Lines changed: 12 additions & 2 deletions

@@ -93,7 +93,10 @@
     "deal_followers": {
         "parent": "deals",
         "endpoint_path": "deals/{id}/followers",
-        "primary_key": ["user_id", "_deals_id"],  # Followers don't have 'id', use composite key
+        "primary_key": [
+            "user_id",
+            "_deals_id",
+        ],  # Followers don't have 'id', use composite key
         "include_from_parent": ["id"],  # Include deal id from parent
         "params": {
             "limit": 500,
@@ -105,4 +108,11 @@
 # This curated set includes the most commonly used endpoints.
 # Users can customize this list to match their needs.
 # See ENTITIES_V2 above for all available v2 endpoints.
-DEFAULT_V2_RESOURCES = ["deals", "persons", "organizations" ,"products", "pipelines", "stages"]
+DEFAULT_V2_RESOURCES = [
+    "deals",
+    "persons",
+    "organizations",
+    "products",
+    "pipelines",
+    "stages",
+]
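For reference, the reformatted DEFAULT_V2_RESOURCES list is the fallback that load_v2_resources in sources/pipedrive_pipeline.py uses when no explicit resource list is passed (see the `resources = list(resources or DEFAULT_V2_RESOURCES)` line in the diff below). A minimal standalone sketch of that fallback pattern; resolve_v2_resources is a hypothetical helper, not part of the source:

from typing import Optional, Sequence

# Curated default list, copied from sources/pipedrive/settings.py above
DEFAULT_V2_RESOURCES = [
    "deals",
    "persons",
    "organizations",
    "products",
    "pipelines",
    "stages",
]


def resolve_v2_resources(resources: Optional[Sequence[str]] = None) -> list:
    # Same fallback the pipeline script uses: an explicit list wins,
    # otherwise the curated defaults are loaded.
    return list(resources or DEFAULT_V2_RESOURCES)


print(resolve_v2_resources())           # all six curated endpoints
print(resolve_v2_resources(["deals"]))  # caller-supplied subset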

sources/pipedrive_pipeline.py

Lines changed: 6 additions & 6 deletions

@@ -10,7 +10,7 @@ def load_pipedrive() -> None:
     # configure the pipeline with your destination details
     pipeline = dlt.pipeline(
         pipeline_name="pipedrive",
-        destination='duckdb',
+        destination="duckdb",
         dataset_name="pipedrive_data",
         progress="log",
     )
@@ -23,15 +23,15 @@ def load_selected_data() -> None:
     """Shows how to load just selected tables using `with_resources`"""
     pipeline = dlt.pipeline(
         pipeline_name="pipedrive",
-        destination='duckdb',
+        destination="duckdb",
         dataset_name="pipedrive_data",
         progress="log",
     )
     # Use with_resources to select which entities to load
     # Note: `custom_fields_mapping` must be included to translate custom field hashes to corresponding names
     load_info = pipeline.run(
         pipedrive_source().with_resources(
-        "products", "deals", "deals_participants", "custom_fields_mapping"
+            "products", "deals", "deals_participants", "custom_fields_mapping"
         )
     )
     print(load_info)
@@ -54,7 +54,7 @@ def load_from_start_date() -> None:
     """Example to incrementally load activities limited to items updated after a given date"""
     pipeline = dlt.pipeline(
         pipeline_name="pipedrive",
-        destination='duckdb',
+        destination="duckdb",
         dataset_name="pipedrive_data",
         progress="log",
     )
@@ -79,7 +79,7 @@ def load_v2_resources(resources: Optional[Sequence[str]] = None) -> None:
     resources = list(resources or DEFAULT_V2_RESOURCES)
     pipeline = dlt.pipeline(
         pipeline_name="pipedrive",
-        destination='duckdb',
+        destination="duckdb",
         dataset_name="pipedrive_data",
         progress="log",
     )
@@ -116,6 +116,6 @@ def load_selected_v2_data() -> None:
     # load activities updated since given date
     # load_from_start_date()
     # load v2 resources (optional addon)
-    #load_v2_resources()
+    # load_v2_resources()
     # load only selected v2 resources (3 major endpoints: deals, persons, organizations)
     load_selected_v2_data()
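The composite primary key introduced for deal_followers in settings.py is worth a standalone illustration: the followers endpoint returns no id field, so deduplication has to key on the follower/deal pair. A minimal sketch using a plain dlt resource, assuming dlt and duckdb are installed; the resource name and sample rows are illustrative only, not taken from the source:

import dlt


@dlt.resource(primary_key=("user_id", "_deals_id"), write_disposition="merge")
def deal_followers_demo():
    # Followers carry no 'id' of their own, so the (user_id, _deals_id)
    # pair is what keeps rows unique across repeated loads.
    yield {"user_id": 1, "_deals_id": 10}
    yield {"user_id": 2, "_deals_id": 10}


pipeline = dlt.pipeline(
    pipeline_name="pipedrive_demo",
    destination="duckdb",
    dataset_name="pipedrive_data",
)
print(pipeline.run(deal_followers_demo()))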
