40 changes: 25 additions & 15 deletions openfeature_flagsmith/provider.py
@@ -45,51 +45,61 @@ def resolve_boolean_details(
         self,
         flag_key: str,
         default_value: bool,
-        context: EvaluationContext = EvaluationContext(),
+        evaluation_context: EvaluationContext = EvaluationContext(),
     ) -> FlagResolutionDetails[bool]:
-        return self._resolve(flag_key, FlagType.BOOLEAN, default_value, context)
+        return self._resolve(
+            flag_key, FlagType.BOOLEAN, default_value, evaluation_context
+        )
 
     def resolve_string_details(
         self,
         flag_key: str,
         default_value: str,
-        context: EvaluationContext = EvaluationContext(),
+        evaluation_context: EvaluationContext = EvaluationContext(),
     ) -> FlagResolutionDetails[str]:
-        return self._resolve(flag_key, FlagType.STRING, default_value, context)
+        return self._resolve(
+            flag_key, FlagType.STRING, default_value, evaluation_context
+        )
 
     def resolve_integer_details(
         self,
         flag_key: str,
         default_value: int,
-        context: EvaluationContext = EvaluationContext(),
+        evaluation_context: EvaluationContext = EvaluationContext(),
     ) -> FlagResolutionDetails[int]:
-        return self._resolve(flag_key, FlagType.INTEGER, default_value, context)
+        return self._resolve(
+            flag_key, FlagType.INTEGER, default_value, evaluation_context
+        )
 
     def resolve_float_details(
         self,
         flag_key: str,
         default_value: float,
-        context: EvaluationContext = EvaluationContext(),
+        evaluation_context: EvaluationContext = EvaluationContext(),
     ) -> FlagResolutionDetails[float]:
-        return self._resolve(flag_key, FlagType.FLOAT, default_value, context)
+        return self._resolve(
+            flag_key, FlagType.FLOAT, default_value, evaluation_context
+        )
 
     def resolve_object_details(
         self,
         flag_key: str,
         default_value: typing.Union[dict, list],
-        context: EvaluationContext = EvaluationContext(),
+        evaluation_context: EvaluationContext = EvaluationContext(),
     ) -> FlagResolutionDetails[typing.Union[dict, list]]:
-        return self._resolve(flag_key, FlagType.OBJECT, default_value, context)
+        return self._resolve(
+            flag_key, FlagType.OBJECT, default_value, evaluation_context
+        )
 
     def _resolve(
         self,
         flag_key: str,
         flag_type: FlagType,
         default_value: typing.Any,
-        context: EvaluationContext,
+        evaluation_context: EvaluationContext,
     ) -> FlagResolutionDetails:
         try:
-            flag = self._get_flags(context).get_flag(flag_key)
+            flag = self._get_flags(evaluation_context).get_flag(flag_key)
         except FlagsmithClientError as e:
             raise FlagsmithProviderError(
                 error_code=ErrorCode.GENERAL,
@@ -123,10 +133,10 @@ def _resolve(
             % (flag_key, flag_type.value)
         )
 
-    def _get_flags(self, context: EvaluationContext = EvaluationContext()):
-        if targeting_key := context.targeting_key:
+    def _get_flags(self, evaluation_context: EvaluationContext = EvaluationContext()):
+        if targeting_key := evaluation_context.targeting_key:
             return self._client.get_identity_flags(
                 identifier=targeting_key,
-                traits=context.attributes.get("traits", {}),
+                traits=evaluation_context.attributes.get("traits", {}),
             )
         return self._client.get_environment_flags()
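
The net effect of this file's changes is a keyword rename: every resolver, and the internal `_get_flags` helper, now accepts `evaluation_context` instead of `context`. Below is a minimal caller-side sketch. The `FlagsmithProvider` class name and its `client=` constructor argument are assumptions (only the resolver methods appear in this diff); the resolver signature and the `EvaluationContext` shape come from the hunks above.

```python
# Minimal usage sketch. Assumptions: the provider class is named
# FlagsmithProvider and takes a flagsmith.Flagsmith client as `client=`;
# neither detail is shown in this diff.
from flagsmith import Flagsmith
from openfeature.evaluation_context import EvaluationContext

from openfeature_flagsmith.provider import FlagsmithProvider  # name assumed

provider = FlagsmithProvider(client=Flagsmith(environment_key="<env-key>"))

# With a targeting_key, _get_flags() calls get_identity_flags() and forwards
# the "traits" attribute; without one, it falls back to get_environment_flags().
details = provider.resolve_string_details(
    flag_key="banner_text",
    default_value="hello",
    evaluation_context=EvaluationContext(
        targeting_key="user-123",
        attributes={"traits": {"plan": "pro"}},
    ),
)
print(details.value)
```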
2 changes: 1 addition & 1 deletion poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -9,7 +9,7 @@ readme = "README.md"
 requires-python = ">=3.9,<4.0"
 dependencies = [
     "flagsmith (>=3.6.0,<4.0.0)",
-    "openfeature-sdk (>=0.6.0,<1.0.0)",
+    "openfeature-sdk (>=0.6.0,<0.9.0)",
 ]
 
 [tool.poetry]
2 changes: 1 addition & 1 deletion tests/test_provider.py
@@ -304,7 +304,7 @@ def test_identity_flags_are_used_if_targeting_key_provided(
     result = provider.resolve_string_details(
         flag_key=key,
         default_value=default_value,
-        context=EvaluationContext(
+        evaluation_context=EvaluationContext(
             targeting_key=targeting_key, attributes={"traits": traits}
         ),
     )
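
The updated test exercises the identity path; a companion test for the environment fallback could look like the sketch below. The `provider` and `mock_flagsmith_client` fixtures are assumptions modelled on the call above, not fixtures confirmed by this diff.

```python
from openfeature.evaluation_context import EvaluationContext


# Hypothetical companion test (not part of this PR). The `provider` and
# `mock_flagsmith_client` fixtures are assumed; with no targeting_key set,
# _get_flags() should fall back to environment-level flags.
def test_environment_flags_are_used_if_no_targeting_key(
    provider, mock_flagsmith_client
):
    provider._get_flags(EvaluationContext())

    mock_flagsmith_client.get_environment_flags.assert_called_once()
    mock_flagsmith_client.get_identity_flags.assert_not_called()
```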