Commit 2f40c6a

Narek Mkhitaryan authored and committed
delete benchmark function
1 parent 4c446f1 commit 2f40c6a

File tree

10 files changed: +4 / -336 lines changed


docs/source/api_reference/helpers.rst

Lines changed: 1 addition & 2 deletions
@@ -29,5 +29,4 @@ ________________________
 Utility functions
 --------------------------------
 
-.. autofunction:: superannotate.SAClient.consensus
-.. autofunction:: superannotate.SAClient.benchmark
+.. autofunction:: superannotate.SAClient.consensus

docs/source/userguide/quickstart.rst

Lines changed: 2 additions & 2 deletions
@@ -23,8 +23,8 @@ It can be installed on Ubuntu with:
 
    sudo apt-get install ffmpeg
 
-For Windows and Mac OS based installations to use :py:obj:`benchmark` and :py:obj:`consensus`
-functions you might also need to install beforehand :py:obj:`shapely` package,
+For Windows and Mac OS based installations to use :py:obj:`consensus`
+function you might also need to install beforehand :py:obj:`shapely` package,
 which we found to work properly only under Anaconda distribution, with:
 
 .. code-block:: bash

docs/source/userguide/utilities.rst

Lines changed: 0 additions & 7 deletions
@@ -188,10 +188,3 @@ The points are colored according to class name. Each annotator is represented wi
 
 .. image:: images/consensus_scatter.png
 
-
-Computing benchmark scores for instances between ground truth project and given project list
-============
-
-
-Benchmark is a tool to compare the quallity of the annotations of the same image that is present in several projects with
-the ground truth annotation of the same image that is in a separate project.

src/superannotate/lib/app/analytics/common.py

Lines changed: 1 addition & 1 deletion
@@ -449,7 +449,7 @@ def consensus(df, item_name, annot_type):
         from shapely.geometry import Point, Polygon, box
     except ImportError:
         raise ImportError(
-            "To use superannotate.benchmark or superannotate.consensus functions please install shapely package."
+            "To use superannotate.consensus function please install shapely package."
         )
 
     image_df = df[df["itemName"] == item_name]

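As background for the error-message change above: the consensus computation (and, previously, benchmark) scores overlapping annotation geometries, which is why shapely's Point, Polygon, and box are imported lazily inside a try/except. A minimal, self-contained sketch of that kind of geometric comparison, assuming shapely is installed (the helper name and the exact scoring here are illustrative, not taken from this commit):

    # Hypothetical helper: IoU-style overlap between two bounding boxes,
    # the sort of geometry work the shapely import above enables.
    from shapely.geometry import box

    def bbox_iou(b1, b2):
        # Each argument is a (minx, miny, maxx, maxy) tuple.
        p1, p2 = box(*b1), box(*b2)
        union_area = p1.union(p2).area
        return p1.intersection(p2).area / union_area if union_area else 0.0

    print(bbox_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # prints ~0.1429
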
src/superannotate/lib/app/interface/sdk_interface.py

Lines changed: 0 additions & 69 deletions
@@ -4,7 +4,6 @@
 import json
 import os
 import sys
-import tempfile
 import warnings
 from pathlib import Path
 from typing import Callable

@@ -1794,74 +1793,6 @@ def download_model(self, model: MLModel, output_dir: Union[str, Path]):
         else:
             return BaseSerializer(res.data).serialize()
 
-    def benchmark(
-        self,
-        project: Union[NotEmptyStr, dict],
-        gt_folder: str,
-        folder_names: List[NotEmptyStr],
-        export_root: Optional[Union[str, Path]] = None,
-        image_list=None,
-        annot_type: Optional[ANNOTATION_TYPE] = "bbox",
-        show_plots=False,
-    ):
-        """Computes benchmark score for each instance of given images that are present both gt_project_name project
-        and projects in folder_names list:
-
-        :param project: project name or metadata of the project
-        :type project: str or dict
-        :param gt_folder: project folder name that contains the ground truth annotations
-        :type gt_folder: str
-        :param folder_names: list of folder names in the project for which the scores will be computed
-        :type folder_names: list of str
-        :param export_root: root export path of the projects
-        :type export_root: Path-like (str or Path)
-        :param image_list: List of image names from the projects list that must be used. If None,
-            then all images from the projects list will be used. Default: None
-        :type image_list: list
-        :param annot_type: Type of annotation instances to consider.
-            Available candidates are: ["bbox", "polygon", "point"]
-        :type annot_type: str
-        :param show_plots: If True, show plots based on results of consensus computation. Default: False
-        :type show_plots: bool
-
-        :return: Pandas DateFrame with columns
-            (creatorEmail, QA, imageName, instanceId, className, area, attribute, folderName, score)
-        :rtype: pandas DataFrame
-        """
-        project_name = project
-        if isinstance(project, dict):
-            project_name = project["name"]
-
-        project = self.controller.projects.get_by_name(project_name).data
-        if project.type not in constants.ProjectType.images:
-            raise AppException(LIMITED_FUNCTIONS[project.type])
-
-        if not export_root:
-            with tempfile.TemporaryDirectory() as temp_dir:
-                response = self.controller.benchmark(
-                    project_name=project_name,
-                    ground_truth_folder_name=gt_folder,
-                    folder_names=folder_names,
-                    export_root=temp_dir,
-                    image_list=image_list,
-                    annot_type=annot_type,
-                    show_plots=show_plots,
-                )
-
-        else:
-            response = self.controller.benchmark(
-                project_name=project_name,
-                ground_truth_folder_name=gt_folder,
-                folder_names=folder_names,
-                export_root=export_root,
-                image_list=image_list,
-                annot_type=annot_type,
-                show_plots=show_plots,
-            )
-        if response.errors:
-            raise AppException(response.errors)
-        return response.data
-
     def consensus(
         self,
         project: NotEmptyStr,

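For context on the API surface removed here: SAClient.benchmark was the public entry point deleted in this commit. A minimal usage sketch reconstructed from the deleted signature and docstring above (project and folder names are hypothetical placeholders; credentials are assumed to be already configured for SAClient):

    from superannotate import SAClient

    sa = SAClient()  # assumes an SDK token is already configured
    # Compared each listed folder against the ground-truth folder and
    # returned a pandas DataFrame with one score row per instance.
    scores = sa.benchmark(
        project="Example Project",        # hypothetical project name
        gt_folder="ground_truth",         # folder holding ground-truth annotations
        folder_names=["annotator_1", "annotator_2"],
        annot_type="bbox",                # "bbox", "polygon", or "point"
        show_plots=False,
    )

After this commit the call is gone from SAClient; the consensus method shown below remains.
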
src/superannotate/lib/core/usecases/models.py

Lines changed: 0 additions & 95 deletions
@@ -15,9 +15,7 @@
 import requests
 from botocore.exceptions import ClientError
 from lib.app.analytics.aggregators import DataAggregator
-from lib.app.analytics.common import aggregate_image_annotations_as_df
 from lib.app.analytics.common import consensus
-from lib.app.analytics.common import consensus_plot
 from lib.core.conditions import Condition
 from lib.core.conditions import CONDITION_EQ as EQ
 from lib.core.entities import FolderEntity

@@ -320,99 +318,6 @@ def execute(self):
         return self._response
 
 
-class BenchmarkUseCase(BaseUseCase):
-    def __init__(
-        self,
-        project: ProjectEntity,
-        ground_truth_folder_name: str,
-        folder_names: list,
-        export_dir: str,
-        image_list: list,
-        annotation_type: str,
-        show_plots: bool,
-    ):
-        super().__init__()
-        self._project = project
-        self._ground_truth_folder_name = ground_truth_folder_name
-        self._folder_names = folder_names
-        self._export_dir = export_dir
-        self._image_list = image_list
-        self._annotation_type = annotation_type
-        self._show_plots = show_plots
-
-    def execute(self):
-        project_df = aggregate_image_annotations_as_df(self._export_dir)
-        gt_project_df = project_df[
-            project_df["folderName"] == self._ground_truth_folder_name
-        ]
-        benchmark_dfs = []
-        for folder_name in self._folder_names:
-            folder_df = project_df[project_df["folderName"] == folder_name]
-            project_gt_df = pd.concat([folder_df, gt_project_df])
-            project_gt_df = project_gt_df[project_gt_df["instanceId"].notna()]
-
-            if self._image_list is not None:
-                project_gt_df = project_gt_df.loc[
-                    project_gt_df["itemName"].isin(self._image_list)
-                ]
-
-            project_gt_df.query("type == '" + self._annotation_type + "'", inplace=True)
-
-            project_gt_df = project_gt_df.groupby(
-                ["itemName", "instanceId", "folderName"]
-            )
-
-            def aggregate_attributes(instance_df):
-                def attribute_to_list(attribute_df):
-                    attribute_names = list(attribute_df["attributeName"])
-                    attribute_df["attributeNames"] = len(attribute_df) * [
-                        attribute_names
-                    ]
-                    return attribute_df
-
-                attributes = None
-                if not instance_df["attributeGroupName"].isna().all():
-                    attrib_group_name = instance_df.groupby("attributeGroupName")[
-                        ["attributeGroupName", "attributeName"]
-                    ].apply(attribute_to_list)
-                    attributes = dict(
-                        zip(
-                            attrib_group_name["attributeGroupName"],
-                            attrib_group_name["attributeNames"],
-                        )
-                    )
-
-                instance_df.drop(
-                    ["attributeGroupName", "attributeName"], axis=1, inplace=True
-                )
-                instance_df.drop_duplicates(
-                    subset=["itemName", "instanceId", "folderName"], inplace=True
-                )
-                instance_df["attributes"] = [attributes]
-                return instance_df
-
-            project_gt_df = project_gt_df.apply(aggregate_attributes).reset_index(
-                drop=True
-            )
-            unique_images = set(project_gt_df["itemName"])
-            all_benchmark_data = []
-            for image_name in unique_images:
-                image_data = image_consensus(
-                    project_gt_df, image_name, self._annotation_type
-                )
-                all_benchmark_data.append(pd.DataFrame(image_data))
-            benchmark_project_df = pd.concat(all_benchmark_data, ignore_index=True)
-            benchmark_project_df = benchmark_project_df[
-                benchmark_project_df["folderName"] == folder_name
-            ]
-            benchmark_dfs.append(benchmark_project_df)
-        benchmark_df = pd.concat(benchmark_dfs, ignore_index=True)
-        if self._show_plots:
-            consensus_plot(benchmark_df, self._folder_names)
-        self._response.data = benchmark_df
-        return self._response
-
-
 class ConsensusUseCase(BaseUseCase):
     def __init__(
         self,

src/superannotate/lib/infrastructure/controller.py

Lines changed: 0 additions & 42 deletions
@@ -1147,48 +1147,6 @@ def download_export(
         )
         return use_case.execute()
 
-    def benchmark(
-        self,
-        project_name: str,
-        ground_truth_folder_name: str,
-        folder_names: List[str],
-        export_root: str,
-        image_list: List[str],
-        annot_type: str,
-        show_plots: bool,
-    ):
-        project = self.get_project(project_name)
-        export_response = self.prepare_export(
-            project.name,
-            folder_names=folder_names,
-            include_fuse=False,
-            only_pinned=False,
-        )
-        if export_response.errors:
-            return export_response
-
-        response = usecases.DownloadExportUseCase(
-            service_provider=self.service_provider,
-            project=project,
-            export_name=export_response.data["name"],
-            folder_path=export_root,
-            extract_zip_contents=True,
-            to_s3_bucket=False,
-            reporter=self.get_default_reporter(),
-        ).execute()
-        if response.errors:
-            raise AppException(response.errors)
-        use_case = usecases.BenchmarkUseCase(
-            project=project,
-            ground_truth_folder_name=ground_truth_folder_name,
-            folder_names=folder_names,
-            export_dir=export_root,
-            image_list=image_list,
-            annotation_type=annot_type,
-            show_plots=show_plots,
-        )
-        return use_case.execute()
-
     def consensus(
         self,
         project_name: str,

tests/integration/test_benchmark.py

Lines changed: 0 additions & 110 deletions
This file was deleted.
