From c001fef9dab26de958c0e71b3a8cbc5e43f339bb Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 27 May 2022 15:39:50 +0200 Subject: [PATCH 01/58] added sctransform vst wrapper --- batchglm/train/numpy/base_glm/estimator.py | 95 +++++++++++++++++ .../train/numpy/base_glm/model_container.py | 10 ++ .../numpy/base_glm/training_strategies.py | 23 +++- batchglm/train/numpy/base_glm/vst.py | 100 ++++++++++++++++++ 4 files changed, 226 insertions(+), 2 deletions(-) create mode 100644 batchglm/train/numpy/base_glm/vst.py diff --git a/batchglm/train/numpy/base_glm/estimator.py b/batchglm/train/numpy/base_glm/estimator.py index 0ffe9006..a30aa5b4 100644 --- a/batchglm/train/numpy/base_glm/estimator.py +++ b/batchglm/train/numpy/base_glm/estimator.py @@ -17,9 +17,17 @@ from .external import pkg_constants from .model_container import BaseModelContainer from .training_strategies import TrainingStrategies +from .vst import bw_kde, geometric_mean, is_outlier, log_geometric_mean logger = logging.getLogger("batchglm") +try: + from skfda.preprocessing.smoothing.kernel_smoothers import NadarayaWatsonSmoother as NWSmoother + from skfda.representation.grid import FDataGrid +except ImportError: + FDataGrid = None + NWSmoother = None + class EstimatorGlm(metaclass=abc.ABCMeta): """ @@ -86,8 +94,95 @@ def train_sequence(self, training_strategy, **kwargs): "overrding %s from training strategy with value %s with new value %s\n" % (x, str(d[x]), str(kwargs[x])) ) + if "dispersion_smoothing" in d: + if d["dispersion_smoothing"] == "sctransform": + if FDataGrid is None or NWSmoother is None: + logger.error("Missing optional dependency scikit-fda.") self.train(**d, **kwargs) logger.debug("Training sequence #%d complete", idx + 1) + if "dispersion_smoothing" in d: + if d["dispersion_smoothing"] == "sctransform": + self.perform_vst() + elif d["dispersion_smoothing"] == "edger": + raise NotImplementedError() + elif d["dispersion_smoothing"] == "deseq2": + raise NotImplementedError() + + def perform_vst( + self, + theta_regularization: str = "od_factor", + use_geometric_mean: bool = True, + gmean_eps: float = 1.0, + bw_adjust: float = 1.5, + ): + logger.info("Performing Dispersion smoothing with flavour sctransform...") + # compute geometric log mean counts + if use_geometric_mean: + genes_log_gmean = np.log10(geometric_mean(self.model_container.x, axis=0, eps=gmean_eps)) + else: + genes_log_gmean = np.log10(np.mean(self.model_container.x, axis=0)) + if isinstance(genes_log_gmean, dask.array.core.Array): + genes_log_gmean = genes_log_gmean.compute() + + # specify which kind of regularization is performed + scale_param = np.exp(self.model_container.theta_scale[0]) # TODO check if this is always correct + if theta_regularization == "log_theta": + dispersion_par = np.log10(scale_param) + elif theta_regularization == "od_factor": + dispersion_par = np.log10(1 + np.power(10, genes_log_gmean) / scale_param) + else: + raise ValueError(f"Unrecognized regularization method {theta_regularization}") + if isinstance(dispersion_par, dask.array.core.Array): + dispersion_par = dispersion_par.compute() + + # downsample because KDE and bandwidth selection would take too long if performed on the entire dataset. 
+ # It is sufficient to get a general idea of the data distribution + if len(genes_log_gmean) > 2000: + logger.info("Sampling 2000 random features...") + idx = np.random.choice(np.arange(len(genes_log_gmean)), 2000) + genes_log_gmean_filtered = genes_log_gmean[idx] + dispersion_par_filtered = dispersion_par[idx] + else: + dispersion_par_filtered = dispersion_par + genes_log_gmean_filtered = genes_log_gmean + + # check for outliers in the function f(genes_log_gmean_filtered) = dispersion_par_filtered and remove them + logger.info("Searching for outliers...") + outliers = is_outlier(model_param=dispersion_par_filtered, means=genes_log_gmean_filtered) + outliers = outliers["outlier"].values + if np.any(outliers): + # toss out the outliers + logger.info(f"Excluded {np.sum(outliers)} outliers.") + genes_log_gmean_filtered = genes_log_gmean_filtered[~outliers] + dispersion_par_filtered = dispersion_par_filtered[~outliers] + + # define a data grid with the downsampled and filtered values + domain_range = genes_log_gmean_filtered.min(), genes_log_gmean_filtered.max() + fd = FDataGrid( + data_matrix=dispersion_par_filtered, grid_points=genes_log_gmean_filtered, domain_range=domain_range + ) + # select bandwidth to be used for smoothing + bw = bw_kde(genes_log_gmean_filtered) * bw_adjust + # bandwidth = FFTKDE(kernel='gaussian', bw='ISJ').fit(fd).bw * 0.37 * 3 + + # define points for evaluation. Ensure x_points is within the range of genes_log_gmean_filtered + x_points = np.clip(genes_log_gmean, *domain_range) + # smooth the dispersion_par + logger.info("Performing smoothing...") + smoother = NWSmoother(smoothing_parameter=bw, output_points=x_points) + dispersion_par_smoothed = smoother.fit_transform(fd).data_matrix[0, :, 0] + + # transform back to scale param + if theta_regularization == "log_theta": + smoothed_scale = np.power(10, dispersion_par_smoothed) + elif theta_regularization == "od_factor": + smoothed_scale = np.power(10, genes_log_gmean) / (np.power(10, dispersion_par_smoothed) - 1) + else: + raise ValueError(f"Unrecognized regularization method {theta_regularization}") + + # store smoothed dispersion_par + self.model_container.theta_scale_smoothed = np.log(smoothed_scale) + logger.info("Done with dispersion smoothing.") def initialize(self): pass diff --git a/batchglm/train/numpy/base_glm/model_container.py b/batchglm/train/numpy/base_glm/model_container.py index 4483b46c..9f98c5df 100644 --- a/batchglm/train/numpy/base_glm/model_container.py +++ b/batchglm/train/numpy/base_glm/model_container.py @@ -162,6 +162,16 @@ def theta_scale_j_setter(self, value, j): else: self.params[self.npar_location :, j] = value + # dispersion_smoothing + + @property + def theta_scale_smoothed(self) -> np.ndarray: + return self._theta_scale_smoothed + + @theta_scale_smoothed.setter + def theta_scale_smoothed(self, value): + self._theta_scale_smoothed = value + # jacobians @abc.abstractmethod diff --git a/batchglm/train/numpy/base_glm/training_strategies.py b/batchglm/train/numpy/base_glm/training_strategies.py index 6b8477bd..855ca99d 100644 --- a/batchglm/train/numpy/base_glm/training_strategies.py +++ b/batchglm/train/numpy/base_glm/training_strategies.py @@ -13,6 +13,25 @@ class TrainingStrategies(Enum): "max_iter_scale": 1000, }, ] - GD = [ - {"max_steps": 1000, "method_scale": "gd", "update_scale_freq": 5, "ftol_scale": 1e-6, "max_iter_scale": 100}, + GD = ( + [ + { + "max_steps": 1000, + "method_scale": "gd", + "update_scale_freq": 5, + "ftol_scale": 1e-6, + "max_iter_scale": 100, + }, + ], + ) + + 
SCTRANSFORM = [ + { + "max_steps": 1000, + "method_scale": "brent", + "update_scale_freq": 5, + "ftol_scale": 1e-6, + "max_iter_scale": 1000, + "dispersion_smoothing": "sctransform", + }, ] diff --git a/batchglm/train/numpy/base_glm/vst.py b/batchglm/train/numpy/base_glm/vst.py new file mode 100644 index 00000000..2e0bf091 --- /dev/null +++ b/batchglm/train/numpy/base_glm/vst.py @@ -0,0 +1,100 @@ +from typing import Optional, Union + +import numpy as np +import pandas as pd +from scipy.stats import gaussian_kde + + +def log_geometric_mean(a: np.ndarray, axis: Optional[int] = None, eps: Union[int, float] = 1.0): + """ + Returns the log of the geometric mean defined as mean(log(a + eps)). + :param a: np.ndarray containing the data + :param axis: the axis over which the log geometric mean is calculated. If None, computes the log geometric mean + over the entire data. + :param eps: small value added to each value in order to avoid computing log(0). Default is 1, i.e. log(x) is + equivalent to np.log1p(x). + :return np.ndarray: An array with the same length as the axis not used for computation containing the log geometric + means. + """ + log_a = np.log(a + eps) + return np.mean(log_a, axis=axis) + + +def geometric_mean(a: np.ndarray, axis: Optional[int] = None, eps: Union[int, float] = 1.0): + r""" + Return the geometric mean defined as (\prod_{i=1}^{n}(x_{i} + eps))^{1/n} - eps computed in log space as + exp(1/n(sum_{i=1}^{n}(ln(x_{i} + eps)))) - eps for numerical stability. + :param a: np.ndarray containing the data + :param axis: the axis over which the geometric mean is calculated. If None, computes the geometric mean over the + entire data. + :param eps: small value added to each value in order to avoid computing log(0). Default is 1, i.e. log(x) is + equivalent to np.log1p(x). + :return np.ndarray: An array with the same length as the axis not used for computation containing the geometric + means. + """ + log_geo_mean = log_geometric_mean(a, axis, eps) + return np.exp(log_geo_mean) - eps + + +def bw_kde(x: np.ndarray, method: str = "silverman"): + """ + Performs gaussian kernel density estimation using a specified method and returns the estimated bandwidth. + :param x: np.ndarray of values for which the KDE is performed. + :param method: The method used for estimating an optimal bandwidth, see scipy gaussian_kde documentation for + available methods. + :return float: The estimated bandwidth + """ + return gaussian_kde(x, bw_method=method).factor * 0.37 + # return FFTKDE(kernel=kernel, bw=method).fit(x).bw + + +def robust_scale(x: pd.Series, c: float = 1.4826, eps: Optional[Union[int, float]] = None): + r""" + Compute a scale param using the formula scale_{i} = (x_{i} - median(x)) / mad(x) where + mad = c * median(abs(x_{i} - median(x))) + eps is the median absolute deviation. + This function is derived from sctransform's implementation of robust scale: + https://github.com/satijalab/sctransform/blob/7e9c9557222d1e34416e8854ed22da580e533e78/R/utils.R#L162-L163 + :param x: pd.Series containing the values used to compute the scale. + :param c: Scaling constant used in the computation of mad. The default value is equivalent to the R implementation + of mad: https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/mad + :param eps: Small value added to the mad. If None, it defaults to `np.finfo(float).eps`. This should be equivalent + to sctransform's `.Machine$double.eps`. + + :return pd.Series containing the computed scales. 
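+
+    Example (illustrative values only):
+        >>> scores = robust_scale(pd.Series([1.0, 2.0, 100.0]))  # the value 100 gets a large |score|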
+ """ + if eps is None: + eps = np.finfo(float).eps + + deviation = x - x.median() + mad = c * deviation.abs().median() + eps + scale = deviation / mad + return scale + + +def is_outlier(model_param: np.ndarray, means: np.ndarray, threshold: Union[int, float] = 10): + """ + Compute outlier genes based on deviation of model_param from mean counts in individual bins. + This function is derived from sctransform's implementation of is_outlier: + https://github.com/satijalab/sctransform/blob/7e9c9557222d1e34416e8854ed22da580e533e78/R/utils.R#L120-L129 + :param model_param: np.ndarray of a specific model_param. This can be the intercept, any batch/condition or + loc/scale param. This is the param based on which it is determined if a specific gene is an outlier. + :param means: np.ndarray of genewise mean counts. The means are used to determine bins within outlier detection of + the model_param is performed. + :param threshold: The minimal score required for a model_param to be considered an outlier. + + :return np.ndarray of booleans indicating if a particular gene is an outlier (True) or not (False). + """ + bin_width = (means.max() - means.min()) * bw_kde(means) / 2 + eps = np.finfo(float).eps * 10 + + breaks1 = np.arange(means.min() - eps, means.max() + bin_width, bin_width) + breaks2 = np.arange(means.min() - eps - bin_width / 2, means.max() + bin_width, bin_width) + bins1 = pd.cut(means, bins=breaks1) + bins2 = pd.cut(means, bins=breaks2) + + df_tmp = pd.DataFrame({"param": model_param, "bins1": bins1, "bins2": bins2}) + df_tmp["score1"] = df_tmp.groupby("bins1")["param"].transform(robust_scale) + df_tmp["score2"] = df_tmp.groupby("bins2")["param"].transform(robust_scale) + df_tmp["outlier"] = df_tmp[["score1", "score2"]].abs().min(axis=1) > threshold + + return df_tmp From 9f768db1044a5e17f3b23707b7c738c6730f6679 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 27 May 2022 16:03:44 +0200 Subject: [PATCH 02/58] fixed trailing whitespace --- docs/api/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api/index.rst b/docs/api/index.rst index f49487f5..721703dd 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -23,7 +23,7 @@ where `xxxxxx` is the backend desired, like `tf2`, `numpy` or `statsmodel`. For example, here is a short snippet to give a sense of how the API might work:: from batchglm.models.glm_nb import Model as NBModel - from batchglm.train.numpy.glm_nb import Estimator as NBEstimator + from batchglm.train.numpy.glm_nb import Estimator as NBEstimator from batchglm.utils.input import InputDataGLM input_data = InputDataGLM(data=data_matrix, design_loc=_design_loc, design_scale=_design_scale, as_dask=as_dask) From 3fe55245de4db75ee726e91f62de1917896af334 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 27 May 2022 16:04:17 +0200 Subject: [PATCH 03/58] removed return statements --- tests/numpy/test_accuracy.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/numpy/test_accuracy.py b/tests/numpy/test_accuracy.py index 78661b89..f514560a 100644 --- a/tests/numpy/test_accuracy.py +++ b/tests/numpy/test_accuracy.py @@ -64,7 +64,7 @@ def _test_accuracy(self, estimator: EstimatorGlm) -> bool: class TestAccuracyNB(TestAccuracy): - def test_accuracy_rand_theta(self) -> bool: + def test_accuracy_rand_theta(self): """ This tests randTheta simulated data with 2 conditions and 4 batches sparse and dense. 
""" @@ -94,7 +94,7 @@ def test_accuracy_rand_theta(self) -> bool: ) assert self._test_accuracy(sparse_estimator) - def test_accuracy_const_theta(self) -> bool: + def test_accuracy_const_theta(self): """ This tests constTheta simulated data with 2 conditions and 0 batches sparse and dense. """ @@ -113,7 +113,7 @@ def test_accuracy_const_theta(self) -> bool: sparse_estimator = get_estimator( noise_model="nb", model=sparse_model, init_location="standard", init_scale="standard" ) - return self._test_accuracy(sparse_estimator) + assert self._test_accuracy(sparse_estimator) dense_estimator = get_estimator( noise_model="nb", model=dense_model, init_location="standard", init_scale="standard", quick_scale=True @@ -123,7 +123,7 @@ def test_accuracy_const_theta(self) -> bool: sparse_estimator = get_estimator( noise_model="nb", model=sparse_model, init_location="standard", init_scale="standard", quick_scale=True ) - return self._test_accuracy(sparse_estimator) + assert self._test_accuracy(sparse_estimator) if __name__ == "__main__": From 74155f0ba339f09f3bcd775be3149541c7427e94 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 27 May 2022 16:16:04 +0200 Subject: [PATCH 04/58] cast np.finfo to float explicitly --- batchglm/train/numpy/base_glm/vst.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/batchglm/train/numpy/base_glm/vst.py b/batchglm/train/numpy/base_glm/vst.py index 2e0bf091..6068f40a 100644 --- a/batchglm/train/numpy/base_glm/vst.py +++ b/batchglm/train/numpy/base_glm/vst.py @@ -63,7 +63,7 @@ def robust_scale(x: pd.Series, c: float = 1.4826, eps: Optional[Union[int, float :return pd.Series containing the computed scales. """ if eps is None: - eps = np.finfo(float).eps + eps = float(np.finfo(float).eps) deviation = x - x.median() mad = c * deviation.abs().median() + eps From 47c8341b8209139542e722ceb6d4de149006af32 Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 19 Jun 2022 11:16:32 +0200 Subject: [PATCH 05/58] resolved commments by Ilan Gold in #145 --- batchglm/train/numpy/base_glm/estimator.py | 69 ++++++++++++---------- batchglm/train/numpy/base_glm/vst.py | 3 +- 2 files changed, 39 insertions(+), 33 deletions(-) diff --git a/batchglm/train/numpy/base_glm/estimator.py b/batchglm/train/numpy/base_glm/estimator.py index a30aa5b4..c6a0a934 100644 --- a/batchglm/train/numpy/base_glm/estimator.py +++ b/batchglm/train/numpy/base_glm/estimator.py @@ -94,19 +94,28 @@ def train_sequence(self, training_strategy, **kwargs): "overrding %s from training strategy with value %s with new value %s\n" % (x, str(d[x]), str(kwargs[x])) ) - if "dispersion_smoothing" in d: - if d["dispersion_smoothing"] == "sctransform": - if FDataGrid is None or NWSmoother is None: - logger.error("Missing optional dependency scikit-fda.") + + ds_method = d.get("dispersion_smoothing", "no_smoothing") + + # perform checks before starting the training procedure + if ds_method == "sctransform" and (FDataGrid is None or NWSmoother is None): + logger.error("Missing optional dependency scikit-fda.") + if ds_method not in ["sctransform"]: + raise AssertionError(f"Unknown dispersion smoothing method: {ds_method}") + + # now start the training self.train(**d, **kwargs) logger.debug("Training sequence #%d complete", idx + 1) - if "dispersion_smoothing" in d: - if d["dispersion_smoothing"] == "sctransform": - self.perform_vst() - elif d["dispersion_smoothing"] == "edger": - raise NotImplementedError() - elif d["dispersion_smoothing"] == "deseq2": - raise NotImplementedError() + + # perform dispersion smoothing 
after training if specified + if ds_method == "no_smoothing": + continue + elif ds_method == "sctransform": + self.perform_vst() + elif ds_method == "edger": + raise NotImplementedError() + elif ds_method == "deseq2": + raise NotImplementedError() def perform_vst( self, @@ -118,18 +127,18 @@ def perform_vst( logger.info("Performing Dispersion smoothing with flavour sctransform...") # compute geometric log mean counts if use_geometric_mean: - genes_log_gmean = np.log10(geometric_mean(self.model_container.x, axis=0, eps=gmean_eps)) + featurewise_means = np.log10(geometric_mean(self.model_container.x, axis=0, eps=gmean_eps)) else: - genes_log_gmean = np.log10(np.mean(self.model_container.x, axis=0)) - if isinstance(genes_log_gmean, dask.array.core.Array): - genes_log_gmean = genes_log_gmean.compute() + featurewise_means = np.log10(np.mean(self.model_container.x, axis=0)) + if isinstance(featurewise_means, dask.array.core.Array): + featurewise_means = featurewise_means.compute() # specify which kind of regularization is performed scale_param = np.exp(self.model_container.theta_scale[0]) # TODO check if this is always correct if theta_regularization == "log_theta": dispersion_par = np.log10(scale_param) elif theta_regularization == "od_factor": - dispersion_par = np.log10(1 + np.power(10, genes_log_gmean) / scale_param) + dispersion_par = np.log10(1 + np.power(10, featurewise_means) / scale_param) else: raise ValueError(f"Unrecognized regularization method {theta_regularization}") if isinstance(dispersion_par, dask.array.core.Array): @@ -137,36 +146,34 @@ def perform_vst( # downsample because KDE and bandwidth selection would take too long if performed on the entire dataset. # It is sufficient to get a general idea of the data distribution - if len(genes_log_gmean) > 2000: + if len(featurewise_means) > 2000: logger.info("Sampling 2000 random features...") - idx = np.random.choice(np.arange(len(genes_log_gmean)), 2000) - genes_log_gmean_filtered = genes_log_gmean[idx] + idx = np.random.choice(np.arange(len(featurewise_means)), 2000) + featurewise_means_filtered = featurewise_means[idx] dispersion_par_filtered = dispersion_par[idx] else: dispersion_par_filtered = dispersion_par - genes_log_gmean_filtered = genes_log_gmean + featurewise_means_filtered = featurewise_means - # check for outliers in the function f(genes_log_gmean_filtered) = dispersion_par_filtered and remove them + # check for outliers in the function f(featurewise_means_filtered) = dispersion_par_filtered and remove them logger.info("Searching for outliers...") - outliers = is_outlier(model_param=dispersion_par_filtered, means=genes_log_gmean_filtered) - outliers = outliers["outlier"].values + outliers = is_outlier(model_param=dispersion_par_filtered, means=featurewise_means_filtered) if np.any(outliers): # toss out the outliers logger.info(f"Excluded {np.sum(outliers)} outliers.") - genes_log_gmean_filtered = genes_log_gmean_filtered[~outliers] + featurewise_means_filtered = featurewise_means_filtered[~outliers] dispersion_par_filtered = dispersion_par_filtered[~outliers] # define a data grid with the downsampled and filtered values - domain_range = genes_log_gmean_filtered.min(), genes_log_gmean_filtered.max() + domain_range = featurewise_means_filtered.min(), featurewise_means_filtered.max() fd = FDataGrid( - data_matrix=dispersion_par_filtered, grid_points=genes_log_gmean_filtered, domain_range=domain_range + data_matrix=dispersion_par_filtered, grid_points=featurewise_means_filtered, domain_range=domain_range ) # select 
bandwidth to be used for smoothing - bw = bw_kde(genes_log_gmean_filtered) * bw_adjust - # bandwidth = FFTKDE(kernel='gaussian', bw='ISJ').fit(fd).bw * 0.37 * 3 + bw = bw_kde(featurewise_means_filtered) * bw_adjust - # define points for evaluation. Ensure x_points is within the range of genes_log_gmean_filtered - x_points = np.clip(genes_log_gmean, *domain_range) + # define points for evaluation. Ensure x_points is within the range of featurewise_means_filtered + x_points = np.clip(featurewise_means, *domain_range) # smooth the dispersion_par logger.info("Performing smoothing...") smoother = NWSmoother(smoothing_parameter=bw, output_points=x_points) @@ -176,7 +183,7 @@ def perform_vst( if theta_regularization == "log_theta": smoothed_scale = np.power(10, dispersion_par_smoothed) elif theta_regularization == "od_factor": - smoothed_scale = np.power(10, genes_log_gmean) / (np.power(10, dispersion_par_smoothed) - 1) + smoothed_scale = np.power(10, featurewise_means) / (np.power(10, dispersion_par_smoothed) - 1) else: raise ValueError(f"Unrecognized regularization method {theta_regularization}") diff --git a/batchglm/train/numpy/base_glm/vst.py b/batchglm/train/numpy/base_glm/vst.py index 6068f40a..8220b2ae 100644 --- a/batchglm/train/numpy/base_glm/vst.py +++ b/batchglm/train/numpy/base_glm/vst.py @@ -45,7 +45,6 @@ def bw_kde(x: np.ndarray, method: str = "silverman"): :return float: The estimated bandwidth """ return gaussian_kde(x, bw_method=method).factor * 0.37 - # return FFTKDE(kernel=kernel, bw=method).fit(x).bw def robust_scale(x: pd.Series, c: float = 1.4826, eps: Optional[Union[int, float]] = None): @@ -97,4 +96,4 @@ def is_outlier(model_param: np.ndarray, means: np.ndarray, threshold: Union[int, df_tmp["score2"] = df_tmp.groupby("bins2")["param"].transform(robust_scale) df_tmp["outlier"] = df_tmp[["score1", "score2"]].abs().min(axis=1) > threshold - return df_tmp + return df_tmp["outlier"].to_numpy() From 6e347f7b0b263c84a174ac313fe083a78cdeaca9 Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 19 Jun 2022 11:22:03 +0200 Subject: [PATCH 06/58] bugfix: also check for "no_smoothing" --- batchglm/train/numpy/base_glm/estimator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/batchglm/train/numpy/base_glm/estimator.py b/batchglm/train/numpy/base_glm/estimator.py index c6a0a934..16457f29 100644 --- a/batchglm/train/numpy/base_glm/estimator.py +++ b/batchglm/train/numpy/base_glm/estimator.py @@ -100,7 +100,7 @@ def train_sequence(self, training_strategy, **kwargs): # perform checks before starting the training procedure if ds_method == "sctransform" and (FDataGrid is None or NWSmoother is None): logger.error("Missing optional dependency scikit-fda.") - if ds_method not in ["sctransform"]: + if ds_method not in ["sctransform", "no_smoothing"]: raise AssertionError(f"Unknown dispersion smoothing method: {ds_method}") # now start the training From 235e8a5139a786ff6e62e2813680d36c41e99378 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:02:13 +0200 Subject: [PATCH 07/58] added nbdeviance from edgeR --- batchglm/external/edgeR/nbinomDeviance.py | 68 +++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 batchglm/external/edgeR/nbinomDeviance.py diff --git a/batchglm/external/edgeR/nbinomDeviance.py b/batchglm/external/edgeR/nbinomDeviance.py new file mode 100644 index 00000000..33990749 --- /dev/null +++ b/batchglm/external/edgeR/nbinomDeviance.py @@ -0,0 +1,68 @@ +import numpy as np + +from .external import BaseModelContainer + + +def 
nb_deviance(model: BaseModelContainer): + + eps = 1e-8 + eps2 = 1e-4 + + y = model.x + mu = model.location + phi = 1 / model.scale + + y += eps + mu += eps + + if isinstance(phi, float): + phi = np.full(y.shape[1], phi) + + """ + Calculating the deviance using either the Poisson (small phi*mu), the Gamma (large) or NB (everything else). + Some additional work is put in to make the transitions between families smooth. + """ + + deviance = np.zeros_like(y, dtype=float) + + poisson_idx = phi < eps2 + + if np.any(poisson_idx): + deviance[:, poisson_idx] = _poisson_deviance(poisson_idx, y, mu, phi) + + non_poisson_idx = ~poisson_idx + y_non_poisson = y[:, non_poisson_idx] + mu_non_poisson = mu[:, non_poisson_idx] + phi_non_poisson = phi[non_poisson_idx] + product = mu_non_poisson * phi_non_poisson + + deviance[:, non_poisson_idx] = np.where( + product > 1e6, + _gamma_deviance(y_non_poisson, mu_non_poisson, product), + _nb_deviance(y_non_poisson, mu_non_poisson, phi_non_poisson), + ) + + return np.sum(deviance, axis=0) + + +def _poisson_deviance(idx, y, mu, phi): + + y_poisson = y[:, idx] + mu_poisson = mu[:, idx] + phi_poisson = phi + resid = y_poisson - mu_poisson + return 2 * ( + y_poisson * np.log(y_poisson / mu_poisson) + - resid + - 0.5 * resid * phi_poisson * (1 + phi_poisson * (2 / 3 * resid - y)) + ) + # return 2 * ( y * std::log(y/mu) - resid - 0.5*resid*resid*phi*(1+phi*(2/3*resid-y)) ); + + +def _gamma_deviance(y, mu, product): + return 2 * ((y - mu) / mu - np.log(y / mu)) * mu / (1 + product) + + +def _nb_deviance(y, mu, phi): + inv_phi = 1 / phi + return 2 * (y * np.log(y / mu) + (y + inv_phi) * np.log((mu + inv_phi) / (y + inv_phi))) From 66b3ed5aa0cd34b2d245ea68939965ce9755aeaa Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:02:43 +0200 Subject: [PATCH 08/58] added aveLogCPM from edgeR --- batchglm/external/edgeR/aveLogCPM.py | 56 ++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 batchglm/external/edgeR/aveLogCPM.py diff --git a/batchglm/external/edgeR/aveLogCPM.py b/batchglm/external/edgeR/aveLogCPM.py new file mode 100644 index 00000000..bbaab976 --- /dev/null +++ b/batchglm/external/edgeR/aveLogCPM.py @@ -0,0 +1,56 @@ +import numpy as np + +from .glm_one_group import fit + + +def avg_log_cpm( + counts, + size_factors, + prior_count: int = 2, + dispersion: np.ndarray = None, + weights: np.ndarray = None, + maxit=50, + tolerance=1e-10, +): + # Compute average log2-cpm for each gene over all samples. + # This measure is designed to be used as the x-axis for all abundance-dependent trend analyses in edgeR. + # It is generally held fixed through an edgeR analysis. + # Original author: Gordon Smyth + # Created 25 Aug 2012. Last modified 19 Nov 2018. 
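+
+    # Outline of the computation below: `add_priors` scales `prior_count` by each library's
+    # relative size factor and adds it to the counts, `fit` then estimates one intercept per
+    # feature (a one-group NB GLM), and the natural-log result is rescaled to log2 counts per
+    # million via (output + log(1e6)) / log(2).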
+ + # Check dispersion + if dispersion is None: + dispersion = 0.05 + + # Check weights + if weights is None: + weights = 1.0 + + # Calling the C++ code + + adjusted_prior, adjusted_size_factors = add_priors(prior_count, size_factors) + # return adjusted_prior, adjusted_size_factors + x = np.array(counts, dtype=float) # model.x.copy() + x += adjusted_prior + output = fit( + data=x, + size_factors=adjusted_size_factors, + dispersion=dispersion, + weights=weights, + maxit=maxit, + tolerance=tolerance, + ) + output = (output + np.log(1e6)) / np.log(2) + + return output + + +def add_priors(prior_count: int, size_factors: np.ndarray): + + factors = np.exp(size_factors) + avg_factors = np.mean(factors) + adjusted_priors = prior_count * factors / avg_factors + + adjusted_size_factors = np.log(factors + 2 * adjusted_priors) + + return adjusted_priors, adjusted_size_factors From 17360a03d6102b94fb0dc5e136703672cf6e88f0 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:03:06 +0200 Subject: [PATCH 09/58] added calcNormFactors from edgeR --- batchglm/external/edgeR/calcNormFactors.py | 198 +++++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100644 batchglm/external/edgeR/calcNormFactors.py diff --git a/batchglm/external/edgeR/calcNormFactors.py b/batchglm/external/edgeR/calcNormFactors.py new file mode 100644 index 00000000..8e08ea90 --- /dev/null +++ b/batchglm/external/edgeR/calcNormFactors.py @@ -0,0 +1,198 @@ +from typing import Optional + +import numpy as np +from scipy.stats import rankdata + + +def calc_size_factors(x: np.ndarray, method: Optional[str] = None, *args, **kwargs): + assert ~np.any(np.isnan(x)), "Counts matrix must not contain NaN!" + lib_size = np.sum(x, axis=1, keepdims=True) + x = x[:, np.sum(x, axis=0) > 0] + + if method is None: + size_factors = np.ones_like(lib_size) + elif method == "TMM": + size_factors = _calc_factor_tmm(data=x, *args, **kwargs) + elif method == "TMMwsp": + size_factors = _calc_factor_tmmwsp(data=x, *args, **kwargs) + elif method == "RLE": + size_factors = _calc_factor_rle(data=x) + elif method == "upperquartile": + size_factors = _calc_factor_quantile(data=x, *args, **kwargs) + else: + raise ValueError(f"Method {method} not recognized.") + + # Factors should multiple to one + size_factors = size_factors / np.exp(np.mean(np.log(size_factors))) + return size_factors + + +def _calc_factor_rle(data: np.ndarray): + # Scale factors as in Anders et al (2010) + geometric_feature_means = np.exp(np.mean(np.log(data), axis=0)) + adjusted_data = data / geometric_feature_means + return np.median(adjusted_data[:, geometric_feature_means > 0], axis=1, keepdims=True) / np.sum( + data, axis=1, keepdims=True + ) + + +def _calc_factor_quantile(data, p=0.75): + # Generalized version of upper-quartile normalization + size_factors = np.quantile(data, q=p, axis=1, keepdims=True) + if np.min(size_factors) == 0: + print("Warning: One or more quantiles are zero.") + size_factors = size_factors / np.sum(data, axis=1, keepdims=True) + return size_factors + + +def _calc_factor_tmm( + data: np.ndarray, + ref_idx: Optional[int] = None, + logratio_trim: float = 0.3, + sum_trim: float = 0.05, + do_weighting: bool = True, + a_cutoff: float = -1e10, +): + # TMM between two libraries + + if ref_idx is None: + f75 = _calc_factor_quantile(data, p=0.75) + if np.median(f75) < 1e-20: + ref_idx = np.argmax(np.sum(np.sqrt(data), axis=1)) + else: + ref_idx = np.argmin(np.abs(f75 - np.mean(f75))) + + sample_sums = np.sum(data, axis=1, keepdims=True) + 
sum_normalized_data = data / sample_sums + log_ratios = np.log2(sum_normalized_data / sum_normalized_data[ref_idx]) + absolute_values = (np.log2(sum_normalized_data) + np.log2(sum_normalized_data[ref_idx])) / 2 + estimated_asymptotic_variance = (sample_sums - data) / sample_sums / data + estimated_asymptotic_variance += (sample_sums[ref_idx] - data[ref_idx]) / sample_sums[ref_idx] / data[ref_idx] + + # remove infinite values, cutoff based on aCutOff + finite_idx = np.isfinite(log_ratios) & np.isfinite(absolute_values) & (absolute_values > a_cutoff) + + size_factors = np.ones_like(sample_sums, dtype=float) + for i in range(data.shape[0]): + log_ratios_i = log_ratios[i, finite_idx[i]] + absolute_values_i = absolute_values[i, finite_idx[i]] + estimated_asymptotic_variance_i = estimated_asymptotic_variance[i, finite_idx[i]] + + if np.max(np.abs(log_ratios_i) < 1e-6): + continue + + # taken from the original mean() function + n = len(log_ratios_i) + lo_l = np.floor(n * logratio_trim) + 1 + hi_l = n + 1 - lo_l + lo_s = np.floor(n * sum_trim) + 1 + hi_s = n + 1 - lo_s + + keep = (rankdata(log_ratios_i) >= lo_l) & (rankdata(log_ratios_i) <= hi_l) + keep &= (rankdata(absolute_values_i) >= lo_s) & (rankdata(absolute_values_i) <= hi_s) + + if do_weighting: + size_factor_i = np.nansum(log_ratios_i[keep] / estimated_asymptotic_variance_i[keep]) + size_factor_i = size_factor_i / np.nansum(1 / estimated_asymptotic_variance_i[keep]) + else: + size_factor_i = np.nanmean(log_ratios_i[keep]) + + # Results will be missing if the two libraries share no features with positive counts + # In this case, return unity + if np.isnan(size_factor_i): + size_factor_i = 0 + size_factors[i] = 2 ** size_factor_i + return size_factors + + +def _calc_factor_tmmwsp( + data: np.ndarray, + ref_idx: Optional[int] = None, + logratio_trim: float = 0.3, + sum_trim: float = 0.05, + do_weighting: bool = True, + a_cutoff: float = -1e10, +): + # TMM with pairing of singleton positive counts between the obs and ref libraries + if ref_idx is None: + ref_idx = np.argmax(np.sum(np.sqrt(data), axis=1)) + eps = 1e-14 + sample_sums = np.sum(data, axis=1, keepdims=True) + + # Identify zero counts + n_pos = np.where(data > eps, 1, 0) + n_pos = 2 * n_pos + n_pos[ref_idx] + + size_factors = np.ones_like(sample_sums, dtype=float) + + for i in range(data.shape[0]): + # Remove double zeros and NAs + keep = np.where(n_pos[i] > 0) + data_i = data[i, keep] + ref_i = data[ref_idx, keep] + n_pos_i = n_pos[i, keep] + + # Pair up as many singleton positives as possible + # The unpaired singleton positives are discarded so that no zeros remain + zero_obs = n_pos_i == 1 + zero_ref = n_pos_i == 2 + k = zero_obs | zero_ref + n_eligible_singles = np.min((np.sum(zero_obs), np.sum(zero_ref))) + if n_eligible_singles > 0: + ref_i_k = np.sort(ref_i[k])[::-1][1:n_eligible_singles] + data_i_k = np.sort(data_i[k])[::-1][1:n_eligible_singles] + data_i = np.concatenate((data_i[~k], data_i_k)) + ref_i = np.concatenate((ref_i[~k], ref_i_k)) + else: + data_i = data_i[~k] + ref_i = ref_i[~k] + + # Any left? 
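+        # If nothing remains after discarding the unpaired singletons, the two libraries share
+        # no usable features and the size factor stays at its initialised value of 1.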
+ n = len(data_i) + if n == 0: + continue + + # Compute M and A values + data_i_p = data_i / sample_sums[i] + ref_i_p = ref_i / sample_sums[ref_idx] + m = np.log2(data_i_p / ref_i_p) + a = 0.5 * np.log2(data_i_p * ref_i_p) + + # If M all zero, return 1 + if np.max(np.abs(m)) < 1e-6: + continue + + # M order, breaking ties by shrunk M + data_i_p_shrunk = (data_i + 0.5) / (sample_sums[i] + 0.5) + ref_i_p_shrunk = (ref_i + 0.5) / (sample_sums[ref_idx] + 0.5) + m_shrunk = np.log2(data_i_p_shrunk / ref_i_p_shrunk) + m_ordered = np.argsort( + np.array(list(zip(m, m_shrunk)), dtype={"names": ["m", "m_shrunk"], "formats": [m.dtype, m_shrunk.dtype]}) + ) + + # a order + a_ordered = np.argsort(a) + + # Trim + lo_m = int(n * logratio_trim) + 1 + hi_m = n + 1 - lo_m + keep_m = np.zeros(n, dtype=bool) + keep_m[m_ordered[lo_m:hi_m]] = True + lo_a = int(n * sum_trim) + 1 + hi_a = n + 1 - lo_a + keep_a = np.zeros(n, dtype=bool) + keep_a[a_ordered[lo_a:hi_a]] = True + keep = keep_a & keep_m + m = m[keep] + + # Average the m values + if do_weighting: + data_i_p = data_i_p[keep] + ref_i_p = ref_i_p[keep] + v = (1 - data_i_p) / data_i_p / sample_sums[i] + (1 - ref_i_p) / ref_i_p / sample_sums[ref_idx] + w = (1 + 1e-6) / (v + 1e-6) + size_factor_i = np.sum(w * m) / np.sum(w) + else: + size_factor_i = np.mean(m) + size_factors[i] = 2 ** size_factor_i + return size_factors From 6e1e530a2b34f95e1b45aeb33e8fa854cdb85235 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:03:49 +0200 Subject: [PATCH 10/58] added residDF from edgeR --- batchglm/external/edgeR/residDF.py | 41 ++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 batchglm/external/edgeR/residDF.py diff --git a/batchglm/external/edgeR/residDF.py b/batchglm/external/edgeR/residDF.py new file mode 100644 index 00000000..ac4cbb82 --- /dev/null +++ b/batchglm/external/edgeR/residDF.py @@ -0,0 +1,41 @@ +import numpy as np + + +def _combo_groups(truths: np.ndarray): + + # Function that returns a list of vectors of indices, + # where each vector refers to the rows with the same + # combination of TRUE/FALSE values in 'truths'. + + uniq_cols, rev_index = np.unique(truths, axis=1, return_inverse=True) + return [np.where(rev_index == i)[0] for i in range(uniq_cols.shape[1])] + + +def resid_df(zero: np.ndarray, design: np.ndarray): + """ + TODO + :param zero: boolean np.ndarray of shape (n_obs x n_features). It yields True if both + the data and the fitted value of a GLM where close to zero within a small margin. + :param design: the design matrix used in the GLM. 
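+
+    :return np.ndarray of residual degrees of freedom per feature, i.e. the number of observations
+        minus the number of estimable parameters, adjusted for observations flagged in `zero`.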
+ """ + + n_obs = zero.shape[0] + n_param = design.shape[1] + n_zero = np.sum(zero, axis=0) # the number of zeros per feature; shape = (n_features, ) + + degrees_of_freedom = np.full(zero.shape[1], n_obs - n_param) # shape = (n_features, ) + degrees_of_freedom[n_zero == n_obs] = 0 # 0 if only zeros for specific feature + + some_zero_idx = (n_zero > 0) & (n_zero < n_obs) # shape = (n_features, ) + if np.any(some_zero_idx): + some_zero = zero[:, some_zero_idx] # shape = (n_obs, len(np.where(some_zero_idx))) + groupings = _combo_groups(some_zero) # list of idx in some_zero with identical cols + + degrees_of_freedom_some_zero = n_obs - n_zero[some_zero_idx] + for group in groupings: + some_zero_group = some_zero[:, group[0]] # shape = (n_obs, ) + degrees_of_freedom_some_zero[group] -= np.linalg.matrix_rank(design[~some_zero_group]) + degrees_of_freedom_some_zero = np.max(degrees_of_freedom_some_zero, 0) + degrees_of_freedom[some_zero_idx] = degrees_of_freedom_some_zero + + return degrees_of_freedom From 21f6570dd1e2dc036624336dbac88015df88e674 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:04:11 +0200 Subject: [PATCH 11/58] added wleb wrapper from edgeR --- batchglm/external/edgeR/wleb.py | 69 +++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 batchglm/external/edgeR/wleb.py diff --git a/batchglm/external/edgeR/wleb.py b/batchglm/external/edgeR/wleb.py new file mode 100644 index 00000000..42517012 --- /dev/null +++ b/batchglm/external/edgeR/wleb.py @@ -0,0 +1,69 @@ +from typing import Any + +import numpy as np + +from .maximizeInterpolant import maximizeInterpolant + + +def wleb( + theta: Any, + loglik: Any, + prior_n: int = 5, + covariate: np.ndarray = None, + trend_method: str = "loess", + span: Any = None, + overall: bool = True, + trend: bool = True, + individual: bool = True, + m0: Any = None, +): + """ + Weighted likelihood empirical Bayes for estimating a parameter vector theta + given log-likelihood values on a grid of theta values + + returns tuple span, overall prior, shared_loglik(trended prior), trend, individual + """ + + n_features, n_theta = loglik.shape + # Check covariate and trend + if covariate is None: + trend_method = "none" + + # Set span + if span is None: + if n_features < 50: + out_span = 1 + else: + out_span = 0.25 + 0.75 * np.sqrt(50 / n_features) + + # overall prior + if overall: + out_overall = maximizeInterpolant(theta, np.sum(loglik, axis=0)) + else: + out_overall = None + + # trended prior + if m0 is None: + if trend_method == "none": + m0 = np.broadcast_to(np.sum(loglik, axis=0), loglik.shape) + elif trend_method == "loess": + m0 = loess_by_col(loglik, covariate, span=out_span) + else: + raise NotImplementedError(f"Method {trend_method} is not yet implemented.") + + if trend: + out_trend = maximizeInterpolant(theta, m0) + else: + out_trend = None + + # weighted empirical Bayes posterior estimates + if individual: + assert np.all(np.isfinite(prior_n)), "prior_n must not contain infinite values." 
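+        # Each feature's log-likelihood curve is augmented with `prior_n` copies of the trended
+        # prior m0 before interpolation, shrinking the individual estimates towards the trend.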
+ l0a = loglik + prior_n * m0 + out_individual = maximizeInterpolant(theta, l0a) + + return out_span, out_overall, m0, out_trend, out_individual + + +def loess_by_col(): + pass From 63444d17f66527d9723a949045b7b0899b3579d9 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:04:55 +0200 Subject: [PATCH 12/58] added maximizeInterpolant from edgeR --- .../external/edgeR/maximizeInterpolant.py | 207 ++++++++++++++++++ 1 file changed, 207 insertions(+) create mode 100644 batchglm/external/edgeR/maximizeInterpolant.py diff --git a/batchglm/external/edgeR/maximizeInterpolant.py b/batchglm/external/edgeR/maximizeInterpolant.py new file mode 100644 index 00000000..eb304bd5 --- /dev/null +++ b/batchglm/external/edgeR/maximizeInterpolant.py @@ -0,0 +1,207 @@ +import numpy as np + + +def maximize_interpolant(x, y): + # This function takes an ordered set of spline points and a likelihood matrix where each row + # corresponds to a tag and each column corresponds to a spline point. It then calculates the + # position at which the maximum interpolated likelihood occurs for each by solving the derivative + # of the spline function. + + interpolator = Interpolator(n=len(x)) + output = np.zeros(y.shape[0], dtype=float) + for i in range(y.shape[0]): + output[i] = interpolator.find_max(x, y[i]) + return output + + +class Interpolator: + def __init__(self, n: int): + + self.npts = n + if self.npts < 2: + raise ValueError("Must have at lest two points for interpolation") + + self.b = np.zeros(n, dtype=float) + self.c = np.zeros(n, dtype=float) + self.d = np.zeros(n, dtype=float) + + def find_max(self, x, y): + maxed_at = np.argmax(y) + maxed = y[maxed_at] + """ + maxed = -1 + maxed_at = -1 + for i in range(self.npts): + # Getting a good initial guess for the MLE. + if maxed_at < 0 or y[i] > maxed: + maxed = y[i] + maxed_at = i + """ + x_max = x[maxed_at] + x, y, self.b, self.c, self.d = fmm_spline(self.npts, x, y, self.b, self.c, self.d) + + # First we have a look at the segment on the left and see if it contains the maximum. + + if maxed_at > 0: + ld = self.d[maxed_at - 1] + lc = self.c[maxed_at - 1] + lb = self.b[maxed_at - 1] + sol1_left, sol2_left, solvable_left = quad_solver(3 * ld, 2 * lc, lb) + if solvable_left: + """ + Using the solution with the maximum (not minimum). If the curve is mostly increasing, the + maximal point is located at the smaller solution (i.e. sol1 for a>0). If the curve is mostly + decreasing, the maximal is located at the larger solution (i.e., sol1 for a<0). + """ + chosen_sol = sol1_left + """ + The spline coefficients are designed such that 'x' in 'y + b*x + c*x^2 + d*x^3' is + equal to 'x_t - x_l' where 'x_l' is the left limit of that spline segment and 'x_t' + is where you want to get an interpolated value. This is necessary in 'splinefun' to + ensure that you get 'y' (i.e. the original data point) when 'x=0'. For our purposes, + the actual MLE corresponds to 'x_t' and is equal to 'solution + x_0'. + """ + if (chosen_sol > 0) and (chosen_sol < (x[maxed_at] - x[maxed_at - 1])): + temp = ((ld * chosen_sol + lc) * chosen_sol + lb) * chosen_sol + y[maxed_at - 1] + if temp > maxed: + maxed = temp + x_max = chosen_sol + x[maxed_at - 1] + + # Repeating for the segment on the right. 
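+        # Same procedure as for the left segment, but using the spline coefficients of the
+        # segment that starts at the maximal grid point (maxed_at instead of maxed_at - 1).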
+ + if maxed_at < self.npts - 1: + rd = self.d[maxed_at] + rc = self.c[maxed_at] + rb = self.b[maxed_at] + sol1_right, sol2_right, solvable_right = quad_solver(3 * rd, 2 * rc, rb) + if solvable_right: + chosen_sol = sol1_right + print(sol1_right, sol2_right) + if (chosen_sol > 0) and (chosen_sol < (x[maxed_at + 1] - x[maxed_at])): + temp = ((rd * chosen_sol + rc) * chosen_sol + rb) * chosen_sol + y[maxed_at] + if temp > maxed: + maxed = temp + x_max = chosen_sol + x[maxed_at] + + return x_max + + +def fmm_spline(n: int, x: np.ndarray, y: np.ndarray, b: np.ndarray, c: np.ndarray, d: np.ndarray): + """ + This code is a python derivative of fmm_spline in R core splines.c as implemented in edgeR. + """ + """ + Spline Interpolation + -------------------- + C code to perform spline fitting and interpolation. + There is code here for: + + 1. Splines with end-conditions determined by fitting + cubics in the start and end intervals (Forsythe et al). + + + Computational Techniques + ------------------------ + A special LU decomposition for symmetric tridiagonal matrices + is used for all computations, except for periodic splines where + Choleski is more efficient. + + + Splines a la Forsythe Malcolm and Moler + --------------------------------------- + In this case the end-conditions are determined by fitting + cubic polynomials to the first and last 4 points and matching + the third derivitives of the spline at the end-points to the + third derivatives of these cubics at the end-points. + """ + + i = 0 + t = 0 + + # Adjustment for 1-based arrays + """ + x -= 1 + y -= 1 + b -= 1 + c -= 1 + d -= 1 + """ + + if n < 2: + return x, y, b, c, d + + if n < 3: + t = y[1] - y[0] + b[0] = t / (x[1] - x[0]) + b[1] = b[0] + c[0] = 0.0 + c[1] = 0.0 + d[0] = 0.0 + d[1] = 0.0 + return x, y, b, c, d + + # Set up tridiagonal system + # b = diagonal, d = offdiagonal, c = right hand side + + d[0] = x[1] - x[0] + c[1] = (y[1] - y[0]) / d[0] # ;/* = +/- Inf for x[1]=x[2] -- problem? */ + for i in range(1, n - 1): + d[i] = x[i + 1] - x[i] + b[i] = 2.0 * (d[i - 1] + d[i]) + c[i + 1] = (y[i + 1] - y[i]) / d[i] + c[i] = c[i + 1] - c[i] + + """ + End conditions. + Third derivatives at x[0] and x[n-1] obtained + from divided differences + """ + + b[0] = -d[0] + + b[n - 1] = -d[n - 2] + c[0] = 0.0 + c[n - 1] = 0.0 + if n > 3: + c[0] = c[2] / (x[3] - x[1]) - c[1] / (x[2] - x[0]) + c[n - 1] = c[n - 2] / (x[n - 1] - x[n - 3]) - c[n - 3] / (x[n - 2] - x[n - 4]) + c[0] = c[0] * d[0] * d[0] / (x[3] - x[0]) + c[n - 1] = -c[n - 1] * d[n - 2] * d[n - 2] / (x[n - 1] - x[n - 4]) + + # Gaussian elimination + for i in range(1, n): + t = d[i - 1] / b[i - 1] + b[i] = b[i] - t * d[i - 1] + c[i] = c[i] - t * c[i - 1] + + # Backward substitution + + c[n - 1] = c[n - 1] / b[n - 1] + for i in range(n - 2, -1, -1): + c[i] = (c[i] - d[i] * c[i + 1]) / b[i] + + # c[i] is now the sigma[i-1] of the text + # Compute polynomial coefficients + + b[n - 1] = (y[n - 1] - y[n - 2]) / d[n - 2] + d[n - 2] * (c[n - 2] + 2.0 * c[n - 1]) + for i in range(0, n - 1): + b[i] = (y[i + 1] - y[i]) / d[i] - d[i] * (c[i + 1] + 2.0 * c[i]) + d[i] = (c[i + 1] - c[i]) / d[i] + c[i] = 3.0 * c[i] + + c[n - 1] = 3.0 * c[n - 1] + d[n - 1] = d[n - 2] + return x, y, b, c, d + + +def quad_solver(a: float, b: float, c: float): + """ + Find the two solutions for the formula x = (-b +- sqrt(b^2-4ac) / 2a + :return tuple(sol1, sol2, solvable). 
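+
+    Example (illustrative): quad_solver(1.0, -3.0, 2.0) solves x^2 - 3x + 2 = 0 and returns
+    (1.0, 2.0, True); a negative discriminant yields (None, None, False).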
+ """ + back = np.square(b) - 4 * a * c + if back < 0: + return None, None, False + front = -b / (2 * a) + back = np.sqrt(back) / (2 * a) + return front - back, front + back, True From 9756c36d04aa36445b1ab519056adb4e2ed15f0d Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:07:31 +0200 Subject: [PATCH 13/58] added external.py for external imports --- batchglm/external/edgeR/external.py | 1 + 1 file changed, 1 insertion(+) create mode 100644 batchglm/external/edgeR/external.py diff --git a/batchglm/external/edgeR/external.py b/batchglm/external/edgeR/external.py new file mode 100644 index 00000000..cc17729b --- /dev/null +++ b/batchglm/external/edgeR/external.py @@ -0,0 +1 @@ +from batchglm.train.numpy.base_glm.B.base_glm.model import BaseModelContainer From 9dd2d38eb0365bb9808148ceab9e59dbcf5f7cf5 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:07:56 +0200 Subject: [PATCH 14/58] added squeezeVar from limma --- batchglm/external/edgeR/limma/squeezeVar.py | 58 +++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 batchglm/external/edgeR/limma/squeezeVar.py diff --git a/batchglm/external/edgeR/limma/squeezeVar.py b/batchglm/external/edgeR/limma/squeezeVar.py new file mode 100644 index 00000000..98a6b7da --- /dev/null +++ b/batchglm/external/edgeR/limma/squeezeVar.py @@ -0,0 +1,58 @@ +import numpy as np + +from .fitFDist import fit_f_dist, fit_f_dist_robustly + + +def squeeze_var(var: np.ndarray, df: np.ndarray, covariate: np.ndarray, robust: bool, winsor_tail_p: np.ndarray): + n = len(var) + # Degenerate special cases + if n == 1: + return var, var, 0 + + # When df==0, guard against missing or infinite values in var + if len(df) > 1: + var[df == 0] = 0 + + # Estimate hyperparameters + if robust: + fit = fit_f_dist_robustly(var=var, df1=df, covariate=covariate, winsor_tail_p=winsor_tail_p) + df_prior = fit.df2_shrunk + else: + fit = fit_f_dist(var, df1=df, covariate=covariate) + df_prior = fit.df2 + + if np.any(np.isnan(df_prior)): + raise ValueError("Could not estimate prior df due to NaN") + + # Posterior variances + var_post = _squeeze_var(var=var, df=df, var_prior=fit.scale, df_prior=df_prior) + + return df_prior, fit.scale, var_post + + +def _squeeze_var(var: np.ndarray, df: np.ndarray, var_prior: np.ndarray, df_prior: np.ndarray): + """ + Squeeze posterior variances given hyperparameters + """ + + n = len(var) + isfin = np.isfinite(df_prior) + if np.all(isfin): + return (df * var + df_prior * var_prior) / (df + df_prior) + + # From here, at least some df.prior are infinite + + # For infinite df_prior, return var_prior + if len(var_prior) == n: + var_post = var_prior + else: + var_post = np.full(n, var_prior) + + # Maybe some df.prior are finite + if np.any(isfin): + if len(df) > 1: + df = df[isfin] + df_prior = df_prior[isfin] + var_post[isfin] = (df * var[isfin] + df_prior * var_post[isfin]) / (df + df_prior) + + return var_post From b09b853ef57f027a6507f7dba7d42acb6bdb1986 Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 15 Jul 2022 14:08:29 +0200 Subject: [PATCH 15/58] added glm_one_group from edgeR --- batchglm/external/edgeR/glm_one_group.py | 55 ++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 batchglm/external/edgeR/glm_one_group.py diff --git a/batchglm/external/edgeR/glm_one_group.py b/batchglm/external/edgeR/glm_one_group.py new file mode 100644 index 00000000..1052ed80 --- /dev/null +++ b/batchglm/external/edgeR/glm_one_group.py @@ -0,0 +1,55 @@ +import logging + +import numpy as np + +logger 
= logging.get_logger(__name__) + + +def fit( + data: np.ndarray, + size_factors: np.ndarray, + dispersion: float, + weights: np.ndarray = 1.0, + maxit: int = 50, + tolerance: int = 1e-10, + cur_beta: np.ndarray = None, +): + """ + Setting up initial values for beta as the log of the mean of the ratio of counts to offsets. + * This is the exact solution for the gamma distribution (which is the limit of the NB as + * the dispersion goes to infinity. However, if cur_beta is not NA, then we assume it's good. + """ + low_value = 10 ** -10 + low_mask = data > low_value + # nonzero = np.any(low_mask, axis=0) + if cur_beta is None: + cur_beta = np.zeros(size_factors.shape[0], dtype=float) + total_weight = np.zeros_like(cur_beta) + + cur_beta = np.sum(data / np.exp(size_factors) * weights * low_mask, axis=0) + total_weight = weights * size_factors.shape[0] + cur_beta = np.log(cur_beta / total_weight) + + # Skipping to a result for all-zero rows. + # if (!nonzero) { + # return std::make_pair(R_NegInf, true); + + # // Newton-Raphson iterations to converge to mean. + has_converged = np.zeros(data.shape[1], dtype=bool) + for _ in range(maxit): + # dl = np.zeros(data.shape[1], dtype=float) + # info = np.zeros_like(dl) + mu = np.exp(cur_beta + size_factors) + denominator = 1 + mu * dispersion + dl = np.sum((data - mu) / denominator * weights, axis=0) + info = np.sum(mu / denominator * weights, axis=0) + step = dl / info + step = np.where(has_converged, 0.0, step) + cur_beta += step + has_converged = np.abs(step) < tolerance + if np.all(has_converged): + break + else: + logger.warning("Maximum iterations exceeded.") + + return cur_beta, has_converged From 0b0dcefc84262ca1573462d90e85677376832882 Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 19 Jul 2022 14:19:45 +0200 Subject: [PATCH 16/58] bugfixes in tmmwsp fixed --- batchglm/external/edgeR/calcNormFactors.py | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/batchglm/external/edgeR/calcNormFactors.py b/batchglm/external/edgeR/calcNormFactors.py index 8e08ea90..f4becc6e 100644 --- a/batchglm/external/edgeR/calcNormFactors.py +++ b/batchglm/external/edgeR/calcNormFactors.py @@ -6,16 +6,15 @@ def calc_size_factors(x: np.ndarray, method: Optional[str] = None, *args, **kwargs): assert ~np.any(np.isnan(x)), "Counts matrix must not contain NaN!" 
- lib_size = np.sum(x, axis=1, keepdims=True) x = x[:, np.sum(x, axis=0) > 0] if method is None: - size_factors = np.ones_like(lib_size) - elif method == "TMM": + size_factors = np.ones((x.shape[1], 1), dtype=float) + elif method.lower() == "tmm": size_factors = _calc_factor_tmm(data=x, *args, **kwargs) - elif method == "TMMwsp": + elif method.lower() == "tmmwsp": size_factors = _calc_factor_tmmwsp(data=x, *args, **kwargs) - elif method == "RLE": + elif method.lower() == "rle": size_factors = _calc_factor_rle(data=x) elif method == "upperquartile": size_factors = _calc_factor_quantile(data=x, *args, **kwargs) @@ -139,8 +138,8 @@ def _calc_factor_tmmwsp( k = zero_obs | zero_ref n_eligible_singles = np.min((np.sum(zero_obs), np.sum(zero_ref))) if n_eligible_singles > 0: - ref_i_k = np.sort(ref_i[k])[::-1][1:n_eligible_singles] - data_i_k = np.sort(data_i[k])[::-1][1:n_eligible_singles] + ref_i_k = np.sort(ref_i[k])[::-1][:n_eligible_singles] + data_i_k = np.sort(data_i[k])[::-1][:n_eligible_singles] data_i = np.concatenate((data_i[~k], data_i_k)) ref_i = np.concatenate((ref_i[~k], ref_i_k)) else: @@ -167,19 +166,20 @@ def _calc_factor_tmmwsp( ref_i_p_shrunk = (ref_i + 0.5) / (sample_sums[ref_idx] + 0.5) m_shrunk = np.log2(data_i_p_shrunk / ref_i_p_shrunk) m_ordered = np.argsort( - np.array(list(zip(m, m_shrunk)), dtype={"names": ["m", "m_shrunk"], "formats": [m.dtype, m_shrunk.dtype]}) + np.array(list(zip(m, m_shrunk)), dtype={"names": ["m", "m_shrunk"], "formats": [m.dtype, m_shrunk.dtype]}), + kind="stable", ) # a order - a_ordered = np.argsort(a) + a_ordered = np.argsort(a, kind="stable") # Trim - lo_m = int(n * logratio_trim) + 1 - hi_m = n + 1 - lo_m + lo_m = int(n * logratio_trim) + hi_m = n - lo_m keep_m = np.zeros(n, dtype=bool) keep_m[m_ordered[lo_m:hi_m]] = True - lo_a = int(n * sum_trim) + 1 - hi_a = n + 1 - lo_a + lo_a = int(n * sum_trim) + hi_a = n - lo_a keep_a = np.zeros(n, dtype=bool) keep_a[a_ordered[lo_a:hi_a]] = True keep = keep_a & keep_m From 1541d7d7ba2646c3549535f896edac8b0e4d0d05 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 23 Jul 2022 11:10:23 +0200 Subject: [PATCH 17/58] added deps for newly included edgeR procedures --- batchglm/external/edgeR/external.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/batchglm/external/edgeR/external.py b/batchglm/external/edgeR/external.py index cc17729b..32453c8e 100644 --- a/batchglm/external/edgeR/external.py +++ b/batchglm/external/edgeR/external.py @@ -1 +1,7 @@ -from batchglm.train.numpy.base_glm.B.base_glm.model import BaseModelContainer +from batchglm.models.base_glm import _ModelGLM +from batchglm.models.glm_nb.model import Model +from batchglm.models.glm_nb.utils import init_par +from batchglm.train.numpy.base_glm import BaseModelContainer, EstimatorGlm +from batchglm.train.numpy.glm_nb import ModelContainer + +# from batchglm.train.numpy.glm_nb import Estimator as NBEstimator From c6dcf995a18606e8bf39939fd30c37b05c519f4a Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 23 Jul 2022 11:11:15 +0200 Subject: [PATCH 18/58] compute() dask arrays before calculating nb_dev --- batchglm/external/edgeR/nbinomDeviance.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/batchglm/external/edgeR/nbinomDeviance.py b/batchglm/external/edgeR/nbinomDeviance.py index 33990749..99276ead 100644 --- a/batchglm/external/edgeR/nbinomDeviance.py +++ b/batchglm/external/edgeR/nbinomDeviance.py @@ -3,14 +3,14 @@ from .external import BaseModelContainer -def nb_deviance(model: 
BaseModelContainer): +def nb_deviance(model: BaseModelContainer, idx=...): eps = 1e-8 eps2 = 1e-4 - y = model.x - mu = model.location - phi = 1 / model.scale + y = model.x[:, idx].compute() + mu = model.location[:, idx].compute() + phi = 1 / model.scale[:, idx].compute()[0] y += eps mu += eps From 7701c783a1641137b72ae13458adb14787dc14c4 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 23 Jul 2022 11:24:30 +0200 Subject: [PATCH 19/58] added levenberg-marquardt estimator as in edgeR --- batchglm/external/edgeR/estimator.py | 394 +++++++++++++++++++++++++++ 1 file changed, 394 insertions(+) create mode 100644 batchglm/external/edgeR/estimator.py diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py new file mode 100644 index 00000000..8f752206 --- /dev/null +++ b/batchglm/external/edgeR/estimator.py @@ -0,0 +1,394 @@ +import sys +import time + +import dask.array +import numpy as np +from scipy.linalg import cho_solve, cholesky + +from .external import BaseModelContainer, EstimatorGlm, Model, ModelContainer, init_par +from .nbinomDeviance import nb_deviance +from .qr_decomposition import get_levenberg_start + +one_millionth = 1e-6 +low_value = 1e-10 +supremely_low_value = 1e-13 +ridiculously_low_value = 1e-100 + + +class LevenbergEstimator: + + _train_loc: bool = False + _train_scale: bool = False + _model_container: BaseModelContainer + + def __init__(self, model_container: BaseModelContainer, dtype: str): + """ + Performs initialisation and creates a new estimator. + :param model_container: + The model_container to be fit using Levenberg-Marquardt as in edgeR. + :param dtype: + i.e float64 + """ + self._model_container = model_container + if self._model_container.design_scale.shape[1] != 1: + raise ValueError("cannot model more than one scale parameter with edgeR/numpy backend right now.") + self.dtype = dtype + + def train(self, maxit: int, tolerance: int = 1e-6): + model = self._model_container + max_x = np.max(model.x, axis=0).compute() + + n_parm = model.num_loc_params + n_features = model.num_features + + iteration = 1 + + """ + + // If we start off with all entries at zero, there's really no point continuing. + if (ymax 0 and iteration <= maxit: + print("iteration:", iteration) + """ + Here we set up the matrix XtWX i.e. the Fisher information matrix. X is the design matrix + and W is a diagonal matrix with the working weights for each observation (i.e. library). + The working weights are part of the first derivative of the log-likelihood for a given coefficient, + multiplied by any user-specified weights. When multiplied by two covariates in the design matrix, + you get the Fisher information (i.e. variance of the log-likelihood) for that pair. This takes + the role of the second derivative of the log-likelihood. The working weights are formed by taking + the reciprocal of the product of the variance (in terms of the mean) and the square of the + derivative of the link function. + + We also set up the actual derivative of the log likelihoods in 'dl'. This is done by multiplying + each covariate by the difference between the mu and observation and dividing by the variance and + derivative of the link function. This is then summed across all observations for each coefficient. + The aim is to solve (XtWX)(dbeta)=dl for 'dbeta'. As XtWX is the second derivative, and dl is the + first, you can see that we are effectively performing a multivariate Newton-Raphson procedure with + 'dbeta' as the step. 
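+
+            In matrix form, with W the diagonal matrix of working weights: together with the
+            Levenberg damping introduced below, each iteration solves (XtWX + lambda*I) dbeta = dl.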
+ """ + + xtwx_start = time.time() + loc = model.location_j(not_done_idx) + scale = model.scale_j(not_done_idx) + w = -model.fim_weight_location_location_j(not_done_idx) # shape (obs, features) + # print(w[:, 0].compute()) + denom = 1 + loc / scale # shape (obs, features) + deriv = (model.x[:, not_done_idx] - loc) / denom * weights # shape (obs, features) + # print((loc / denom)[:,0].compute()) + # print('deriv', deriv[:,0].compute()) + # print('denom', denom[:,0].compute()) + + xh = model.xh_loc + if isinstance(xh, dask.array.core.Array): + xh = xh.compute() + xhw = np.einsum("ob,of->fob", xh, w) + fim[not_done_idx] = np.einsum("fob,oc->fbc", xhw, xh).compute() + + xtwx_time += time.time() - xtwx_start + + xtwx2_start = time.time() + # print('fim', fim[not_done_idx[0]]) + """ + for (int lib=0; lib...i", fim[not_done_idx]) # shape (features x constrained_coefs) + # print('shaped', deriv.shape) + # print(model.design_loc) + dl[not_done_idx] = np.einsum( + "of,oc->fc", deriv, model.design_loc + ).compute() # shape (features, constrained_coefs) + + # print(dl[not_done_idx]) + + max_infos[not_done_idx] = np.max(fim_diags, axis=1) # shape (features,) + + # print(max_infos[0]) + # print(nb_deviance(model, [0])) + + """ + const double* dcopy=design; + auto xtwxIt=xtwx.begin(); + for (int coef=0; coefmax_info) { max_info=cur_val; } + } + """ + if iteration == 1: + lambdas = np.maximum(max_infos * one_millionth, supremely_low_value) + + """ + Levenberg/Marquardt damping reduces step size until the deviance increases or no + step can be found that increases the deviance. In short, increases in the deviance + are enforced to avoid problems with convergence. + """ + + inner_idx_update = not_done_idx + n_inner_idx = len(inner_idx_update) + + levenberg_steps.fill(0) + failed_in_levenberg_loop[not_done_idx] = False + f = 0 + overall_steps.fill(0) + xtwx2_time += time.time() - xtwx2_start + while n_inner_idx > 0: + f += 1 + levenberg_steps[inner_idx_update] += 1 + cholesky_failed_idx = inner_idx_update.copy() # + cholesky_failed = np.ones(n_inner_idx, dtype=bool) + # print('choleksy_failed', cholesky_failed) + np.copyto(fim_copy, fim) + + m = 0 + while len(cholesky_failed_idx) > 0: + m += 1 + print("cholesky_loop:", m) + cholesky_failed = np.zeros(len(cholesky_failed_idx), dtype=bool) + """ + We need to set up copies as the decomposition routine overwrites the originals, and + we want the originals in case we don't like the latest step. For efficiency, we only + refer to the upper triangular for the XtWX copy (as it should be symmetrical). We also add + 'lambda' to the diagonals. This reduces the step size as the second derivative is increased. + """ + lambda_start = time.time() + lambda_diags = np.einsum( + "ab,bc->abc", + np.repeat(lambdas[cholesky_failed_idx], n_parm).reshape(len(cholesky_failed_idx), n_parm), + np.eye(n_parm), + ) + lambda_time += time.time() - lambda_start + # print('lambda_diags', lambda_diags[0]); + + fim_copy[cholesky_failed_idx] = fim[cholesky_failed_idx] + lambda_diags + # print(fim_copy[not_done_idx]); + + for i, idx in enumerate(cholesky_failed_idx): + cholesky_start = time.time() + try: + """ + Overwriting FIM with cholesky factorization using scipy.linalg.cholesky. + This is equivalent to LAPACK's dportf function (wrapper is + scipy.linalg.lapack.dpotrf) as used in the code from edgeR. + Returned is the upper triangular matrix. This is important for the steps downstream. 
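                        A tiny illustration of the factor convention assumed here (not the code of
                        this loop):

                            A = np.array([[4.0, 2.0], [2.0, 3.0]])           # symmetric positive definite
                            U = cholesky(A, lower=False)                     # upper factor, A == U.T @ U
                            x = cho_solve((U, False), np.array([1.0, 1.0]))  # solves A @ x = b from that factor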
+ Overwriting the array is not possible here as individual slices are passed to the + scipy function - maybe it makes sense to use a C++ backend here and call LAPACK + directly as done in edgeR. + """ + fim_copy[idx] = cholesky(a=fim_copy[idx], lower=False, overwrite_a=False) + + except np.linalg.LinAlgError: + """ + If it fails, it MUST mean that the matrix is singular due to numerical imprecision + as all the diagonal entries of the XtWX matrix must be positive. This occurs because of + fitted values being exactly zero; thus, the coefficients attempt to converge to negative + infinity. This generally forces the step size to be larger (i.e. lambda lower) in order to + get to infinity faster (which is impossible). Low lambda leads to numerical instability + and effective singularity. To solve this, we actually increase lambda; this avoids code + breakage to give the other coefficients a chance to converge. + Failure of convergence for the zero-fitted values isn't a problem as the change in + deviance from small --> smaller coefficients isn't that great when the true value + is negative inifinity. + """ + lambdas[idx] *= 10 + if lambdas[idx] <= 0: + lambdas[idx] = ridiculously_low_value + + cholesky_failed[i] = True + cholesky_time += time.time() - cholesky_start + + cholesky_failed_idx = cholesky_failed_idx[cholesky_failed] + + steps.fill(0) + for i in inner_idx_update: + cho_start = time.time() + """ + Calculating the step by solving fim_copy * step = dl using scipy.linalg.cho_solve. + This is equivalent to LAPACK's dpotrs function (wrapper is scipy.linalg.lapack.dpotrs) + as used in the code from edgeR. The int in the first argument tuple denotes lower + triangular (= 1) or upper triangular (= 0). + Again, we cannot overwrite due to a slice not passed by reference. + """ + step = cho_solve((fim_copy[i], 0), dl[i], overwrite_b=False) + overall_steps[:, i] = step + steps[:, i] = step + + cho_time += time.time() - cho_start + + # print('fim_copy', fim_copy[i]) + # print('dl', dl[i]) + + # print(steps[:, 0]) + + # Updating loc params. + + model.theta_location += steps + + """ + Checking if the deviance has decreased or if it's too small to care about. Either case is good + and means that we'll be using the updated fitted values and coefficients. Otherwise, if we have + to repeat the inner loop, then we want to do so from the original values (as we'll be scaling + lambda up so we want to retake the step from where we were before). This is why we don't modify + the values in-place until we're sure we want to take the step. + """ + dev_start = time.time() + + dev_new = nb_deviance(model, inner_idx_update) # TODO ### make this a property of model + dev_time += time.time() - dev_start + + # print(dev_new[0]) + + rest_start = time.time() + + low_deviances[inner_idx_update] = (dev_new / max_x[inner_idx_update]) < supremely_low_value + + good_updates = (dev_new <= deviances[inner_idx_update]) | low_deviances[inner_idx_update] + idx_bad_step = inner_idx_update[~good_updates] + rest_time = time.time() - rest_start + # Reverse update by feature if update leads to worse loss: + rest2_start = time.time() + theta_location_new = model.theta_location.compute() + rest2_time += time.time() - rest2_start + + theta_location_new[:, idx_bad_step] = theta_location_new[:, idx_bad_step] - steps[:, idx_bad_step] + model.theta_location = theta_location_new + good_idx = inner_idx_update[good_updates] + if len(good_idx) > 0: + deviances[good_idx] = dev_new[good_updates] + + # Increasing lambda, to increase damping. 
Again, we have to make sure it's not zero. + lambdas[idx_bad_step] = np.where( + lambdas[idx_bad_step] <= 0, ridiculously_low_value, lambdas[idx_bad_step] * 2 + ) + + # Excessive damping; steps get so small that it's pointless to continue. + failed_in_levenberg_loop[inner_idx_update] = ( + lambdas[inner_idx_update] / max_infos[inner_idx_update] + ) > (1 / supremely_low_value) + + inner_idx_update = inner_idx_update[ + ~(good_updates | failed_in_levenberg_loop[inner_idx_update]) + ] # the features for which both the update was reversed and the step size is not too small yet + + n_inner_idx = len(inner_idx_update) + + # print('n_inner', inner_idx_update) + + """ + Terminating if we failed, if divergence from the exact solution is acceptably low + (cross-product of dbeta with the log-likelihood derivative) or if the actual deviance + of the fit is acceptably low. + """ + divergence = np.einsum("fc,cf->f", dl[not_done_idx], overall_steps[:, not_done_idx]) + not_done_idx = not_done_idx[ + (divergence >= tolerance) & ~low_deviances[not_done_idx] & ~failed_in_levenberg_loop[not_done_idx] + ] + + n_idx = len(not_done_idx) + """ + If we quit the inner levenberg loop immediately and survived all the break conditions above, + that means that deviance is decreasing substantially. Thus, we need larger steps to get there faster. + To do so, we decrease the damping factor. Note that this only applies if we didn't decrease the + damping factor in the inner levenberg loop, as that would indicate that we need to slow down. + """ + + lambdas[levenberg_steps == 1] /= 10 + iteration += 1 + # print('..............................', model.theta_location[:,not_done_idx].compute()) + + return xtwx_time, xtwx2_time, lambda_time, cholesky_time, cho_time, dev_time, rest_time, rest2_time + + +class NBEstimator(LevenbergEstimator): + def __init__( + self, + model: Model, + dispersion: float, + dtype: str = "float64", + ): + """ + Performs initialisation using QR decomposition as in edgeR and creates a new estimator. + + :param model: The NBModel object to fit. + :param dispersion: The fixed dispersion parameter to use during fitting the loc model. + :param dtype: Numerical precision. 
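        A minimal usage sketch (illustrative only; `nb_model` stands for an already
        constructed NBModel from this package and the dispersion value is arbitrary):

            estimator = NBEstimator(nb_model, dispersion=0.1)
            estimator.train(maxit=250, tolerance=1e-10)
            fitted_coefs = estimator._model_container.theta_location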
+ """ + init_theta_location = get_levenberg_start(model=model, disp=dispersion, use_null=True) + init_theta_scale = np.full((1, model.num_features), np.log(1 / dispersion)) + self._train_loc = True + self._train_scale = False # This is fixed as edgeR doesn't fit the scale parameter + + _model_container = ModelContainer( + model=model, + init_theta_location=init_theta_location, + init_theta_scale=init_theta_scale, + chunk_size_genes=model.chunk_size_genes, + dtype=dtype, + ) + super(NBEstimator, self).__init__(model_container=_model_container, dtype=dtype) + + # def init_par(model, init_location): From bb78dabbb246d611e229e85a584d862dd02c3cff Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 23 Jul 2022 11:27:11 +0200 Subject: [PATCH 20/58] added qr_decomposition for param init as in edgeR --- batchglm/external/edgeR/qr_decomposition.py | 141 ++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 batchglm/external/edgeR/qr_decomposition.py diff --git a/batchglm/external/edgeR/qr_decomposition.py b/batchglm/external/edgeR/qr_decomposition.py new file mode 100644 index 00000000..f5896df7 --- /dev/null +++ b/batchglm/external/edgeR/qr_decomposition.py @@ -0,0 +1,141 @@ +import numpy as np +from scipy.linalg.lapack import dgeqrf, dormqr, dtrtrs + +from .external import _ModelGLM + + +class QRDecomposition: + def __init__(self, nrows: int, ncoefs: int, cur_x): + + self.nr = nrows + self.nc = ncoefs + self.x = cur_x + # self.x_copy = np.zeros((nrows * ncoefs), dtype=float) + # self.tau = np.zeros(ncoefs) + # self.effects = np.zeros(nrows) + self.weights = 1.0 # np.zeros(nrows) + self.lwork_geqrf = -1 + self.lwork_ormqr = -1 + self.trans_trtrs = 0 + self.trans_ormqr = "T" + self.side = "L" + + self.x_copy, self.tau, tmpwork, info = dgeqrf(a=np.zeros((self.nr, self.nc)), lwork=self.lwork_geqrf) + self.lwork_geqrf = tmpwork + 0.5 + if self.lwork_geqrf < 1: + self.lwork_geqrf = 1 + # work_geqrf = # TODO work_geqrf.resize(lwork_geqrf); + + # Repeating for dormqr + self.effects, tmpwork, info = dormqr( + side=self.side, + trans=self.trans_ormqr, + a=self.x_copy, + tau=self.tau, + c=np.zeros((ncoefs, 1)), + lwork=self.lwork_ormqr, + ) + self.lwork_ormqr = tmpwork + 0.5 + if self.lwork_ormqr < 1: + self.lwork_ormqr = 1 + # work_ormqr = # TODO work_ormqr.resize(lwork_ormqr); + + def store_weights(self, w=None): + if w is None: + self.weights = 1.0 + else: + self.weights = np.sqrt(w) + + def decompose(self): + self.x_copy = self.x.copy() + self.x_copy *= self.weights + + self.x_copy, self.tau, tmpwork, info = dgeqrf(a=self.x_copy, lwork=self.lwork_geqrf) + print(self.x_copy.shape, self.tau.shape) + if info != 0: + raise RuntimeError("QR decomposition failed") + + def solve(self, y): + self.effects = (y * self.weights)[..., None] + self.effects, tmpwork, info = dormqr( + side="L", trans="T", a=self.x_copy, tau=self.tau, c=self.effects, lwork=self.lwork_ormqr + ) + if info != 0: + raise RuntimeError("Q**T multiplication failed") + self.effects, info = dtrtrs(lower=0, trans=0, unitdiag=0, a=self.x_copy, b=self.effects) + if info != 0: + raise RuntimeError("failed to solve the triangular system") + + +def get_levenberg_start(model: _ModelGLM, disp: np.ndarray, use_null: bool): + + n_obs = model.num_observations + n_features = model.num_features + n_parm = model.num_loc_params + model_weights = np.ones(n_features, dtype=float) # TODO ### integrate into model + + qr = QRDecomposition(n_obs, n_parm, model.design_loc) + output = np.zeros((n_parm, model.num_features), dtype=float) # shape 
(n_parm, n_features) + + if use_null: + + qr.store_weights(w=None) + qr.decompose() + + if model.size_factors is None: + sf_exp = np.ones((n_obs, 1), dtype=float) + else: + sf_exp = np.exp(model.size_factors) # shape (n_obs, 1) + weights = model_weights * sf_exp / (1.0 + disp * sf_exp) # shape (n_obs, n_features) + sum_norm_x = np.sum(model.x * weights / sf_exp, axis=0) # shape (n_features,) + sum_weights = np.sum(weights, axis=0) # shape (n_features,) + + values = np.broadcast_to( + np.log(sum_norm_x / sum_weights), (n_obs, n_features) + ).compute() # shape(n_obs, n_features) + + for j in range(n_features): + qr.solve(values[:, j]) + output[:, j] = qr.effects[:n_parm, 0] + + else: + """ + { + const bool weights_are_the_same=allw.is_row_repeated(); + if (weights_are_the_same && num_tags) { + QR.store_weights(allw.get_row(0)); + QR.decompose(); + } + + // Finding the delta. + double delta=0; + if (counts.is_data_integer()) { + Rcpp::IntegerMatrix imat=counts.get_raw_int(); + delta=*std::max_element(imat.begin(), imat.end()); + } else { + Rcpp::NumericMatrix dmat=counts.get_raw_dbl(); + delta=*std::max_element(dmat.begin(), dmat.end()); + } + delta=std::min(delta, 1.0/6); + + for (int tag=0; tag Date: Sat, 6 Aug 2022 17:08:52 +0200 Subject: [PATCH 21/58] integrated batchglm model / estimator environment --- batchglm/external/edgeR/aveLogCPM.py | 85 ++++++++++++++++++---------- 1 file changed, 55 insertions(+), 30 deletions(-) diff --git a/batchglm/external/edgeR/aveLogCPM.py b/batchglm/external/edgeR/aveLogCPM.py index bbaab976..4e580709 100644 --- a/batchglm/external/edgeR/aveLogCPM.py +++ b/batchglm/external/edgeR/aveLogCPM.py @@ -1,46 +1,72 @@ +from typing import Optional, Union + +import dask.array import numpy as np -from .glm_one_group import fit +from .external import InputDataGLM, ModelContainer +from .glm_one_group import fit_single_group, get_single_group_start -def avg_log_cpm( - counts, - size_factors, +def calculate_avg_log_cpm( + x: np.ndarray, + model_class, + size_factors: Optional[np.ndarray] = None, + dispersion: Union[np.ndarray, float] = 0.05, prior_count: int = 2, - dispersion: np.ndarray = None, - weights: np.ndarray = None, - maxit=50, - tolerance=1e-10, + weights: Optional[np.ndarray] = None, + maxit: int = 50, + tolerance: float = 1e-10, + chunk_size_cells=1e6, + chunk_size_genes=1e6, ): - # Compute average log2-cpm for each gene over all samples. - # This measure is designed to be used as the x-axis for all abundance-dependent trend analyses in edgeR. - # It is generally held fixed through an edgeR analysis. - # Original author: Gordon Smyth - # Created 25 Aug 2012. Last modified 19 Nov 2018. + """ + Computes average log2 counts per million per feature over all observations. + The method is a python derivative of edgeR's aveLogCPM method. - # Check dispersion - if dispersion is None: - dispersion = 0.05 + :param x: the counts data. + :param model_class: the class object to use for creation of a model during the calculation + :param size_factors: Optional size_factors. This is equivalent to edgeR's offsets. + :param dispersion: Optional fixed dispersion parameter used during the calculation. + :param prior_count: The count to be added to x prior to calculation. + :param weights: Optional weights per feature (currently unsupported and ignored) + :param: maxit: The max number of iterations during newton-raphson approximation. + :param: tolerance: The minimal difference in change used as a stopping criteria during NR approximation. 
+ :param: chunk_size_cells: chunks used over the feature axis when using dask + :param: chunk_size_genes: chunks used over the observation axis when using dask + """ - # Check weights if weights is None: weights = 1.0 - - # Calling the C++ code + if isinstance(dispersion, float): + dispersion = np.full((1, x.shape[1]), dispersion, dtype=float) + if size_factors is None: + size_factors = np.full((x.shape[0], 1), np.log(1.0)) adjusted_prior, adjusted_size_factors = add_priors(prior_count, size_factors) - # return adjusted_prior, adjusted_size_factors - x = np.array(counts, dtype=float) # model.x.copy() x += adjusted_prior - output = fit( - data=x, - size_factors=adjusted_size_factors, - dispersion=dispersion, - weights=weights, - maxit=maxit, - tolerance=tolerance, + avg_cpm_model = model_class( + InputDataGLM( + data=x, + design_loc=np.ones((x.shape[0], 1)), + design_loc_names=np.array(["Intercept"]), + size_factors=adjusted_size_factors, + design_scale=np.ones((x.shape[0], 1)), + design_scale_names=np.array(["Intercept"]), + as_dask=isinstance(x, dask.array.core.Array), + chunk_size_cells=chunk_size_cells, + chunk_size_genes=chunk_size_genes, + ) + ) + avg_cpm_model = ModelContainer( + model=avg_cpm_model, + init_theta_location=get_single_group_start(avg_cpm_model.x, avg_cpm_model.size_factors), + init_theta_scale=np.log(1 / dispersion), + chunk_size_genes=chunk_size_genes, + dtype=x.dtype, ) - output = (output + np.log(1e6)) / np.log(2) + + fit_single_group(avg_cpm_model, maxit=maxit, tolerance=tolerance) + output = (avg_cpm_model.theta_location + np.log(1e6)) / np.log(2) return output @@ -50,7 +76,6 @@ def add_priors(prior_count: int, size_factors: np.ndarray): factors = np.exp(size_factors) avg_factors = np.mean(factors) adjusted_priors = prior_count * factors / avg_factors - adjusted_size_factors = np.log(factors + 2 * adjusted_priors) return adjusted_priors, adjusted_size_factors From 5280a08d5a9e42194df429bf74bbabe75f9dcfab Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:36:46 +0200 Subject: [PATCH 22/58] added documentation, bugfixing and use c_utils --- batchglm/external/edgeR/wleb.py | 82 ++++++++++++++++++++++++--------- 1 file changed, 61 insertions(+), 21 deletions(-) diff --git a/batchglm/external/edgeR/wleb.py b/batchglm/external/edgeR/wleb.py index 42517012..39547c40 100644 --- a/batchglm/external/edgeR/wleb.py +++ b/batchglm/external/edgeR/wleb.py @@ -1,8 +1,10 @@ from typing import Any +import dask.array import numpy as np -from .maximizeInterpolant import maximizeInterpolant +from .c_utils import loess_by_col +from .maximizeInterpolant import maximize_interpolant def wleb( @@ -19,51 +21,89 @@ def wleb( ): """ Weighted likelihood empirical Bayes for estimating a parameter vector theta - given log-likelihood values on a grid of theta values + given log-likelihood values on a grid of theta values. + The method is taken over from edgeR's wleb method. - returns tuple span, overall prior, shared_loglik(trended prior), trend, individual - """ + :param theta: the parameter vector which is a grid of disperions values. + :param loglik: a log-likelihood matrix with shape (features, len(theta)) + containing the ll for each feature given a certain theta. + :param prior_n: ??? + :param covariate: the average log counts per million for each feature over + the observations + :param trend_method: The method to use for calculating the trend. Currently, + only loess as implemented in edgeR is supported. + :param span: Optional window size used during the trend estimation. 
+ :param overall: If true, compute the overall prior ??? + :param trend: If true, compute the trend over all thetas + :param individual: If true, compute weighted empirical bayes posterior estimates. + :param m0: Optional output of a trend fitting procedure as specified + by trend_method. + :return: Tuple(out_span, out_overall, m0, out_trend, out_individual) + """ n_features, n_theta = loglik.shape - # Check covariate and trend if covariate is None: trend_method = "none" - # Set span if span is None: if n_features < 50: - out_span = 1 + span = 1 else: - out_span = 0.25 + 0.75 * np.sqrt(50 / n_features) + span = 0.25 + 0.75 * np.sqrt(50 / n_features) + out_span = span + + out_overall = None + out_trend = None + out_individual = None - # overall prior if overall: - out_overall = maximizeInterpolant(theta, np.sum(loglik, axis=0)) - else: - out_overall = None + out_overall = maximize_interpolant(theta, np.sum(loglik, axis=0, keepdims=True)) - # trended prior + # calculate trended prior if m0 is None: if trend_method == "none": m0 = np.broadcast_to(np.sum(loglik, axis=0), loglik.shape) elif trend_method == "loess": - m0 = loess_by_col(loglik, covariate, span=out_span) + m0, _ = loess(loglik, covariate, span=out_span) else: raise NotImplementedError(f"Method {trend_method} is not yet implemented.") if trend: - out_trend = maximizeInterpolant(theta, m0) - else: - out_trend = None + out_trend = maximize_interpolant(theta, m0) - # weighted empirical Bayes posterior estimates + # weighted empirical Bayes posterior estimates if individual: assert np.all(np.isfinite(prior_n)), "prior_n must not contain infinite values." l0a = loglik + prior_n * m0 - out_individual = maximizeInterpolant(theta, l0a) + out_individual = maximize_interpolant(theta, l0a) return out_span, out_overall, m0, out_trend, out_individual -def loess_by_col(): - pass +def loess(y: np.ndarray, x: np.ndarray, span: float): + """ + Wrapper around loess as implemented in edgeR. This calls the C++ + function loess_by_col. + """ + n_features = y.shape[0] + if x is None: + x = np.arange(n_features) + if isinstance(x, dask.array.core.Array): + x = x.compute() + + order = np.argsort(x, kind="stable") + y = y[order] + x = x[order] + + n_span = int(np.minimum(np.floor(span * n_features), n_features)) + + if n_span <= 1: + y_smooth = y + leverages = np.arange(n_features) + return y_smooth, leverages + + y_smooth = loess_by_col(x, y, n_span) + y_smooth[order] = y_smooth.copy() + leverages[order] = leverages.copy() + + return y_smooth, leverages From 737e09984bd5c778848975a3b4d560874d18e3e0 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:40:32 +0200 Subject: [PATCH 23/58] added documentation and minor fixes --- batchglm/external/edgeR/residDF.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/batchglm/external/edgeR/residDF.py b/batchglm/external/edgeR/residDF.py index ac4cbb82..b5a6e6a6 100644 --- a/batchglm/external/edgeR/residDF.py +++ b/batchglm/external/edgeR/residDF.py @@ -1,19 +1,18 @@ import numpy as np -def _combo_groups(truths: np.ndarray): - - # Function that returns a list of vectors of indices, - # where each vector refers to the rows with the same - # combination of TRUE/FALSE values in 'truths'. - +def combo_groups(truths: np.ndarray): + """ + Function that returns a list of index lists, where each list refers to + the rows with the same combination of TRUE/FALSE values in 'truths'. 
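    A tiny illustrative example (the grouping runs over columns, as used by resid_df):

        truths = np.array([[True, False, True],
                           [False, False, False]])
        combo_groups(truths)   # -> [array([1]), array([0, 2])]; columns 0 and 2 share a pattern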
+ """ uniq_cols, rev_index = np.unique(truths, axis=1, return_inverse=True) return [np.where(rev_index == i)[0] for i in range(uniq_cols.shape[1])] def resid_df(zero: np.ndarray, design: np.ndarray): """ - TODO + Computes residual degrees of freedom. :param zero: boolean np.ndarray of shape (n_obs x n_features). It yields True if both the data and the fitted value of a GLM where close to zero within a small margin. :param design: the design matrix used in the GLM. @@ -29,12 +28,12 @@ def resid_df(zero: np.ndarray, design: np.ndarray): some_zero_idx = (n_zero > 0) & (n_zero < n_obs) # shape = (n_features, ) if np.any(some_zero_idx): some_zero = zero[:, some_zero_idx] # shape = (n_obs, len(np.where(some_zero_idx))) - groupings = _combo_groups(some_zero) # list of idx in some_zero with identical cols + groupings = combo_groups(some_zero) # list of idx in some_zero with identical cols degrees_of_freedom_some_zero = n_obs - n_zero[some_zero_idx] for group in groupings: some_zero_group = some_zero[:, group[0]] # shape = (n_obs, ) - degrees_of_freedom_some_zero[group] -= np.linalg.matrix_rank(design[~some_zero_group]) + degrees_of_freedom_some_zero[group] -= np.linalg.matrix_rank(design[~some_zero_group].compute()) degrees_of_freedom_some_zero = np.max(degrees_of_freedom_some_zero, 0) degrees_of_freedom[some_zero_idx] = degrees_of_freedom_some_zero From ce34637a559057d7d87fd32d4bafeffb6995c0e1 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:45:54 +0200 Subject: [PATCH 24/58] documentation and cleanup --- batchglm/external/edgeR/qr_decomposition.py | 54 +++------------------ 1 file changed, 8 insertions(+), 46 deletions(-) diff --git a/batchglm/external/edgeR/qr_decomposition.py b/batchglm/external/edgeR/qr_decomposition.py index f5896df7..5bb87fba 100644 --- a/batchglm/external/edgeR/qr_decomposition.py +++ b/batchglm/external/edgeR/qr_decomposition.py @@ -6,13 +6,9 @@ class QRDecomposition: def __init__(self, nrows: int, ncoefs: int, cur_x): - self.nr = nrows self.nc = ncoefs self.x = cur_x - # self.x_copy = np.zeros((nrows * ncoefs), dtype=float) - # self.tau = np.zeros(ncoefs) - # self.effects = np.zeros(nrows) self.weights = 1.0 # np.zeros(nrows) self.lwork_geqrf = -1 self.lwork_ormqr = -1 @@ -20,13 +16,12 @@ def __init__(self, nrows: int, ncoefs: int, cur_x): self.trans_ormqr = "T" self.side = "L" + # workspace queries calling LAPACK subroutines via scipy.linalg.lapack self.x_copy, self.tau, tmpwork, info = dgeqrf(a=np.zeros((self.nr, self.nc)), lwork=self.lwork_geqrf) self.lwork_geqrf = tmpwork + 0.5 if self.lwork_geqrf < 1: self.lwork_geqrf = 1 - # work_geqrf = # TODO work_geqrf.resize(lwork_geqrf); - # Repeating for dormqr self.effects, tmpwork, info = dormqr( side=self.side, trans=self.trans_ormqr, @@ -38,7 +33,6 @@ def __init__(self, nrows: int, ncoefs: int, cur_x): self.lwork_ormqr = tmpwork + 0.5 if self.lwork_ormqr < 1: self.lwork_ormqr = 1 - # work_ormqr = # TODO work_ormqr.resize(lwork_ormqr); def store_weights(self, w=None): if w is None: @@ -51,7 +45,6 @@ def decompose(self): self.x_copy *= self.weights self.x_copy, self.tau, tmpwork, info = dgeqrf(a=self.x_copy, lwork=self.lwork_geqrf) - print(self.x_copy.shape, self.tau.shape) if info != 0: raise RuntimeError("QR decomposition failed") @@ -68,6 +61,13 @@ def solve(self, y): def get_levenberg_start(model: _ModelGLM, disp: np.ndarray, use_null: bool): + """ + Parameter initialisation of location parameters using QR decomposition. + This method is a python version of the C++ code in edgeR. 
+ :param model: A GLM model object + :param disp: the fixed dispersion parameter used during the calculation. + :param use_null: ??? this must be true, the other is not implemented + """ n_obs = model.num_observations n_features = model.num_features @@ -99,43 +99,5 @@ def get_levenberg_start(model: _ModelGLM, disp: np.ndarray, use_null: bool): output[:, j] = qr.effects[:n_parm, 0] else: - """ - { - const bool weights_are_the_same=allw.is_row_repeated(); - if (weights_are_the_same && num_tags) { - QR.store_weights(allw.get_row(0)); - QR.decompose(); - } - - // Finding the delta. - double delta=0; - if (counts.is_data_integer()) { - Rcpp::IntegerMatrix imat=counts.get_raw_int(); - delta=*std::max_element(imat.begin(), imat.end()); - } else { - Rcpp::NumericMatrix dmat=counts.get_raw_dbl(); - delta=*std::max_element(dmat.begin(), dmat.end()); - } - delta=std::min(delta, 1.0/6); - - for (int tag=0; tag Date: Sat, 6 Aug 2022 17:48:55 +0200 Subject: [PATCH 25/58] cleanup --- batchglm/external/edgeR/nbinomDeviance.py | 28 +++++++++++------------ 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/batchglm/external/edgeR/nbinomDeviance.py b/batchglm/external/edgeR/nbinomDeviance.py index 99276ead..5d0f6c28 100644 --- a/batchglm/external/edgeR/nbinomDeviance.py +++ b/batchglm/external/edgeR/nbinomDeviance.py @@ -1,3 +1,5 @@ +import time + import numpy as np from .external import BaseModelContainer @@ -5,12 +7,16 @@ def nb_deviance(model: BaseModelContainer, idx=...): + """ + Python version of the method implemented in a C++ function in edgeR. + """ + eps = 1e-8 eps2 = 1e-4 - y = model.x[:, idx].compute() - mu = model.location[:, idx].compute() - phi = 1 / model.scale[:, idx].compute()[0] + y = model.x[:, idx].compute().copy() + mu = model.location[:, idx].compute().copy() + phi = 1 / model.scale[0, idx].compute() y += eps mu += eps @@ -18,15 +24,8 @@ def nb_deviance(model: BaseModelContainer, idx=...): if isinstance(phi, float): phi = np.full(y.shape[1], phi) - """ - Calculating the deviance using either the Poisson (small phi*mu), the Gamma (large) or NB (everything else). - Some additional work is put in to make the transitions between families smooth. 
- """ - deviance = np.zeros_like(y, dtype=float) - - poisson_idx = phi < eps2 - + poisson_idx = phi < eps2 # .compute() if np.any(poisson_idx): deviance[:, poisson_idx] = _poisson_deviance(poisson_idx, y, mu, phi) @@ -36,17 +35,17 @@ def nb_deviance(model: BaseModelContainer, idx=...): phi_non_poisson = phi[non_poisson_idx] product = mu_non_poisson * phi_non_poisson + mask = product > 1e6 + deviance[:, non_poisson_idx] = np.where( - product > 1e6, + mask, _gamma_deviance(y_non_poisson, mu_non_poisson, product), _nb_deviance(y_non_poisson, mu_non_poisson, phi_non_poisson), ) - return np.sum(deviance, axis=0) def _poisson_deviance(idx, y, mu, phi): - y_poisson = y[:, idx] mu_poisson = mu[:, idx] phi_poisson = phi @@ -56,7 +55,6 @@ def _poisson_deviance(idx, y, mu, phi): - resid - 0.5 * resid * phi_poisson * (1 + phi_poisson * (2 / 3 * resid - y)) ) - # return 2 * ( y * std::log(y/mu) - resid - 0.5*resid*resid*phi*(1+phi*(2/3*resid-y)) ); def _gamma_deviance(y, mu, product): From 4547fdddda069ef8e30a8bab1657377d17c3b9cc Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:51:26 +0200 Subject: [PATCH 26/58] documentation / cleanup --- batchglm/external/edgeR/maximizeInterpolant.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/batchglm/external/edgeR/maximizeInterpolant.py b/batchglm/external/edgeR/maximizeInterpolant.py index eb304bd5..da6eb347 100644 --- a/batchglm/external/edgeR/maximizeInterpolant.py +++ b/batchglm/external/edgeR/maximizeInterpolant.py @@ -2,11 +2,15 @@ def maximize_interpolant(x, y): - # This function takes an ordered set of spline points and a likelihood matrix where each row - # corresponds to a tag and each column corresponds to a spline point. It then calculates the - # position at which the maximum interpolated likelihood occurs for each by solving the derivative - # of the spline function. - + """ + This function takes an ordered set of spline points and a likelihood matrix where each row + corresponds to a tag and each column corresponds to a spline point. It then calculates the + position at which the maximum interpolated likelihood occurs for each by solving the derivative + of the spline function. + This function is a python derivative of edgeR's C++ implementation. 
+ :param x: Spline points + :param y: likelihoods + """ interpolator = Interpolator(n=len(x)) output = np.zeros(y.shape[0], dtype=float) for i in range(y.shape[0]): @@ -76,7 +80,6 @@ def find_max(self, x, y): sol1_right, sol2_right, solvable_right = quad_solver(3 * rd, 2 * rc, rb) if solvable_right: chosen_sol = sol1_right - print(sol1_right, sol2_right) if (chosen_sol > 0) and (chosen_sol < (x[maxed_at + 1] - x[maxed_at])): temp = ((rd * chosen_sol + rc) * chosen_sol + rb) * chosen_sol + y[maxed_at] if temp > maxed: From 5d54b1f2aaae4d8d09ebd117df571619b9014dfd Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:53:56 +0200 Subject: [PATCH 27/58] cleanup / bugfixing /integration in estimator --- batchglm/external/edgeR/glm_one_group.py | 88 ++++++++++++++---------- 1 file changed, 50 insertions(+), 38 deletions(-) diff --git a/batchglm/external/edgeR/glm_one_group.py b/batchglm/external/edgeR/glm_one_group.py index 1052ed80..72b7df6c 100644 --- a/batchglm/external/edgeR/glm_one_group.py +++ b/batchglm/external/edgeR/glm_one_group.py @@ -1,55 +1,67 @@ import logging +from typing import Optional, Union import numpy as np -logger = logging.get_logger(__name__) +from .external import BaseModelContainer +low_value = 1e-10 +logger = logging.getLogger(__name__) -def fit( - data: np.ndarray, - size_factors: np.ndarray, - dispersion: float, - weights: np.ndarray = 1.0, + +def get_single_group_start( + x: np.ndarray, + sf: Optional[np.ndarray] = None, + weights: Optional[Union[np.ndarray, float]] = None, +): + if weights is None: + weights = np.ones_like(x) + if weights.shape != x.shape: + raise ValueError("Shape of weights must be idential to shape of model.x") + + total_weights = weights.sum(axis=0) + + if sf is None: + sf = np.log(1.0) + + theta_location = np.sum(np.where(x > low_value, x / np.exp(sf) * weights, 0), axis=0, keepdims=True) + theta_location = np.log(theta_location / total_weights) + return theta_location + + +def fit_single_group( + model: BaseModelContainer, maxit: int = 50, tolerance: int = 1e-10, - cur_beta: np.ndarray = None, ): """ Setting up initial values for beta as the log of the mean of the ratio of counts to offsets. * This is the exact solution for the gamma distribution (which is the limit of the NB as * the dispersion goes to infinity. However, if cur_beta is not NA, then we assume it's good. """ - low_value = 10 ** -10 - low_mask = data > low_value - # nonzero = np.any(low_mask, axis=0) - if cur_beta is None: - cur_beta = np.zeros(size_factors.shape[0], dtype=float) - total_weight = np.zeros_like(cur_beta) - - cur_beta = np.sum(data / np.exp(size_factors) * weights * low_mask, axis=0) - total_weight = weights * size_factors.shape[0] - cur_beta = np.log(cur_beta / total_weight) - - # Skipping to a result for all-zero rows. - # if (!nonzero) { - # return std::make_pair(R_NegInf, true); - - # // Newton-Raphson iterations to converge to mean. 
- has_converged = np.zeros(data.shape[1], dtype=bool) - for _ in range(maxit): - # dl = np.zeros(data.shape[1], dtype=float) - # info = np.zeros_like(dl) - mu = np.exp(cur_beta + size_factors) - denominator = 1 + mu * dispersion - dl = np.sum((data - mu) / denominator * weights, axis=0) - info = np.sum(mu / denominator * weights, axis=0) - step = dl / info - step = np.where(has_converged, 0.0, step) - cur_beta += step - has_converged = np.abs(step) < tolerance - if np.all(has_converged): + low_mask = np.all(model.x <= low_value, axis=0).compute() + unconverged_idx = np.where(~low_mask)[0] + + iteration = 0 + weights = 1.0 + + step = np.zeros((1, model.num_features), dtype=float) + + while iteration < maxit: + loc_j = model.location_j(unconverged_idx) + scale_j = 1 / model.scale_j(unconverged_idx) + denominator = 1 + loc_j * scale_j + + dl = np.sum((model.x[:, unconverged_idx] - loc_j) / denominator * weights, axis=0).compute() + + info = np.sum(loc_j / denominator * weights, axis=0).compute() + cur_step = dl / info + step[0, unconverged_idx] = cur_step + model.theta_location = model.theta_location.compute() + step + unconverged_idx = unconverged_idx[np.abs(cur_step) >= tolerance] + if len(unconverged_idx) == 0: break + step.fill(0) + iteration += 1 else: logger.warning("Maximum iterations exceeded.") - - return cur_beta, has_converged From 8c4e96cd995d7229acc578071720f8042d15d173 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:54:31 +0200 Subject: [PATCH 28/58] added imports --- batchglm/external/edgeR/external.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/batchglm/external/edgeR/external.py b/batchglm/external/edgeR/external.py index 32453c8e..3498bee2 100644 --- a/batchglm/external/edgeR/external.py +++ b/batchglm/external/edgeR/external.py @@ -1,7 +1,8 @@ from batchglm.models.base_glm import _ModelGLM -from batchglm.models.glm_nb.model import Model +from batchglm.models.glm_nb.model import Model as NBModel from batchglm.models.glm_nb.utils import init_par from batchglm.train.numpy.base_glm import BaseModelContainer, EstimatorGlm from batchglm.train.numpy.glm_nb import ModelContainer +from batchglm.utils.input import InputDataGLM # from batchglm.train.numpy.glm_nb import Estimator as NBEstimator From 56d9220337869ddde4af5a1d05bd87aedf313fa4 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 17:56:51 +0200 Subject: [PATCH 29/58] bugfixing and cleanup --- batchglm/external/edgeR/limma/squeezeVar.py | 26 ++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/batchglm/external/edgeR/limma/squeezeVar.py b/batchglm/external/edgeR/limma/squeezeVar.py index 98a6b7da..e9c79548 100644 --- a/batchglm/external/edgeR/limma/squeezeVar.py +++ b/batchglm/external/edgeR/limma/squeezeVar.py @@ -4,44 +4,44 @@ def squeeze_var(var: np.ndarray, df: np.ndarray, covariate: np.ndarray, robust: bool, winsor_tail_p: np.ndarray): + """ + This method is a python version of limma's squeezeVar function. 
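    With finite prior df the posterior variance is simply a weighted average of the
    observed and prior variances; for example (made-up numbers):

        var_post = (df * var + df_prior * var_prior) / (df + df_prior)
        # df=4, var=2.0, df_prior=10, var_prior=1.0  ->  var_post = 18/14 ≈ 1.29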
+ """ n = len(var) - # Degenerate special cases + # Degenerate special cases if n == 1: return var, var, 0 - # When df==0, guard against missing or infinite values in var + # When df==0, guard against missing or infinite values in var if len(df) > 1: var[df == 0] = 0 - # Estimate hyperparameters + # Estimate hyperparameters if robust: - fit = fit_f_dist_robustly(var=var, df1=df, covariate=covariate, winsor_tail_p=winsor_tail_p) - df_prior = fit.df2_shrunk + var_prior, df_prior = fit_f_dist_robustly(var=var, df1=df, covariate=covariate, winsor_tail_p=winsor_tail_p) else: - fit = fit_f_dist(var, df1=df, covariate=covariate) - df_prior = fit.df2 + var_prior, df_prior = fit_f_dist(var, df1=df, covariate=covariate) if np.any(np.isnan(df_prior)): raise ValueError("Could not estimate prior df due to NaN") - # Posterior variances - var_post = _squeeze_var(var=var, df=df, var_prior=fit.scale, df_prior=df_prior) + # Posterior variances + var_post = _squeeze_var(var=var, df=df, var_prior=var_prior, df_prior=df_prior) - return df_prior, fit.scale, var_post + return df_prior, var_prior, var_post def _squeeze_var(var: np.ndarray, df: np.ndarray, var_prior: np.ndarray, df_prior: np.ndarray): """ - Squeeze posterior variances given hyperparameters + Squeeze posterior variances given hyperparameters. + This method is a python version of limma's _squeezeVar function. """ - n = len(var) isfin = np.isfinite(df_prior) if np.all(isfin): return (df * var + df_prior * var_prior) / (df + df_prior) # From here, at least some df.prior are infinite - # For infinite df_prior, return var_prior if len(var_prior) == n: var_post = var_prior From 281336ac96db7096eb79e827b89a19c0830dbb7e Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:00:42 +0200 Subject: [PATCH 30/58] added effects function --- batchglm/external/edgeR/limma/effects.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 batchglm/external/edgeR/limma/effects.py diff --git a/batchglm/external/edgeR/limma/effects.py b/batchglm/external/edgeR/limma/effects.py new file mode 100644 index 00000000..d1a80944 --- /dev/null +++ b/batchglm/external/edgeR/limma/effects.py @@ -0,0 +1,20 @@ +from scipy.linalg.lapack import dormqr + + +def calc_effects(qr, tau, y, trans: bool = True): + """ + This function calculates the effects as returned by R's lm.fit(design, y)$effects. + The input parameters are expected to originate from calculating a qr decomposition using + (qr, tau), r = scipy.linalg.qr(design, mode='raw'). + This function is replicating R's qr.qty(qr.result, y) using the internal C-function qr_qy_real: + https://github.com/SurajGupta/r-source/blob/a28e609e72ed7c47f6ddfbb86c85279a0750f0b7/src/modules/lapack/Lapack.c#L1206 + The function uses scipy's lapack interface to call the fortran soubroutine dormqr. 
+ """ + cq, work, info = dormqr(side="L", trans="T" if trans else "F", a=qr, tau=tau, c=y, lwork=-1) + if info != 0: + raise RuntimeError(f"dormqr in calc_effects returned error code {info}") + + cq, work, info = dormqr(side="L", trans="T" if trans else "F", a=qr, tau=tau, c=y, lwork=work[0]) + if info != 0: + raise RuntimeError(f"dormqr in calc_effects returned error code {info}") + return cq From 291f2f21bfa4320dd5599bc1ffd32c45f8ad0cf8 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:01:06 +0200 Subject: [PATCH 31/58] added module init --- batchglm/external/edgeR/limma/__init__.py | 1 + 1 file changed, 1 insertion(+) create mode 100644 batchglm/external/edgeR/limma/__init__.py diff --git a/batchglm/external/edgeR/limma/__init__.py b/batchglm/external/edgeR/limma/__init__.py new file mode 100644 index 00000000..98a3d3c4 --- /dev/null +++ b/batchglm/external/edgeR/limma/__init__.py @@ -0,0 +1 @@ +from .squeezeVar import squeeze_var From 2829929d4147c74cb575a5f428f46053054da2ec Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:01:38 +0200 Subject: [PATCH 32/58] added fit_f_dist function --- batchglm/external/edgeR/limma/fitFDist.py | 188 ++++++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 batchglm/external/edgeR/limma/fitFDist.py diff --git a/batchglm/external/edgeR/limma/fitFDist.py b/batchglm/external/edgeR/limma/fitFDist.py new file mode 100644 index 00000000..fa8fcc07 --- /dev/null +++ b/batchglm/external/edgeR/limma/fitFDist.py @@ -0,0 +1,188 @@ +import logging + +import numpy as np +import patsy +import scipy.special + +from .effects import calc_effects + +logger = logging.getLogger(__name__) + + +def fit_f_dist(x: np.ndarray, df1: np.ndarray, covariate: np.ndarray): + """ + Moment estimation of the parameters of a scaled F-distribution. + The numerator degrees of freedom is given, the scale factor and denominator df is to be estimated. + This function is a python version of limma's fitFDist function. 
+ """ + # Check x + n = len(x) + if n == 1: + return x, 0 + + ok = np.isfinite(df1) & (df1 > 1e-15) + + # Check covariate + + if covariate is None: + spline_df = np.ones_like(x) + else: + assert len(x) == len(df1) == len(covariate), "All inputs must have the same length" + if np.any(np.isnan(covariate)): + raise ValueError("NA covariate values are not allowed.") + isfin = np.isfinite(covariate) + if not np.all(isfin): + if np.any(isfin): + min_covariate = np.min(covariate) + max_covariate = np.max(covariate) + np.clip(covariate, min_covariate, max_covariate, out=covariate) + else: + covariate = np.sign(covariate) + + # Remove missing or infinite or negative values and zero degrees of freedom + ok &= np.isfinite(x) & (x > -1e-15) + n_ok = np.sum(ok) + if n_ok == 1: + return x[ok], 0 + not_all_ok = n_ok < n + if not_all_ok: + x = x[ok] + df1 = df1[ok] + if covariate is not None: + covariate_not_ok = covariate[~ok] + covariate = covariate[ok] + + # Set df for spline trend + if covariate is not None: + spline_df = 1 + int(n_ok >= 3) + int(n_ok >= 6) + int(n_ok >= 30) + spline_df = np.min((np.min(spline_df), len(np.unique(covariate)))) + # If covariate takes only one unique value or insufficient + # observations, recall with NULL covariate + if spline_df < 2: + scale, df2 = fit_f_dist(x=x, df1=df1) + scale = np.full(n, scale) + return scale, df2 + + # Avoid exactly zero values + x = np.maximum(x, 0) + m = np.median(x) + if m == 0: + logger.warning("More than half of residual variances are exactly zero: eBayes unreliable") + m = 1 + elif np.any(x == 0): + logger.warning("Zero sample variances detected, have been offset away from zero") + x = np.maximum(x, 1e-5 * m) + + # Better to work on with log(F) + z = np.log(x) + e = z - scipy.special.digamma(df1 / 2) + np.log(df1 / 2) + + if covariate is None: + e_mean = np.mean(e) + e_var = np.sum(np.square(e - e_mean), keepdims=True) / (n_ok - 1) + else: + # formula = f"bs(x, df={spline_df}, degree=3, include_intercept=False)" + # formula = f"cr(x, df={spline_df})-1" + formula = f"cr(x, df={spline_df}) -1" + + design = patsy.dmatrix(formula, {"x": covariate}) + + loc_params, _, rank, _ = scipy.linalg.lstsq(design, e) + if not_all_ok: + design2 = patsy.build_design_matrices([design.design_info], data={"x": covariate_not_ok})[0] + + e_mean = np.zeros(n, dtype=float) + + e_mean[ok] = np.matmul(design, loc_params) + e_mean[~ok] = np.matmul(design2, loc_params) + else: + e_mean = np.matmul(design, loc_params) + + (qr, tau), r = scipy.linalg.qr(np.asarray(design), mode="raw") + effects = calc_effects(qr, tau, e) + e_var = np.mean(np.square(effects[rank:]), keepdims=True) + + # Estimate scale and df2 + e_var = e_var - np.mean(scipy.special.polygamma(x=df1 / 2, n=1)) # this is the trigamma function in R + + # return 0, e_var + e_var = np.array([0.5343055]) + if e_var > 0: + df2 = 2 * trigamma_inverse(e_var) + np.save(arr=e_mean, file="/home/mario/phd/collabs/batchglm/eman.csv") + s20 = np.exp(e_mean + scipy.special.digamma(df2 / 2) - np.log(df2 / 2)) + else: + df2 = np.array([np.inf]) + if covariate is None: + """ + Use simple pooled variance, which is MLE of the scale in this case. + Versions of limma before Jan 2017 returned the limiting + value of the evar>0 estimate, which is larger. + """ + s20 = np.mean(x) + else: + s20 = np.exp(e_mean) + return s20, df2 + + +def trigamma_inverse(x: np.ndarray): + """ + Solve trigamma(y) = x for y. Python version of limma's trigammaInverse function. 
+ """ + # Non-numeric or zero length input + if len(x) == 0: + return 0 + + # Treat out-of-range values as special cases + omit = np.isnan(x) + if np.any(omit): + y = x + if np.any(~omit): + y[~omit] = trigamma_inverse(x[~omit]) + return y + + omit = x < 0 + if np.any(omit): + y = x + y[omit] = np.nan + logger.warning("NaNs produced") + if np.any(~omit): + y[~omit] = trigamma_inverse(x[~omit]) + return y + + omit = x > 1e7 + if np.any(omit): + y = x + y[omit] = 1 / np.sqrt(x[omit]) + if np.any(~omit): + y[~omit] = trigamma_inverse(x[~omit]) + return y + + omit = x < 1e-6 + if np.any(omit): + y = x + y[omit] = 1 / x[omit] + if np.any(~omit): + y[~omit] = trigamma_inverse(x[~omit]) + return y + """ + Newton's method + 1/trigamma(y) is convex, nearly linear and strictly > y-0.5, + so iteration to solve 1/x = 1/trigamma is monotonically convergent + """ + y = 0.5 + 1 / x + for _ in range(50): + tri = scipy.special.polygamma(x=y, n=1) # this is the trigamma function (psi^1(x)) + dif = tri * (1 - tri / x) / scipy.special.polygamma(x=y, n=2) # this is psi^2(x) + print(tri, dif) + y = y + dif + if np.max(-dif / y) < 1e-8: + break + else: + logger.warning("Iteration limit exceeded for trigammaInverse function.") + + return y + + +def fit_f_dist_robustly(var: np.ndarray, df1: np.ndarray, covariate: np.ndarray, winsor_tail_p: np.ndarray): + pass From 9b4bb0b8eded7b4a413f208907a79721286a438c Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:06:28 +0200 Subject: [PATCH 33/58] added adjusted profile likelihood --- batchglm/external/edgeR/adjProfileLik.py | 70 ++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 batchglm/external/edgeR/adjProfileLik.py diff --git a/batchglm/external/edgeR/adjProfileLik.py b/batchglm/external/edgeR/adjProfileLik.py new file mode 100644 index 00000000..82ed857b --- /dev/null +++ b/batchglm/external/edgeR/adjProfileLik.py @@ -0,0 +1,70 @@ +import numpy as np +import scipy + +from .estimator import NBEstimator + + +def adjusted_profile_likelihood( + estimator: NBEstimator, + adjust: bool = True, +): + """ + Featurewise Cox-Reid adjusted profile log-likelihoods for the dispersion. + dispersion can be a scalar or a featurewise vector. + Computationally, dispersion can also be a matrix, but the apl is still computed tagwise. + y is a matrix: rows are genes/tags/transcripts, columns are samples/libraries. + offset is a matrix of the same dimensions as y. + This is a numpy vectorized python version of edgeR's adjProfileLik function implemented in C++. 
+ """ + low_value = 1e-10 + log_low_value = np.log(low_value) + + estimator.train(maxit=250, tolerance=1e-10) + model = estimator._model_container + poisson_idx = np.where(1 / model.scale < 0)[0].compute() + + if len(poisson_idx) == model.num_features: + loglik = model.x * np.log(model.location) - model.location - scipy.special.lgamma(model.x + 1) + elif len(poisson_idx) == 0: + loglik = model.ll + else: + loglik = np.zeros_like(model.x) + + poisson_x = model.x[:, poisson_idx] + poisson_loc = model.location_j(poisson_idx) + + loglik[:, poisson_idx] = poisson_x * np.log(poisson_loc) - poisson_loc - scipy.special.lgamma(poisson_x + 1) + + non_poisson_idx = np.where(model.theta_scale > 0)[0] + loglik[:, non_poisson_idx] = model.ll_j(non_poisson_idx) + + sum_loglik = np.sum(loglik, axis=0) + + if adjust: + w = -model.fim_weight_location_location + + adj = np.zeros(model.num_features) + n_loc_params = model.design_loc.shape[1] + if n_loc_params == 1: + adj = np.sum(w, axis=0) + adj = np.log(np.abs(adj)).compute() + else: + xh = model.xh_loc + xhw = np.einsum("ob,of->fob", xh, w) + fim = np.einsum("fob,oc->fbc", xhw, xh).compute() + for i in range(fim.shape[0]): + + ldu, _, info = scipy.linalg.lapack.dsytrf(lower=0, a=fim[i]) + if info < 0: + adj[i] = 0 + print(f"LDL factorization failed for feature {i}") + else: + ldu_diag = np.diag(ldu) + adj[i] = np.sum( + np.where((ldu_diag < low_value) | np.isinf(ldu_diag), log_low_value, np.log(ldu_diag)) + ) + + adj /= 2 + sum_loglik -= adj + + return sum_loglik From 8d4ac3bdb90e4f42d9d1fe66cb96dfaad8518cce Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:11:52 +0200 Subject: [PATCH 34/58] cleanup and integration of one_group_glm --- batchglm/external/edgeR/estimator.py | 166 ++++++++++++--------------- 1 file changed, 73 insertions(+), 93 deletions(-) diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py index 8f752206..cd27e8c9 100644 --- a/batchglm/external/edgeR/estimator.py +++ b/batchglm/external/edgeR/estimator.py @@ -1,12 +1,13 @@ import sys -import time +from typing import Union import dask.array import numpy as np from scipy.linalg import cho_solve, cholesky -from .external import BaseModelContainer, EstimatorGlm, Model, ModelContainer, init_par -from .nbinomDeviance import nb_deviance +from .c_utils import nb_deviance +from .external import BaseModelContainer, EstimatorGlm, InputDataGLM, ModelContainer, NBModel, init_par +from .glm_one_group import fit_single_group, get_single_group_start from .qr_decomposition import get_levenberg_start one_millionth = 1e-6 @@ -15,7 +16,7 @@ ridiculously_low_value = 1e-100 -class LevenbergEstimator: +class Estimator: _train_loc: bool = False _train_scale: bool = False @@ -34,7 +35,63 @@ def __init__(self, model_container: BaseModelContainer, dtype: str): raise ValueError("cannot model more than one scale parameter with edgeR/numpy backend right now.") self.dtype = dtype - def train(self, maxit: int, tolerance: int = 1e-6): + """ + check which algorithm to use. We can use a shortcut algorithm if the number of unique rows in the design + matrix is equal to the number of coefficients. 
+ """ + unique_design = np.unique(self._model_container.design_loc.compute(), axis=0) + if unique_design.shape[0] == unique_design.shape[1]: + self.fitting_algorithm = "one_way" + else: + self.fitting_algorithm = "levenberg" + + def train(self, maxit: int, tolerance: float = 1e-6): + + if self.fitting_algorithm == "one_way": + self.train_oneway(maxit=maxit, tolerance=tolerance) + elif self.fitting_algorithm == "levenberg": + self.train_levenberg(maxit=maxit, tolerance=tolerance) + else: + raise ValueError(f"Unrecognized algorithm: {self.train_levenberg}") + + def train_oneway(self, maxit: int, tolerance: float): + + model = self._model_container + unique_design, group_idx = np.unique(model.design_loc.compute(), return_inverse=True, axis=0) + n_groups = unique_design.shape[1] + + theta_location = model.theta_location.compute() # .copy() + + for i in range(n_groups): + obs_group = np.where(group_idx == i)[0] + group_model = model.model.__class__( + InputDataGLM( + data=model.x[obs_group], + design_loc=model.design_loc.compute()[np.ix_(obs_group, [i])], + design_loc_names=model.design_loc_names[[i]], + size_factors=model.size_factors[obs_group] if model.size_factors is not None else None, + design_scale=model.design_scale.compute()[np.ix_(obs_group, [0])], + design_scale_names=model.design_scale_names[[0]], + as_dask=isinstance(model.x, dask.array.core.Array), + chunk_size_cells=model.chunk_size_cells, + chunk_size_genes=model.chunk_size_genes, + ) + ) + group_model = ModelContainer( + model=group_model, + init_theta_location=get_single_group_start(group_model.x, group_model.size_factors), + init_theta_scale=model.theta_scale, + chunk_size_genes=model.chunk_size_genes, + dtype=model.theta_location.dtype, + ) + + fit_single_group(group_model, maxit=maxit, tolerance=tolerance) + theta_location[i] = group_model.theta_location.compute() + + theta_location = np.linalg.solve(unique_design, theta_location) + model.theta_location = theta_location + + def train_levenberg(self, maxit: int, tolerance: int = 1e-6): model = self._model_container max_x = np.max(model.x, axis=0).compute() @@ -63,7 +120,6 @@ def train(self, maxit: int, tolerance: int = 1e-6): all_zero_features = max_x < low_value model.theta_location[:, all_zero_features] = np.nan not_done_idx = np.where(~all_zero_features)[0] - print(not_done_idx) n_idx = len(not_done_idx) """ @@ -95,15 +151,6 @@ def train(self, maxit: int, tolerance: int = 1e-6): deviances = nb_deviance(model) - xtwx_time = 0.0 - xtwx2_time = 0.0 - cholesky_time = 0.0 - cho_time = 0.0 - dev_time = 0.0 - lambda_time = 0.0 - rest_time = 0.0 - rest2_time = 0.0 - while n_idx > 0 and iteration <= maxit: print("iteration:", iteration) """ @@ -123,62 +170,22 @@ def train(self, maxit: int, tolerance: int = 1e-6): first, you can see that we are effectively performing a multivariate Newton-Raphson procedure with 'dbeta' as the step. 
""" - - xtwx_start = time.time() loc = model.location_j(not_done_idx) scale = model.scale_j(not_done_idx) w = -model.fim_weight_location_location_j(not_done_idx) # shape (obs, features) - # print(w[:, 0].compute()) denom = 1 + loc / scale # shape (obs, features) deriv = (model.x[:, not_done_idx] - loc) / denom * weights # shape (obs, features) - # print((loc / denom)[:,0].compute()) - # print('deriv', deriv[:,0].compute()) - # print('denom', denom[:,0].compute()) - xh = model.xh_loc - if isinstance(xh, dask.array.core.Array): - xh = xh.compute() - xhw = np.einsum("ob,of->fob", xh, w) - fim[not_done_idx] = np.einsum("fob,oc->fbc", xhw, xh).compute() - xtwx_time += time.time() - xtwx_start - - xtwx2_start = time.time() - # print('fim', fim[not_done_idx[0]]) - """ - for (int lib=0; libfob", xh, w) + fim[not_done_idx] = np.einsum("fob,oc->fbc", xhw, xh) # .compute() fim_diags = np.einsum("...ii->...i", fim[not_done_idx]) # shape (features x constrained_coefs) - # print('shaped', deriv.shape) - # print(model.design_loc) - dl[not_done_idx] = np.einsum( - "of,oc->fc", deriv, model.design_loc - ).compute() # shape (features, constrained_coefs) - # print(dl[not_done_idx]) + dl[not_done_idx] = np.einsum("of,oc->fc", deriv, model.design_loc) max_infos[not_done_idx] = np.max(fim_diags, axis=1) # shape (features,) - # print(max_infos[0]) - # print(nb_deviance(model, [0])) - - """ - const double* dcopy=design; - auto xtwxIt=xtwx.begin(); - for (int coef=0; coefmax_info) { max_info=cur_val; } - } - """ if iteration == 1: lambdas = np.maximum(max_infos * one_millionth, supremely_low_value) @@ -195,19 +202,16 @@ def train(self, maxit: int, tolerance: int = 1e-6): failed_in_levenberg_loop[not_done_idx] = False f = 0 overall_steps.fill(0) - xtwx2_time += time.time() - xtwx2_start while n_inner_idx > 0: f += 1 levenberg_steps[inner_idx_update] += 1 cholesky_failed_idx = inner_idx_update.copy() # cholesky_failed = np.ones(n_inner_idx, dtype=bool) - # print('choleksy_failed', cholesky_failed) np.copyto(fim_copy, fim) m = 0 while len(cholesky_failed_idx) > 0: m += 1 - print("cholesky_loop:", m) cholesky_failed = np.zeros(len(cholesky_failed_idx), dtype=bool) """ We need to set up copies as the decomposition routine overwrites the originals, and @@ -215,20 +219,16 @@ def train(self, maxit: int, tolerance: int = 1e-6): refer to the upper triangular for the XtWX copy (as it should be symmetrical). We also add 'lambda' to the diagonals. This reduces the step size as the second derivative is increased. """ - lambda_start = time.time() + lambda_diags = np.einsum( "ab,bc->abc", np.repeat(lambdas[cholesky_failed_idx], n_parm).reshape(len(cholesky_failed_idx), n_parm), np.eye(n_parm), ) - lambda_time += time.time() - lambda_start - # print('lambda_diags', lambda_diags[0]); fim_copy[cholesky_failed_idx] = fim[cholesky_failed_idx] + lambda_diags - # print(fim_copy[not_done_idx]); for i, idx in enumerate(cholesky_failed_idx): - cholesky_start = time.time() try: """ Overwriting FIM with cholesky factorization using scipy.linalg.cholesky. @@ -259,13 +259,11 @@ def train(self, maxit: int, tolerance: int = 1e-6): lambdas[idx] = ridiculously_low_value cholesky_failed[i] = True - cholesky_time += time.time() - cholesky_start cholesky_failed_idx = cholesky_failed_idx[cholesky_failed] steps.fill(0) for i in inner_idx_update: - cho_start = time.time() """ Calculating the step by solving fim_copy * step = dl using scipy.linalg.cho_solve. 
This is equivalent to LAPACK's dpotrs function (wrapper is scipy.linalg.lapack.dpotrs) @@ -277,13 +275,6 @@ def train(self, maxit: int, tolerance: int = 1e-6): overall_steps[:, i] = step steps[:, i] = step - cho_time += time.time() - cho_start - - # print('fim_copy', fim_copy[i]) - # print('dl', dl[i]) - - # print(steps[:, 0]) - # Updating loc params. model.theta_location += steps @@ -295,24 +286,16 @@ def train(self, maxit: int, tolerance: int = 1e-6): lambda up so we want to retake the step from where we were before). This is why we don't modify the values in-place until we're sure we want to take the step. """ - dev_start = time.time() dev_new = nb_deviance(model, inner_idx_update) # TODO ### make this a property of model - dev_time += time.time() - dev_start - - # print(dev_new[0]) - - rest_start = time.time() low_deviances[inner_idx_update] = (dev_new / max_x[inner_idx_update]) < supremely_low_value good_updates = (dev_new <= deviances[inner_idx_update]) | low_deviances[inner_idx_update] idx_bad_step = inner_idx_update[~good_updates] - rest_time = time.time() - rest_start + # Reverse update by feature if update leads to worse loss: - rest2_start = time.time() theta_location_new = model.theta_location.compute() - rest2_time += time.time() - rest2_start theta_location_new[:, idx_bad_step] = theta_location_new[:, idx_bad_step] - steps[:, idx_bad_step] model.theta_location = theta_location_new @@ -336,8 +319,6 @@ def train(self, maxit: int, tolerance: int = 1e-6): n_inner_idx = len(inner_idx_update) - # print('n_inner', inner_idx_update) - """ Terminating if we failed, if divergence from the exact solution is acceptably low (cross-product of dbeta with the log-likelihood derivative) or if the actual deviance @@ -358,15 +339,17 @@ def train(self, maxit: int, tolerance: int = 1e-6): lambdas[levenberg_steps == 1] /= 10 iteration += 1 - # print('..............................', model.theta_location[:,not_done_idx].compute()) - return xtwx_time, xtwx2_time, lambda_time, cholesky_time, cho_time, dev_time, rest_time, rest2_time + def reset_theta_scale(self, new_scale: Union[np.ndarray, dask.array.core.Array, float]): + if isinstance(new_scale, float): + new_scale = np.full(self._model_container.theta_scale.shape, new_scale) + self._model_container.theta_scale = new_scale -class NBEstimator(LevenbergEstimator): +class NBEstimator(Estimator): def __init__( self, - model: Model, + model: NBModel, dispersion: float, dtype: str = "float64", ): @@ -381,7 +364,6 @@ def __init__( init_theta_scale = np.full((1, model.num_features), np.log(1 / dispersion)) self._train_loc = True self._train_scale = False # This is fixed as edgeR doesn't fit the scale parameter - _model_container = ModelContainer( model=model, init_theta_location=init_theta_location, @@ -390,5 +372,3 @@ def __init__( dtype=dtype, ) super(NBEstimator, self).__init__(model_container=_model_container, dtype=dtype) - - # def init_par(model, init_location): From 0e78480e10b8150a42be4f3d58a4d4f5b8875c90 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:15:23 +0200 Subject: [PATCH 35/58] added prior degrees of freedom --- batchglm/external/edgeR/prior_df.py | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 batchglm/external/edgeR/prior_df.py diff --git a/batchglm/external/edgeR/prior_df.py b/batchglm/external/edgeR/prior_df.py new file mode 100644 index 00000000..39040213 --- /dev/null +++ b/batchglm/external/edgeR/prior_df.py @@ -0,0 +1,34 @@ +import numpy as np + +from .c_utils import 
nb_deviance +from .estimator import NBEstimator +from .external import BaseModelContainer +from .limma import squeeze_var +from .residDF import resid_df + + +def calculate_prior_df( + model: BaseModelContainer, + avg_log_cpm: np.ndarray, + robust: bool, + winsor_tail_p: np.ndarray, + dispersion: np.ndarray, + tolerance: float = 1e-10, +): + """ + Calculates prior degrees of freedom. This is a wrapper function around limma's squeezeVar. + This is a python version of edgeR's priorDF function. + """ + estimator = NBEstimator(model, dispersion=dispersion) + estimator.train(maxit=250, tolerance=tolerance) + + zerofit = ((model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4)).compute() # shape (obs, features) + df_residual = resid_df(zerofit, model.design_loc) + + # Empirical Bayes squeezing of the quasi-likelihood variance factors + s2 = nb_deviance(model) / df_residual + s2[df_residual == 0] = 0.0 # s2[df.residual==0] <- 0 + s2 = np.maximum(s2, 0) # s2 <- pmax(s2,0) + + df_prior, _, _ = squeeze_var(s2, df=df_residual, covariate=avg_log_cpm, robust=robust, winsor_tail_p=winsor_tail_p) + return df_prior From dc4ad4ceae590de97dd621116ee77a41d89eda54 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:21:49 +0200 Subject: [PATCH 36/58] added basic estimateDisp function --- batchglm/external/edgeR/estimateDisp.py | 202 ++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 batchglm/external/edgeR/estimateDisp.py diff --git a/batchglm/external/edgeR/estimateDisp.py b/batchglm/external/edgeR/estimateDisp.py new file mode 100644 index 00000000..4cd64ab2 --- /dev/null +++ b/batchglm/external/edgeR/estimateDisp.py @@ -0,0 +1,202 @@ +import numpy as np +from scipy.linalg import qr + +from .adjProfileLik import adjusted_profile_likelihood +from .aveLogCPM import calculate_avg_log_cpm +from .estimator import NBEstimator +from .external import InputDataGLM, NBModel +from .maximizeInterpolant import maximize_interpolant +from .prior_df import calculate_prior_df +from .residDF import combo_groups +from .wleb import wleb + + +def estimate_disp( + model: NBModel, + group=None, # + prior_df=None, # TODO + trend_method="loess", + tagwise: bool = True, # TODO + span=None, # TODO + min_rowsum: int = 5, # TODO + grid_length: int = 21, # TODO + grid_range: tuple = (-10, 10), # TODO + robust: bool = False, # TODO + winsor_tail_p: tuple = (0.05, 0.1), # TODO + tol: float = 1e-6, # TODO + weights=None, # TODO + adjust: bool = True, + *args, + **kwargs, +): + + """ + Implements edgeR's estimateDisp function. + + :param y: np.ndarray of counts + :param design: design_loc + :param prior_df: prior degrees of freedom. It is used in calculating prior.n.????? + :param trend_method: method for estimating dispersion trend. Possible values are + "none" and "loess" (default). + :param mixed_df: logical, only used when trend.method="locfit". + If FALSE, locfit uses a polynomial of degree 0. + If TRUE, locfit uses a polynomial of degree 1 for lowly expressed genes. + Care is taken to smooth the curve. This argument is ignored since locfit isn't implemented. + :param tagwise: logical, should the tagwise dispersions be estimated? + :param span: width of the smoothing window, as a proportion of the data set. + :param min_rowsum: numeric scalar giving a value for the filtering out of low abundance tags. + Only tags with total sum of counts above this value are used. 
+ Low abundance tags can adversely affect the dispersion estimation, + so this argument allows the user to select an appropriate filter threshold for the tag abundance. + :param grid_length: the number of points on which the interpolation is applied for each tag. + :param grid_range: the range of the grid points around the trend on a log2 scale. + :param robust: logical, should the estimation of prior.df be robustified against outliers? + :param winsor_tail_p: numeric vector of length 1 or 2, giving left and right tail proportions + of the deviances to Winsorize when estimating prior.df. + :param tol: the desired accuracy, passed to optimize + :param group: vector or factor giving the experimental group/condition for each library. + :param libsize: numeric vector giving the total count (sequence depth) for each library. + :param offset: offset matrix for the log-linear model, as for glmFit. + Defaults to the log-effective library sizes. + :param weights: optional numeric matrix giving observation weights + """ + + # Spline points + spline_pts = np.linspace(start=grid_range[0], stop=grid_range[1], num=grid_length) + spline_disp = 0.1 * 2 ** spline_pts + l0 = np.zeros((model.num_features, grid_length)) + + # Identify which observations have means of zero (weights aren't needed here). + print("Performing initial fit...", end="") + estimator = NBEstimator(model, dispersion=0.05) + estimator.train(maxit=250, tolerance=tol) + + zerofit = ((model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4)).compute() # shape (obs, features) + + groups = combo_groups(zerofit) + coefs = list() + print("DONE.") + print("Calculating adjusted profile likelihoods in subgroups...", end="") + for subgroup in groups: + not_zero_obs_in_group = ~zerofit[:, subgroup[0]] + if not np.any(not_zero_obs_in_group): + continue + if np.all(not_zero_obs_in_group): + design_new = model.design_loc + new_dloc_names = model.design_loc_names + else: + design_new = model.design_loc[not_zero_obs_in_group] + _, _, pivot = qr(design_new, mode="raw", pivoting=True) + coefs_new = np.array( + pivot[: np.linalg.matrix_rank(design_new.compute())] + ) # explicitly make this array to keep dimension info + if len(coefs_new) == design_new.shape[0]: + continue + design_new = design_new[:, coefs_new] + new_dloc_names = model.design_loc_names[coefs_new] + input_data = InputDataGLM( + data=model.x.compute()[np.ix_(not_zero_obs_in_group, subgroup)], + design_loc=design_new, + design_loc_names=new_dloc_names, + size_factors=model.size_factors[:, not_zero_obs_in_group] if model.size_factors is not None else None, + design_scale=model.design_scale[not_zero_obs_in_group], + design_scale_names=np.array(["Intercept"]), + as_dask=True, + chunk_size_cells=1000000, + chunk_size_genes=1000000, + ) + group_model = NBModel(input_data) + estimator = NBEstimator(group_model, dispersion=0.05) + for i in range(len(spline_disp)): + estimator.reset_theta_scale(np.log(1 / spline_disp[i])) + l0[subgroup, i] = adjusted_profile_likelihood(estimator, adjust=adjust) + + coefs.append(estimator._model_container.theta_location.compute()) + + print("DONE.") + + # Calculate common dispersion + overall = maximize_interpolant(spline_pts, l0.sum(axis=0, keepdims=True)) # (1, spline_pts) + common_dispersion = 0.1 * 2 ** overall + + print(f"Common dispersion is {common_dispersion}.") + + # Allow dispersion trend? 
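
    As an aside, the grid construction and the common dispersion computed above can be
    reproduced in isolation. The log-likelihood matrix below is fabricated and np.argmax merely
    stands in for the spline interpolation performed by maximize_interpolant:

        import numpy as np

        grid_length, grid_range = 21, (-10, 10)
        spline_pts = np.linspace(grid_range[0], grid_range[1], num=grid_length)
        spline_disp = 0.1 * 2 ** spline_pts     # dispersion grid centred on 0.1

        rng = np.random.default_rng(0)
        # fake per-feature log-likelihoods evaluated on the grid: shape (features, grid)
        l0_toy = -((spline_pts - rng.normal(scale=2.0, size=(100, 1))) ** 2)

        best = np.argmax(l0_toy.sum(axis=0))    # grid point maximising the summed likelihood
        common_disp_toy = 0.1 * 2 ** spline_pts[best]
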
+ if trend_method is not None: + print("Calculating trended dispersion...", flush=True) + avg_log_cpm = calculate_avg_log_cpm( + model.x.copy(), model_class=model.__class__, dispersion=common_dispersion[0], weights=weights + ) + span, _, m0, trend, _ = wleb( + theta=spline_pts, + loglik=l0, + covariate=avg_log_cpm[0], + trend_method=trend_method, + span=span, + overall=False, + individual=False, + ) + disp_trend = 0.1 * 2 ** trend + trended_dispersion = np.full(model.num_features, disp_trend[np.argmin(avg_log_cpm)]) + trended_dispersion = disp_trend + print("DONE.") + else: + avg_log_cpm = None + m0 = np.broadcast_to(l0.mean(axis=0), shape=(model.x.shape[1], len(spline_pts))) + disp_trend = common_dispersion + trended_dispersion = None + + # Are tagwise dispersions required? + if not tagwise: + return common_dispersion, trended_dispersion + + # Calculate prior.df + print("Calculating featurewise dispersion...") + if prior_df is None: # + prior_df = calculate_prior_df( + model, avg_log_cpm[0].compute(), robust=robust, winsor_tail_p=winsor_tail_p, dispersion=disp_trend + ) + print("DONE.") + n_loc_params = model.design_loc.shape[1] + prior_n = prior_df / (model.num_observations - n_loc_params) + + print("Calculating featurewise dispersion...") + # Initiate featurewise dispersions + if trend_method is not None: + featurewise_dispersion = trended_dispersion.copy() + else: + featurewise_dispersion = np.full(model.num_features, common_dispersion) + + # Checking if the shrinkage is near-infinite. + too_large = prior_n > 1e6 + if not np.all(too_large): + temp_n = prior_n + if np.any(too_large): + temp_n[too_large] = 1e6 + + # Estimating tagwise dispersions + _, _, _, _, out_individual = wleb( + theta=spline_pts, + loglik=l0, + prior_n=temp_n, + covariate=avg_log_cpm, + trend_method=trend_method, + span=span, + overall=False, + trend=False, + m0=m0, + ) + if not robust or len(too_large) == 1: + featurewise_dispersion = 0.1 * 2 ** out_individual + else: + featurewise_dispersion[~too_large] = 0.1 * 2 ** out_individual[~too_large] + print("DONE.") + if robust: + temp_df = prior_df + temp_n = prior_n + prior_df = np.full(model.num_features, np.inf) + prior_n = np.full(model.num_features, np.inf) + prior_df = temp_df + prior_n = temp_n + + return common_dispersion, trended_dispersion, featurewise_dispersion, span, prior_df, prior_n From f2e08313185a1cf48cc8884551fe6506106f8b4d Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 6 Aug 2022 18:28:04 +0200 Subject: [PATCH 37/58] added c_utility functions --- batchglm/external/edgeR/c_utils.cpp | 247 ++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 batchglm/external/edgeR/c_utils.cpp diff --git a/batchglm/external/edgeR/c_utils.cpp b/batchglm/external/edgeR/c_utils.cpp new file mode 100644 index 00000000..39807033 --- /dev/null +++ b/batchglm/external/edgeR/c_utils.cpp @@ -0,0 +1,247 @@ +#include +#include +#include + + +const double one_tenthousandth=std::pow(10, -4.0); +const double mildly_low_value=std::pow(10, -8.0); +const double one_million=std::pow(10, 6); + +/* All functions are taken from the C++ backend of edgeR. + * The R wrappers were replaced with according numpy / python C-API. + */ + +double compute_unit_nb_deviance (double y, double mu, double phi) { + y+=mildly_low_value; + mu+=mildly_low_value; + + /* Calculating the deviance using either the Poisson (small phi*mu), the Gamma (large) or NB (everything else). + * Some additional work is put in to make the transitions between families smooth. 
+ */ + if (phi < one_tenthousandth) { + const double resid = y - mu; + return 2 * ( y * std::log(y/mu) - resid - 0.5*resid*resid*phi*(1+phi*(2/3*resid-y)) ); + } else { + const double product=mu*phi; + if (product > one_million) { + return 2 * ( (y - mu)/mu - std::log(y/mu) ) * mu/(1+product); + } else { + const double invphi=1/phi; + return 2 * (y * std::log( y/mu ) + (y + invphi) * std::log( (mu + invphi)/(y + invphi) ) ); + } + } +}; + + +static PyObject *loess_by_col(PyObject *self, PyObject *args) { + + const double low_value = std::pow(10.0, -10.0); + PyArrayObject *x, *y; + int span; + PyArg_ParseTuple(args, "OOi", &x, &y, &span); + if (PyErr_Occurred()) { + return NULL; + } + if (!(PyArray_Check(x)) && !(PyArray_Check(y))) { + PyErr_SetString(PyExc_TypeError, "First two arguments must be numpy arrays."); + return NULL; + } + int total = PyArray_SIZE(x); + npy_intp *x_dims = PyArray_DIMS(x); + npy_intp *y_dims = PyArray_DIMS(y); + + int ncols = y_dims[1]; + + double *x_ptr; + double **y_ptrs; + PyArray_AsCArray((PyObject **)&x, &x_ptr, x_dims, 1, PyArray_DescrFromType(NPY_DOUBLE)); + if (PyErr_Occurred()){ + return NULL; + } + PyArray_AsCArray((PyObject **)&y, &y_ptrs, y_dims, 2, PyArray_DescrFromType(NPY_DOUBLE)); + if (PyErr_Occurred()){ + return NULL; + } + + if (span > total) { + PyErr_SetString(PyExc_RuntimeError, "Span must be smaller than the total number of points."); + return NULL; + } + double w_ptr[total]; + double f_ptrs[y_dims[0]][y_dims[1]]; + + try { + int frame_end=span-1; + std::cout << frame_end << '\n'; + for (int cur_p=0; cur_pframe_end) { frame_end=cur_p; } + const double& cur_point=x_ptr[cur_p]; + double back_dist=cur_point-x_ptr[frame_end-span+1], front_dist=x_ptr[frame_end]-cur_point, + max_dist=(back_dist > front_dist ? back_dist : front_dist); + + while (frame_end < total-1 && cur_p+span-1>frame_end) { + /* Every time we advance, we twiddle with the ends of the frame to see if we can't get + * a better fit. The frame will always advance in the forward direction. This is because the + * current frame is optimal with respect to the previous tag. If the previous maximal distance + * was at the back, shifting the frame backward will increase the back distance with respect to + * the current tag (and thus increase the maximal distance). + * + * If the previous maximal distance was at the front, shifting the frame backward may + * decrease the front distance with respect to the current tag. However, we note that + * because of optimality, having a previous maximal distance at the front must mean + * that a back-shifted frame will result in an even larger previous maximal distance at + * the back (otherwise the optimal frame would be located further back to start with). In + * short, shifting the frame backwards will flip the maximal distance to that of the back + * distance which is even larger than the non-shifted forward distance. + * + * Thus, the frame can only go forwards. Note that below, the frame is defined by + * the 'end' position which notes the end point of the current frame. The start + * point is inherently defined by revolving around the minimum point. + */ + back_dist=cur_point-x_ptr[frame_end-span+2]; + front_dist=x_ptr[frame_end+1]-cur_point; + const double& next_max=(back_dist > front_dist ? 
back_dist : front_dist); + /* This bit provides some protection against near-equal values, by forcing the frame + * forward provided that the difference between the lowest maximum distance and + * the maximum distance at any other frame is less than a low_value. This ensures + * that values following a stretch of identical x-coordinates are accessible + * to the algorithm (rather than being blocked off by inequalities introduced by + * double imprecision). + */ + const double diff=(next_max-max_dist)/max_dist; + if (diff > low_value) { + break; + } else if (diff < 0) { + max_dist=next_max; + } + ++frame_end; + } + /* Now that we've located our optimal window, we can calculate the weighted average + * across the points in the window (weighted according to distance from the current point). + * and we can calculate the leverages. Unfortunately, we have to loop over the points in the + * window because each weight must be recomputed according to its new distance and new maximal + * distance. + */ + double total_weight=0; + double& out_leverage=(w_ptr[cur_p]=-1); + for (int i=0; i B - A. The algorithm above will move the + * frame to [1,3] when calculating the maximum distance for B. This is the same as [0, 2] in terms + * of distance, but only using the frame components to calculate the mean will miss out on element 0. + * So, the computation should work from [0, 3]. There's no need to worry about the extra 'C' as it + * will have weight zero. + */ + for (int m=frame_end; m>=0; --m) { + const double rel_dist=(max_dist > low_value ? std::abs(x_ptr[m]-cur_point)/max_dist : 0); + const double weight=std::pow(1-std::pow(rel_dist, 3.0), 3.0); + if (weight < 0) { continue; } + total_weight+=weight; + + for (int i=0; i Date: Sun, 7 Aug 2022 21:07:25 +0200 Subject: [PATCH 38/58] cleanup --- batchglm/external/edgeR/aveLogCPM.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/batchglm/external/edgeR/aveLogCPM.py b/batchglm/external/edgeR/aveLogCPM.py index 4e580709..5954bbb7 100644 --- a/batchglm/external/edgeR/aveLogCPM.py +++ b/batchglm/external/edgeR/aveLogCPM.py @@ -3,13 +3,12 @@ import dask.array import numpy as np -from .external import InputDataGLM, ModelContainer +from .external import InputDataGLM, ModelContainer, NBModel from .glm_one_group import fit_single_group, get_single_group_start def calculate_avg_log_cpm( x: np.ndarray, - model_class, size_factors: Optional[np.ndarray] = None, dispersion: Union[np.ndarray, float] = 0.05, prior_count: int = 2, @@ -43,8 +42,8 @@ def calculate_avg_log_cpm( size_factors = np.full((x.shape[0], 1), np.log(1.0)) adjusted_prior, adjusted_size_factors = add_priors(prior_count, size_factors) - x += adjusted_prior - avg_cpm_model = model_class( + x = x + adjusted_prior + avg_cpm_model = NBModel( InputDataGLM( data=x, design_loc=np.ones((x.shape[0], 1)), From dd7c053018b812be99e60238e390efe1821572b1 Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 7 Aug 2022 21:07:59 +0200 Subject: [PATCH 39/58] added support for summed nb_deviance --- batchglm/external/edgeR/c_utils.cpp | 40 +++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/batchglm/external/edgeR/c_utils.cpp b/batchglm/external/edgeR/c_utils.cpp index 39807033..97c2eae0 100644 --- a/batchglm/external/edgeR/c_utils.cpp +++ b/batchglm/external/edgeR/c_utils.cpp @@ -180,7 +180,8 @@ static PyObject *loess_by_col(PyObject *self, PyObject *args) { static PyObject *nb_deviance(PyObject *self, PyObject *args) { PyArrayObject *x, *loc, *scale; - 
PyArg_ParseTuple(args, "OOO", &x, &loc, &scale); + bool *sum; + PyArg_ParseTuple(args, "OOOp", &x, &loc, &scale, &sum); if (PyErr_Occurred()) { return NULL; } @@ -210,16 +211,35 @@ static PyObject *nb_deviance(PyObject *self, PyObject *args) { double *loc_ptr = &loc_data[0][0]; double *scale_ptr = &scale_data[0][0]; - - PyObject *result = PyArray_SimpleNew(2, dims, NPY_DOUBLE); - double *result_data = (double*) PyArray_DATA((PyArrayObject *) result); - - for(int i=0; i Date: Sun, 7 Aug 2022 21:08:29 +0200 Subject: [PATCH 40/58] bugfix: leverages not correctly returned --- batchglm/external/edgeR/wleb.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/batchglm/external/edgeR/wleb.py b/batchglm/external/edgeR/wleb.py index 39547c40..4bcec901 100644 --- a/batchglm/external/edgeR/wleb.py +++ b/batchglm/external/edgeR/wleb.py @@ -102,8 +102,8 @@ def loess(y: np.ndarray, x: np.ndarray, span: float): leverages = np.arange(n_features) return y_smooth, leverages - y_smooth = loess_by_col(x, y, n_span) + y_smooth, leverages_smooth = loess_by_col(x, y, n_span) y_smooth[order] = y_smooth.copy() - leverages[order] = leverages.copy() + leverages_smooth[order] = leverages_smooth.copy() - return y_smooth, leverages + return y_smooth, leverages_smooth From 6abe5c9f02281e8e4137fe166c6c7694279d9367 Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 7 Aug 2022 21:12:01 +0200 Subject: [PATCH 41/58] removed print statement --- batchglm/external/edgeR/limma/fitFDist.py | 1 - 1 file changed, 1 deletion(-) diff --git a/batchglm/external/edgeR/limma/fitFDist.py b/batchglm/external/edgeR/limma/fitFDist.py index fa8fcc07..a8414fcc 100644 --- a/batchglm/external/edgeR/limma/fitFDist.py +++ b/batchglm/external/edgeR/limma/fitFDist.py @@ -174,7 +174,6 @@ def trigamma_inverse(x: np.ndarray): for _ in range(50): tri = scipy.special.polygamma(x=y, n=1) # this is the trigamma function (psi^1(x)) dif = tri * (1 - tri / x) / scipy.special.polygamma(x=y, n=2) # this is psi^2(x) - print(tri, dif) y = y + dif if np.max(-dif / y) < 1e-8: break From 3d2e1f37442d4ce5e0ad1d67b39ace9a5daf11bd Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 7 Aug 2022 21:14:03 +0200 Subject: [PATCH 42/58] ignore irrelevant log division errors --- batchglm/external/edgeR/calcNormFactors.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/batchglm/external/edgeR/calcNormFactors.py b/batchglm/external/edgeR/calcNormFactors.py index f4becc6e..fbed536d 100644 --- a/batchglm/external/edgeR/calcNormFactors.py +++ b/batchglm/external/edgeR/calcNormFactors.py @@ -63,10 +63,12 @@ def _calc_factor_tmm( sample_sums = np.sum(data, axis=1, keepdims=True) sum_normalized_data = data / sample_sums - log_ratios = np.log2(sum_normalized_data / sum_normalized_data[ref_idx]) - absolute_values = (np.log2(sum_normalized_data) + np.log2(sum_normalized_data[ref_idx])) / 2 - estimated_asymptotic_variance = (sample_sums - data) / sample_sums / data - estimated_asymptotic_variance += (sample_sums[ref_idx] - data[ref_idx]) / sample_sums[ref_idx] / data[ref_idx] + with np.errstate(divide="ignore", invalid="ignore"): + opfer = sum_normalized_data / sum_normalized_data[ref_idx] + log_ratios = np.log2(opfer) + absolute_values = (np.log2(sum_normalized_data) + np.log2(sum_normalized_data[ref_idx])) / 2 + estimated_asymptotic_variance = (sample_sums - data) / sample_sums / data + estimated_asymptotic_variance += (sample_sums[ref_idx] - data[ref_idx]) / sample_sums[ref_idx] / data[ref_idx] # remove infinite values, cutoff based on 
aCutOff finite_idx = np.isfinite(log_ratios) & np.isfinite(absolute_values) & (absolute_values > a_cutoff) From 9595dd9e37170461cb5f3ab09e1bc88835d81c34 Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 7 Aug 2022 21:17:38 +0200 Subject: [PATCH 43/58] delayed levenberg theta_loc init + dask support --- batchglm/external/edgeR/estimator.py | 45 +++++++++++++++++++++------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py index cd27e8c9..62cca5cd 100644 --- a/batchglm/external/edgeR/estimator.py +++ b/batchglm/external/edgeR/estimator.py @@ -39,11 +39,18 @@ def __init__(self, model_container: BaseModelContainer, dtype: str): check which algorithm to use. We can use a shortcut algorithm if the number of unique rows in the design matrix is equal to the number of coefficients. """ - unique_design = np.unique(self._model_container.design_loc.compute(), axis=0) + if isinstance(self._model_container.design_loc, dask.array.core.Array): + unique_design = np.unique(self._model_container.design_loc.compute(), axis=0) + else: + unique_design = np.unique(self._model_container.design_loc, axis=0) + if unique_design.shape[0] == unique_design.shape[1]: self.fitting_algorithm = "one_way" else: self.fitting_algorithm = "levenberg" + self._model_container.theta_location = get_levenberg_start( + model=self._model_container.model, disp=self._model_container.scale, use_null=True + ) def train(self, maxit: int, tolerance: float = 1e-6): @@ -55,22 +62,38 @@ def train(self, maxit: int, tolerance: float = 1e-6): raise ValueError(f"Unrecognized algorithm: {self.train_levenberg}") def train_oneway(self, maxit: int, tolerance: float): - model = self._model_container - unique_design, group_idx = np.unique(model.design_loc.compute(), return_inverse=True, axis=0) + if isinstance(model.design_loc, dask.array.core.Array): + unique_design, group_idx = np.unique(model.design_loc.compute(), return_inverse=True, axis=0) + else: + unique_design, group_idx = np.unique(model.design_loc, return_inverse=True, axis=0) + n_groups = unique_design.shape[1] - theta_location = model.theta_location.compute() # .copy() + theta_location = model.theta_location + if isinstance(theta_location, dask.array.core.Array): + theta_location = theta_location.compute() # .copy() for i in range(n_groups): obs_group = np.where(group_idx == i)[0] + dloc = model.design_loc + if isinstance(model.design_loc, dask.array.core.Array): + dloc = dloc.compute() + sf = model.size_factors + if sf is not None: + sf = sf[obs_group] + if isinstance(model.size_factors, dask.array.core.Array): + sf = sf.compute() + dscale = model.design_scale + if isinstance(model.design_loc, dask.array.core.Array): + dscale = dscale.compute() group_model = model.model.__class__( InputDataGLM( data=model.x[obs_group], - design_loc=model.design_loc.compute()[np.ix_(obs_group, [i])], + design_loc=dloc[np.ix_(obs_group, [i])], design_loc_names=model.design_loc_names[[i]], - size_factors=model.size_factors[obs_group] if model.size_factors is not None else None, - design_scale=model.design_scale.compute()[np.ix_(obs_group, [0])], + size_factors=sf, + design_scale=dscale[np.ix_(obs_group, [0])], design_scale_names=model.design_scale_names[[0]], as_dask=isinstance(model.x, dask.array.core.Array), chunk_size_cells=model.chunk_size_cells, @@ -84,9 +107,11 @@ def train_oneway(self, maxit: int, tolerance: float): chunk_size_genes=model.chunk_size_genes, dtype=model.theta_location.dtype, ) - 
fit_single_group(group_model, maxit=maxit, tolerance=tolerance) - theta_location[i] = group_model.theta_location.compute() + if isinstance(group_model.theta_location, dask.array.core.Array): + theta_location[i] = group_model.theta_location.compute() + else: + theta_location[i] = group_model.theta_location theta_location = np.linalg.solve(unique_design, theta_location) model.theta_location = theta_location @@ -360,7 +385,7 @@ def __init__( :param dispersion: The fixed dispersion parameter to use during fitting the loc model. :param dtype: Numerical precision. """ - init_theta_location = get_levenberg_start(model=model, disp=dispersion, use_null=True) + init_theta_location = np.zeros((model.xh_loc.shape[1], model.num_features), dtype=model.cast_dtype) init_theta_scale = np.full((1, model.num_features), np.log(1 / dispersion)) self._train_loc = True self._train_scale = False # This is fixed as edgeR doesn't fit the scale parameter From ca7e50dda3edd412acfa169b1c11ffa2af78267e Mon Sep 17 00:00:00 2001 From: picciama Date: Sun, 7 Aug 2022 21:34:35 +0200 Subject: [PATCH 44/58] added sizefactor and dask/nodask support --- batchglm/external/edgeR/adjProfileLik.py | 13 ++- batchglm/external/edgeR/estimateDisp.py | 116 ++++++++++++++++------- batchglm/external/edgeR/glm_one_group.py | 25 +++-- batchglm/external/edgeR/prior_df.py | 27 +++++- batchglm/external/edgeR/residDF.py | 2 +- 5 files changed, 133 insertions(+), 50 deletions(-) diff --git a/batchglm/external/edgeR/adjProfileLik.py b/batchglm/external/edgeR/adjProfileLik.py index 82ed857b..d06f3d12 100644 --- a/batchglm/external/edgeR/adjProfileLik.py +++ b/batchglm/external/edgeR/adjProfileLik.py @@ -1,3 +1,4 @@ +import dask.array import numpy as np import scipy @@ -21,7 +22,9 @@ def adjusted_profile_likelihood( estimator.train(maxit=250, tolerance=1e-10) model = estimator._model_container - poisson_idx = np.where(1 / model.scale < 0)[0].compute() + poisson_idx = np.where(1 / model.scale < 0)[0] + if isinstance(poisson_idx, dask.array.core.Array): + poisson_idx = poisson_idx.compute() if len(poisson_idx) == model.num_features: loglik = model.x * np.log(model.location) - model.location - scipy.special.lgamma(model.x + 1) @@ -47,11 +50,15 @@ def adjusted_profile_likelihood( n_loc_params = model.design_loc.shape[1] if n_loc_params == 1: adj = np.sum(w, axis=0) - adj = np.log(np.abs(adj)).compute() + adj = np.log(np.abs(adj)) + if isinstance(adj, dask.array.core.Array): + adj = adj.compute() else: xh = model.xh_loc xhw = np.einsum("ob,of->fob", xh, w) - fim = np.einsum("fob,oc->fbc", xhw, xh).compute() + fim = np.einsum("fob,oc->fbc", xhw, xh) + if isinstance(fim, dask.array.core.Array): + fim = fim.compute() for i in range(fim.shape[0]): ldu, _, info = scipy.linalg.lapack.dsytrf(lower=0, a=fim[i]) diff --git a/batchglm/external/edgeR/estimateDisp.py b/batchglm/external/edgeR/estimateDisp.py index 4cd64ab2..37342548 100644 --- a/batchglm/external/edgeR/estimateDisp.py +++ b/batchglm/external/edgeR/estimateDisp.py @@ -1,3 +1,6 @@ +from typing import Optional + +import dask.array import numpy as np from scipy.linalg import qr @@ -12,7 +15,11 @@ def estimate_disp( - model: NBModel, + model: Optional[NBModel] = None, + x: Optional[np.ndarray] = None, + design: Optional[np.ndarray] = None, + design_loc_names: Optional[np.ndarray] = None, + norm_factors: Optional[np.ndarray] = None, group=None, # prior_df=None, # TODO trend_method="loess", @@ -26,8 +33,7 @@ def estimate_disp( tol: float = 1e-6, # TODO weights=None, # TODO adjust: bool = True, - *args, - 
**kwargs, + **input_data_kwargs, ): """ @@ -61,6 +67,36 @@ def estimate_disp( :param weights: optional numeric matrix giving observation weights """ + if model is None: + if x is None: + raise AssertionError("Provide x when no model is specified.") + if design is None: + raise AssertionError("Provide design when no model is specified.") + + if norm_factors is None: + sum_counts_observation = x.sum(axis=1) + if norm_factors is None: + size_factors = np.log(sum_counts_observation) + else: + size_factors = np.log(sum_counts_observation * norm_factors) + + selected_features = x.sum(axis=0) >= min_rowsum + x_filtered = x[:, selected_features] + + input_data = InputDataGLM( + data=x_filtered, + design_loc=design, + design_loc_names=design_loc_names, + size_factors=size_factors, + design_scale=np.ones((x.shape[0], 1)), + design_scale_names=np.array(["Intercept"]), + **input_data_kwargs, + ) + model = NBModel(input_data) + else: + selected_features = ... + x = model.x.copy() + # Spline points spline_pts = np.linspace(start=grid_range[0], stop=grid_range[1], num=grid_length) spline_disp = 0.1 * 2 ** spline_pts @@ -71,10 +107,10 @@ def estimate_disp( estimator = NBEstimator(model, dispersion=0.05) estimator.train(maxit=250, tolerance=tol) - zerofit = ((model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4)).compute() # shape (obs, features) - + zerofit = (model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4) + if isinstance(zerofit, dask.array.core.Array): + zerofit = zerofit.compute() # shape (obs, features) groups = combo_groups(zerofit) - coefs = list() print("DONE.") print("Calculating adjusted profile likelihoods in subgroups...", end="") for subgroup in groups: @@ -86,22 +122,37 @@ def estimate_disp( new_dloc_names = model.design_loc_names else: design_new = model.design_loc[not_zero_obs_in_group] - _, _, pivot = qr(design_new, mode="raw", pivoting=True) - coefs_new = np.array( - pivot[: np.linalg.matrix_rank(design_new.compute())] - ) # explicitly make this array to keep dimension info + if isinstance(design_new, dask.array.core.Array): + _, _, pivot = qr(design_new.compute(), mode="raw", pivoting=True) + coefs_new = np.array( + pivot[: np.linalg.matrix_rank(design_new.compute())] + ) # explicitly make this array to keep dimension info + else: + _, _, pivot = qr(design_new, mode="raw", pivoting=True) + coefs_new = np.array( + pivot[: np.linalg.matrix_rank(design_new)] + ) # explicitly make this array to keep dimension info if len(coefs_new) == design_new.shape[0]: continue design_new = design_new[:, coefs_new] new_dloc_names = model.design_loc_names[coefs_new] + + subgroup_x = model.x + if isinstance(model.x, dask.array.core.Array): + subgroup_x = subgroup_x.compute() + sf = model.size_factors + if sf is not None: + sf = sf[not_zero_obs_in_group] + if isinstance(sf, dask.array.core.Array): + sf = sf.compute() input_data = InputDataGLM( - data=model.x.compute()[np.ix_(not_zero_obs_in_group, subgroup)], + data=subgroup_x[np.ix_(not_zero_obs_in_group, subgroup)], design_loc=design_new, design_loc_names=new_dloc_names, - size_factors=model.size_factors[:, not_zero_obs_in_group] if model.size_factors is not None else None, + size_factors=sf, design_scale=model.design_scale[not_zero_obs_in_group], design_scale_names=np.array(["Intercept"]), - as_dask=True, + as_dask=isinstance(model.x, dask.array.core.Array), chunk_size_cells=1000000, chunk_size_genes=1000000, ) @@ -110,9 +161,6 @@ def estimate_disp( for i in range(len(spline_disp)): estimator.reset_theta_scale(np.log(1 / spline_disp[i])) 
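
        For intuition, the quantity written into l0 in the next line can be sketched for a single
        feature as a Cox-Reid adjusted negative binomial log-likelihood. The data, design and
        dispersion below are made up, and this is not the actual adjusted_profile_likelihood
        implementation:

            import numpy as np
            from scipy.special import gammaln

            rng = np.random.default_rng(0)
            n_obs = 40
            X = np.column_stack([np.ones(n_obs), rng.normal(size=n_obs)])
            y = rng.poisson(5.0, size=n_obs).astype(float)
            mu = np.full(n_obs, y.mean())      # stand-in fitted means for one feature
            phi = 0.2                          # one candidate dispersion from the grid
            r = 1.0 / phi                      # NB size parameter

            # negative binomial log-likelihood of the feature at these fitted means
            ll = np.sum(gammaln(y + r) - gammaln(r) - gammaln(y + 1)
                        + r * np.log(r / (r + mu)) + y * np.log(mu / (r + mu)))

            # Cox-Reid adjustment: subtract half the log-determinant of X^T W X
            w = mu / (1.0 + phi * mu)
            _, logdet = np.linalg.slogdet((X * w[:, None]).T @ X)
            apl = ll - 0.5 * logdet
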
l0[subgroup, i] = adjusted_profile_likelihood(estimator, adjust=adjust) - - coefs.append(estimator._model_container.theta_location.compute()) - print("DONE.") # Calculate common dispersion @@ -124,21 +172,22 @@ def estimate_disp( # Allow dispersion trend? if trend_method is not None: print("Calculating trended dispersion...", flush=True) - avg_log_cpm = calculate_avg_log_cpm( - model.x.copy(), model_class=model.__class__, dispersion=common_dispersion[0], weights=weights - ) + sf = model.size_factors + if sf is not None and isinstance(sf, dask.array.core.Array): + sf = sf.compute() + avg_log_cpm = calculate_avg_log_cpm(x, size_factors=sf, dispersion=common_dispersion[0], weights=weights) span, _, m0, trend, _ = wleb( theta=spline_pts, loglik=l0, - covariate=avg_log_cpm[0], + covariate=avg_log_cpm[0, selected_features], trend_method=trend_method, span=span, overall=False, individual=False, ) disp_trend = 0.1 * 2 ** trend - trended_dispersion = np.full(model.num_features, disp_trend[np.argmin(avg_log_cpm)]) - trended_dispersion = disp_trend + trended_dispersion = np.full(x.shape[1], disp_trend[np.argmin(avg_log_cpm[selected_features])]) + trended_dispersion[selected_features] = disp_trend print("DONE.") else: avg_log_cpm = None @@ -149,23 +198,22 @@ def estimate_disp( # Are tagwise dispersions required? if not tagwise: return common_dispersion, trended_dispersion - + if isinstance(avg_log_cpm, dask.array.core.Array): + avg_log_cpm = avg_log_cpm.compute() # Calculate prior.df print("Calculating featurewise dispersion...") if prior_df is None: # prior_df = calculate_prior_df( - model, avg_log_cpm[0].compute(), robust=robust, winsor_tail_p=winsor_tail_p, dispersion=disp_trend + model, avg_log_cpm[0, selected_features], robust=robust, winsor_tail_p=winsor_tail_p, dispersion=disp_trend ) - print("DONE.") n_loc_params = model.design_loc.shape[1] prior_n = prior_df / (model.num_observations - n_loc_params) - print("Calculating featurewise dispersion...") # Initiate featurewise dispersions if trend_method is not None: featurewise_dispersion = trended_dispersion.copy() else: - featurewise_dispersion = np.full(model.num_features, common_dispersion) + featurewise_dispersion = np.full(x.shape[1], common_dispersion) # Checking if the shrinkage is near-infinite. 
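
    The effect of prior_n below can be illustrated with a toy version of the weighted-likelihood
    shrinkage that wleb applies afterwards; the grid log-likelihoods are fabricated and this is
    not the wleb implementation itself:

        import numpy as np

        spline_pts = np.linspace(-10, 10, 21)
        loglik_g = -((spline_pts - 4.0) ** 2)    # one feature's own grid log-likelihood
        m0_g = -((spline_pts + 1.0) ** 2)        # trended (averaged) grid log-likelihood

        for prior_n_toy in (0.1, 1.0, 100.0):
            best = spline_pts[np.argmax(loglik_g + prior_n_toy * m0_g)]
            # a larger prior_n pulls the per-feature estimate toward the trend
            print(prior_n_toy, best)
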
too_large = prior_n > 1e6 @@ -179,7 +227,7 @@ def estimate_disp( theta=spline_pts, loglik=l0, prior_n=temp_n, - covariate=avg_log_cpm, + covariate=avg_log_cpm[selected_features], trend_method=trend_method, span=span, overall=False, @@ -187,16 +235,16 @@ def estimate_disp( m0=m0, ) if not robust or len(too_large) == 1: - featurewise_dispersion = 0.1 * 2 ** out_individual + featurewise_dispersion[selected_features] = 0.1 * 2 ** out_individual else: - featurewise_dispersion[~too_large] = 0.1 * 2 ** out_individual[~too_large] + featurewise_dispersion[selected_features][~too_large] = 0.1 * 2 ** out_individual[~too_large] print("DONE.") if robust: temp_df = prior_df temp_n = prior_n - prior_df = np.full(model.num_features, np.inf) - prior_n = np.full(model.num_features, np.inf) - prior_df = temp_df - prior_n = temp_n + prior_df = np.full(x.shape[1], np.inf) + prior_n = np.full(x.shape[1], np.inf) + prior_df[selected_features] = temp_df + prior_n[selected_features] = temp_n return common_dispersion, trended_dispersion, featurewise_dispersion, span, prior_df, prior_n diff --git a/batchglm/external/edgeR/glm_one_group.py b/batchglm/external/edgeR/glm_one_group.py index 72b7df6c..ca232cd1 100644 --- a/batchglm/external/edgeR/glm_one_group.py +++ b/batchglm/external/edgeR/glm_one_group.py @@ -1,6 +1,7 @@ import logging from typing import Optional, Union +import dask.array import numpy as np from .external import BaseModelContainer @@ -13,7 +14,7 @@ def get_single_group_start( x: np.ndarray, sf: Optional[np.ndarray] = None, weights: Optional[Union[np.ndarray, float]] = None, -): +) -> np.ndarray: if weights is None: weights = np.ones_like(x) if weights.shape != x.shape: @@ -23,9 +24,13 @@ def get_single_group_start( if sf is None: sf = np.log(1.0) - + if isinstance(x, dask.array.core.Array): + x = x.compute() + if isinstance(sf, dask.array.core.Array): + sf = sf.compute() theta_location = np.sum(np.where(x > low_value, x / np.exp(sf) * weights, 0), axis=0, keepdims=True) - theta_location = np.log(theta_location / total_weights) + with np.errstate(divide="ignore", invalid="ignore"): + theta_location = np.log(theta_location / total_weights) return theta_location @@ -39,7 +44,9 @@ def fit_single_group( * This is the exact solution for the gamma distribution (which is the limit of the NB as * the dispersion goes to infinity. However, if cur_beta is not NA, then we assume it's good. 
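
        With made-up counts, the starting value described here and a single update with fixed
        dispersion phi look as follows; the low-count masking and the dask handling of the real
        code are omitted:

            import numpy as np

            rng = np.random.default_rng(0)
            y = rng.poisson(8.0, size=(20, 3)).astype(float)  # counts: 20 observations x 3 features
            sf = np.zeros((20, 1))                            # log size factors (all one on the count scale)
            weights = np.ones_like(y)

            # gamma-limit starting value: log of the size-factor adjusted, weighted mean count
            start = np.log(np.sum(y / np.exp(sf) * weights, axis=0) / weights.sum(axis=0))

            # one Newton update with fixed dispersion phi (the reciprocal of the fitted scale)
            phi = 0.1
            mu = np.exp(start + sf)
            step = np.sum((y - mu) / (1 + mu * phi), axis=0) / np.sum(mu / (1 + mu * phi), axis=0)
            start = start + step
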
""" - low_mask = np.all(model.x <= low_value, axis=0).compute() + low_mask = np.all(model.x <= low_value, axis=0) + if isinstance(low_mask, dask.array.core.Array): + low_mask = low_mask.compute() unconverged_idx = np.where(~low_mask)[0] iteration = 0 @@ -52,12 +59,16 @@ def fit_single_group( scale_j = 1 / model.scale_j(unconverged_idx) denominator = 1 + loc_j * scale_j - dl = np.sum((model.x[:, unconverged_idx] - loc_j) / denominator * weights, axis=0).compute() + dl = np.sum((model.x[:, unconverged_idx] - loc_j) / denominator * weights, axis=0) + if isinstance(dl, dask.array.core.Array): + dl = dl.compute() - info = np.sum(loc_j / denominator * weights, axis=0).compute() + info = np.sum(loc_j / denominator * weights, axis=0) + if isinstance(info, dask.array.core.Array): + info = info.compute() cur_step = dl / info step[0, unconverged_idx] = cur_step - model.theta_location = model.theta_location.compute() + step + model.theta_location = model.theta_location + step unconverged_idx = unconverged_idx[np.abs(cur_step) >= tolerance] if len(unconverged_idx) == 0: break diff --git a/batchglm/external/edgeR/prior_df.py b/batchglm/external/edgeR/prior_df.py index 39040213..dbf2d485 100644 --- a/batchglm/external/edgeR/prior_df.py +++ b/batchglm/external/edgeR/prior_df.py @@ -1,3 +1,4 @@ +import dask.array import numpy as np from .c_utils import nb_deviance @@ -22,13 +23,29 @@ def calculate_prior_df( estimator = NBEstimator(model, dispersion=dispersion) estimator.train(maxit=250, tolerance=tolerance) - zerofit = ((model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4)).compute() # shape (obs, features) - df_residual = resid_df(zerofit, model.design_loc) + zerofit = (model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4) + if isinstance(zerofit, dask.array.core.Array): + zerofit = zerofit.compute() # shape (obs, features) + dloc = model.design_loc + if isinstance(model.design_loc, dask.array.core.Array): + dloc = dloc.compute() + df_residual = resid_df(zerofit, dloc) # Empirical Bayes squeezing of the quasi-likelihood variance factors - s2 = nb_deviance(model) / df_residual - s2[df_residual == 0] = 0.0 # s2[df.residual==0] <- 0 - s2 = np.maximum(s2, 0) # s2 <- pmax(s2,0) + x = model.x + if isinstance(model.x, dask.array.core.Array): + x = x.compute() + loc = model.location + if isinstance(model.location, dask.array.core.Array): + loc = loc.compute() + scale = model.scale + if isinstance(model.scale, dask.array.core.Array): + scale = scale.compute() + + with np.errstate(divide="ignore"): + s2 = nb_deviance(x, loc, scale, True) / df_residual + s2[df_residual == 0] = 0.0 + s2 = np.maximum(s2, 0) df_prior, _, _ = squeeze_var(s2, df=df_residual, covariate=avg_log_cpm, robust=robust, winsor_tail_p=winsor_tail_p) return df_prior diff --git a/batchglm/external/edgeR/residDF.py b/batchglm/external/edgeR/residDF.py index b5a6e6a6..20fbd167 100644 --- a/batchglm/external/edgeR/residDF.py +++ b/batchglm/external/edgeR/residDF.py @@ -33,7 +33,7 @@ def resid_df(zero: np.ndarray, design: np.ndarray): degrees_of_freedom_some_zero = n_obs - n_zero[some_zero_idx] for group in groupings: some_zero_group = some_zero[:, group[0]] # shape = (n_obs, ) - degrees_of_freedom_some_zero[group] -= np.linalg.matrix_rank(design[~some_zero_group].compute()) + degrees_of_freedom_some_zero[group] -= np.linalg.matrix_rank(design[~some_zero_group]) degrees_of_freedom_some_zero = np.max(degrees_of_freedom_some_zero, 0) degrees_of_freedom[some_zero_idx] = degrees_of_freedom_some_zero From e1228eee2b695209eef6366d41b7d1b02912970d Mon 
Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:11:18 +0200 Subject: [PATCH 45/58] added newest mypy and pytest versions --- poetry.lock | 1614 +++++++++++++++++++++--------------------------- pyproject.toml | 4 +- 2 files changed, 722 insertions(+), 896 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5cea3795..067d2f3d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -24,9 +24,9 @@ scipy = ">1.4" xlrd = "<2.0" [package.extras] -dev = ["setuptools-scm", "black (>=20.8b1)", "docutils"] -doc = ["sphinx (>=4.1,<4.2)", "sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.11.0)", "sphinx-issues", "scanpydoc (>=0.7.3)", "typing-extensions"] -test = ["loompy (>=3.0.5)", "pytest (>=6.0)", "pytest-cov (>=2.10)", "zarr", "matplotlib", "sklearn", "openpyxl", "joblib", "boltons", "scanpy"] +dev = ["black (>=20.8b1)", "docutils", "setuptools-scm"] +doc = ["scanpydoc (>=0.7.3)", "sphinx (>=4.1,<4.2)", "sphinx-autodoc-typehints (>=1.11.0)", "sphinx-issues", "sphinx-rtd-theme", "typing-extensions"] +test = ["boltons", "joblib", "loompy (>=3.0.5)", "matplotlib", "openpyxl", "pytest (>=6.0)", "pytest-cov (>=2.10)", "scanpy", "sklearn", "zarr"] [[package]] name = "appdirs" @@ -38,18 +38,18 @@ python-versions = "*" [[package]] name = "argcomplete" -version = "1.12.3" +version = "2.0.0" description = "Bash tab completion for argparse" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [package.extras] test = ["coverage", "flake8", "pexpect", "wheel"] [[package]] name = "arrow" -version = "1.2.2" +version = "1.2.3" description = "Better dates & times for Python" category = "dev" optional = false @@ -60,7 +60,7 @@ python-dateutil = ">=2.7.0" [[package]] name = "aspy.refactor-imports" -version = "2.2.1" +version = "2.3.0" description = "Utilities for refactoring imports in python-like syntax." category = "dev" optional = false @@ -69,35 +69,27 @@ python-versions = ">=3.7" [package.dependencies] cached-property = "*" -[[package]] -name = "atomicwrites" -version = "1.4.0" -description = "Atomic file writes." 
-category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - [[package]] name = "attrs" -version = "21.4.0" +version = "22.1.0" description = "Classes Without Boilerplate" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.5" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] [[package]] name = "babel" -version = "2.9.1" +version = "2.10.3" description = "Internationalization utilities" category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.6" [package.dependencies] pytz = ">=2015.7" @@ -166,15 +158,15 @@ python-versions = "*" [[package]] name = "certifi" -version = "2021.10.8" +version = "2022.6.15.1" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "cffi" -version = "1.15.0" +version = "1.15.1" description = "Foreign Function Interface for Python calling C code." category = "dev" optional = false @@ -193,37 +185,37 @@ python-versions = ">=3.6.1" [[package]] name = "chardet" -version = "4.0.0" -description = "Universal encoding detector for Python 2 and 3" +version = "5.0.0" +description = "Universal encoding detector for Python 3" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.6" [[package]] name = "charset-normalizer" -version = "2.0.12" +version = "2.1.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
category = "main" optional = false -python-versions = ">=3.5.0" +python-versions = ">=3.6.0" [package.extras] unicode_backport = ["unicodedata2"] [[package]] name = "click" -version = "8.0.4" +version = "8.1.3" description = "Composable command line interface toolkit" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cloudpickle" -version = "2.0.0" +version = "2.2.0" description = "Extended pickling support for Python objects" category = "main" optional = false @@ -231,7 +223,7 @@ python-versions = ">=3.6" [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.5" description = "Cross-platform colored terminal text." category = "main" optional = false @@ -239,7 +231,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "colorlog" -version = "6.6.0" +version = "6.7.0" description = "Add colours to the output of Python's logging module." category = "dev" optional = false @@ -282,7 +274,7 @@ six = ">=1.10" [[package]] name = "cookietemple" -version = "1.4.0" +version = "1.4.1" description = "A cookiecutter based project template creation tool supporting several domains and languages with advanced linting, syncing and standardized workflows to get your project kickstarted in no time." category = "dev" optional = false @@ -297,8 +289,8 @@ cryptography = ">=3.4.7,<37.0.0" GitPython = ">=3.1.17,<4.0.0" packaging = ">=20.9,<22.0" pygithub = ">=1.54.1,<2.0.0" -PyNaCl = "1.5.0" pynacl = ">=1.4.0,<2.0.0" +PyNaCl = "1.5.0" questionary = ">=1.9.0,<2.0.0" requests = ">=2.25.1,<3.0.0" rich = ">=10.2.2,<11.0.0" @@ -320,7 +312,7 @@ toml = ["toml"] [[package]] name = "cryptography" -version = "36.0.1" +version = "36.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "dev" optional = false @@ -331,11 +323,11 @@ cffi = ">=1.12" [package.extras] docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] sdist = ["setuptools_rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] [[package]] name = "cycler" @@ -355,7 +347,7 @@ python-versions = ">=3.6,<4.0" [[package]] name = "dask" -version = "2022.2.1" +version = "2022.9.0" description = "Parallel PyData with Task Scheduling" category = "main" optional = false @@ -371,11 +363,11 @@ toolz = ">=0.8.2" [package.extras] array = ["numpy (>=1.18)"] -complete = ["bokeh (>=2.1.1)", "distributed (==2022.02.1)", "jinja2", "numpy (>=1.18)", "pandas (>=1.0)"] +complete = ["bokeh (>=2.4.2)", "distributed (==2022.9.0)", "jinja2", "numpy (>=1.18)", "pandas (>=1.0)"] dataframe = ["numpy (>=1.18)", "pandas (>=1.0)"] -diagnostics = ["bokeh (>=2.1.1)", "jinja2"] -distributed = ["distributed (==2022.02.1)"] -test = ["pytest", "pytest-rerunfailures", "pytest-xdist", "pre-commit"] +diagnostics = ["bokeh (>=2.4.2)", "jinja2"] +distributed = ["distributed (==2022.9.0)"] +test = ["pandas", "pre-commit", "pytest", "pytest-rerunfailures", "pytest-xdist"] [[package]] name = "deprecated" @@ -389,11 +381,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" wrapt = ">=1.10,<2" [package.extras] -dev = ["tox", "bump2version (<1)", "sphinx (<2)", "importlib-metadata (<3)", "importlib-resources (<4)", "configparser (<5)", "sphinxcontrib-websupport (<2)", "zipp (<2)", "PyTest (<5)", "PyTest-Cov (<2.6)", "pytest", "pytest-cov"] +dev = ["PyTest (<5)", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "pytest", "pytest-cov", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"] [[package]] name = "distlib" -version = "0.3.4" +version = "0.3.6" description = "Distribution utilities" category = "dev" optional = false @@ -409,7 +401,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "dparse" -version = "0.5.1" +version = "0.6.0" description = "A parser for Python dependency files" category = "dev" optional = false @@ -417,23 +409,23 @@ python-versions = ">=3.5" [package.dependencies] packaging = "*" -pyyaml = "*" toml = "*" [package.extras] +conda = ["pyyaml"] pipenv = ["pipenv"] [[package]] name = "filelock" -version = "3.6.0" +version = "3.8.0" description = "A platform independent file lock." 
category = "dev" optional = false python-versions = ">=3.7" [package.extras] -docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"] -testing = ["covdefaults (>=1.2.0)", "coverage (>=4)", "pytest (>=4)", "pytest-cov", "pytest-timeout (>=1.4.2)"] +docs = ["furo (>=2022.6.21)", "sphinx (>=5.1.1)", "sphinx-autodoc-typehints (>=1.19.1)"] +testing = ["covdefaults (>=2.2)", "coverage (>=6.4.2)", "pytest (>=7.1.2)", "pytest-cov (>=3)", "pytest-timeout (>=2.1)"] [[package]] name = "flake8" @@ -502,11 +494,11 @@ flake8 = "*" [[package]] name = "flake8-rst-docstrings" -version = "0.2.5" +version = "0.2.7" description = "Python docstring reStructuredText (RST) validator" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] flake8 = ">=3.0.0" @@ -515,28 +507,29 @@ restructuredtext-lint = "*" [[package]] name = "fonttools" -version = "4.29.1" +version = "4.37.1" description = "Tools to manipulate font files" category = "main" optional = false python-versions = ">=3.7" [package.extras] -all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=14.0.0)", "xattr"] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=14.0.0)", "xattr", "zopfli (>=0.1.4)"] graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["scipy", "munkres"] +interpolatable = ["munkres", "scipy"] lxml = ["lxml (>=4.0,<5)"] pathops = ["skia-pathops (>=0.5.0)"] plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] symfont = ["sympy"] type1 = ["xattr"] ufo = ["fs (>=2.2.0,<3)"] unicode = ["unicodedata2 (>=14.0.0)"] -woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] [[package]] name = "fsspec" -version = "2022.2.0" +version = "2022.8.2" description = "File-system specification" category = "main" optional = false @@ -547,7 +540,7 @@ abfs = ["adlfs"] adl = ["adlfs"] arrow = ["pyarrow (>=1)"] dask = ["dask", "distributed"] -dropbox = ["dropboxdrivefs", "requests", "dropbox"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] entrypoints = ["importlib-metadata"] fuse = ["fusepy"] gcs = ["gcsfs"] @@ -556,13 +549,14 @@ github = ["requests"] gs = ["gcsfs"] gui = ["panel"] hdfs = ["pyarrow (>=1)"] -http = ["requests", "aiohttp"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] libarchive = ["libarchive-c"] oci = ["ocifs"] s3 = ["s3fs"] sftp = ["paramiko"] smb = ["smbprotocol"] ssh = ["paramiko"] +tqdm = ["tqdm"] [[package]] name = "gitdb" @@ -588,7 +582,7 @@ gitdb = ">=4.0.1,<5" [[package]] name = "h5py" -version = "3.6.0" +version = "3.7.0" description = "Read and write HDF5 files from Python" category = "main" optional = false @@ -599,7 +593,7 @@ numpy = ">=1.14.5" [[package]] name = "identify" -version = "2.4.11" +version = "2.5.5" description = "File identification library for Python" category = "dev" optional = false @@ -618,7 +612,7 @@ python-versions = ">=3.5" [[package]] name = "imagesize" -version = "1.3.0" +version = "1.4.1" description = "Getting image size from png/jpeg/jpeg2000/gif file" category = "main" optional = false @@ -626,7 +620,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = 
"importlib-metadata" -version = "4.11.2" +version = "4.12.0" description = "Read metadata from Python packages" category = "main" optional = false @@ -636,9 +630,9 @@ python-versions = ">=3.7" zipp = ">=0.5" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +docs = ["jaraco.packaging (>=9)", "rst.linker (>=1.9)", "sphinx"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "iniconfig" @@ -650,11 +644,11 @@ python-versions = "*" [[package]] name = "jinja2" -version = "3.0.3" +version = "3.1.2" description = "A very fast and expressive template engine." category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] MarkupSafe = ">=2.0" @@ -676,7 +670,7 @@ jinja2 = "*" [[package]] name = "kiwisolver" -version = "1.3.2" +version = "1.4.4" description = "A fast implementation of the Cassowary constraint solver" category = "main" optional = false @@ -696,23 +690,23 @@ tornado = {version = "*", markers = "python_version > \"2.7\""} [[package]] name = "llvmlite" -version = "0.36.0" +version = "0.39.1" description = "lightweight wrapper around basic LLVM functionality" category = "main" optional = false -python-versions = ">=3.6,<3.10" +python-versions = ">=3.7" [[package]] name = "locket" -version = "0.2.1" -description = "File-based locks for Python for Linux and Windows" +version = "1.0.0" +description = "File-based locks for Python on Linux and Windows" category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "markupsafe" -version = "2.1.0" +version = "2.1.1" description = "Safely add untrusted strings to HTML/XML markup." category = "main" optional = false @@ -720,7 +714,7 @@ python-versions = ">=3.7" [[package]] name = "matplotlib" -version = "3.5.1" +version = "3.5.3" description = "Python plotting package" category = "main" optional = false @@ -735,7 +729,7 @@ packaging = ">=20.0" pillow = ">=6.2.0" pyparsing = ">=2.2.1" python-dateutil = ">=2.7" -setuptools_scm = ">=4" +setuptools_scm = ">=4,<7" [[package]] name = "mccabe" @@ -747,20 +741,21 @@ python-versions = "*" [[package]] name = "mypy" -version = "0.910" +version = "0.971" description = "Optional static typing for Python" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [package.dependencies] -mypy-extensions = ">=0.4.3,<0.5.0" -toml = "*" -typing-extensions = ">=3.7.4" +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] -python2 = ["typed-ast (>=1.4.0,<1.5.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] [[package]] name = "mypy-extensions" @@ -772,7 +767,7 @@ python-versions = "*" [[package]] name = "natsort" -version = "8.1.0" +version = "8.2.0" description = "Simple yet flexible natural sorting in Python." 
category = "main" optional = false @@ -784,26 +779,26 @@ icu = ["PyICU (>=1.0.0)"] [[package]] name = "nodeenv" -version = "1.6.0" +version = "1.7.0" description = "Node.js virtual environment builder" category = "dev" optional = false -python-versions = "*" +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" [[package]] name = "nox" -version = "2022.1.7" +version = "2022.8.7" description = "Flexible test automation." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -argcomplete = ">=1.9.4,<2.0" +argcomplete = ">=1.9.4,<3.0" colorlog = ">=2.6.1,<7.0.0" packaging = ">=20.9" -py = ">=1.4.0,<2.0.0" -virtualenv = ">=14.0.0" +py = ">=1.4,<2.0.0" +virtualenv = ">=14" [package.extras] tox_to_nox = ["jinja2", "tox"] @@ -823,19 +818,20 @@ tomlkit = ">=0.7.0,<0.8.0" [[package]] name = "numba" -version = "0.53.1" +version = "0.56.2" description = "compiling Python code using LLVM" category = "main" optional = false -python-versions = ">=3.6,<3.10" +python-versions = ">=3.7" [package.dependencies] -llvmlite = ">=0.36.0rc1,<0.37" -numpy = ">=1.15" +importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} +llvmlite = ">=0.39.0dev0,<0.40" +numpy = ">=1.18,<1.24" [[package]] name = "numpy" -version = "1.22.3" +version = "1.23.3" description = "NumPy is the fundamental package for array computing with Python." category = "main" optional = false @@ -854,7 +850,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "pandas" -version = "1.4.1" +version = "1.4.4" description = "Powerful data structures for data analysis, time series, and statistics" category = "main" optional = false @@ -874,26 +870,26 @@ test = ["hypothesis (>=5.5.3)", "pytest (>=6.0)", "pytest-xdist (>=1.31)"] [[package]] name = "partd" -version = "1.2.0" +version = "1.3.0" description = "Appendable key-value storage" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [package.dependencies] locket = "*" toolz = "*" [package.extras] -complete = ["numpy (>=1.9.0)", "pandas (>=0.19.0)", "pyzmq", "blosc"] +complete = ["blosc", "numpy (>=1.9.0)", "pandas (>=0.19.0)", "pyzmq"] [[package]] name = "pathspec" -version = "0.9.0" +version = "0.10.1" description = "Utility library for gitignore style pattern matching of file paths." category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.7" [[package]] name = "patsy" @@ -912,7 +908,7 @@ test = ["pytest", "pytest-cov", "scipy"] [[package]] name = "pbr" -version = "5.8.1" +version = "5.10.0" description = "Python Build Reasonableness" category = "main" optional = false @@ -931,22 +927,26 @@ flake8-polyfill = ">=1.0.2,<2" [[package]] name = "pillow" -version = "9.0.1" +version = "9.2.0" description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + [[package]] name = "platformdirs" -version = "2.5.1" +version = "2.5.2" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
category = "dev" optional = false python-versions = ">=3.7" [package.extras] -docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] +docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx (>=4)", "sphinx-autodoc-typehints (>=1.12)"] test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)"] [[package]] @@ -971,11 +971,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "pre-commit" -version = "2.17.0" +version = "2.20.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." category = "dev" optional = false -python-versions = ">=3.6.1" +python-versions = ">=3.7" [package.dependencies] cfgv = ">=2.0.0" @@ -987,19 +987,19 @@ virtualenv = ">=20.0.8" [[package]] name = "pre-commit-hooks" -version = "4.1.0" +version = "4.3.0" description = "Some out-of-the-box hooks for pre-commit." category = "dev" optional = false -python-versions = ">=3.6.1" +python-versions = ">=3.7" [package.dependencies] "ruamel.yaml" = ">=0.15" -toml = "*" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} [[package]] name = "prompt-toolkit" -version = "3.0.28" +version = "3.0.31" description = "Library for building powerful interactive command lines in Python" category = "dev" optional = false @@ -1073,15 +1073,18 @@ integrations = ["cryptography"] [[package]] name = "pygments" -version = "2.11.2" +version = "2.13.0" description = "Pygments is a syntax highlighting package written in Python." category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" + +[package.extras] +plugins = ["importlib-metadata"] [[package]] name = "pyjwt" -version = "2.3.0" +version = "2.4.0" description = "JSON Web Token implementation in Python" category = "dev" optional = false @@ -1089,9 +1092,9 @@ python-versions = ">=3.6" [package.extras] crypto = ["cryptography (>=3.3.1)"] -dev = ["sphinx", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.3.1)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "mypy", "pre-commit"] +dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] -tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"] +tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pynacl" @@ -1106,39 +1109,38 @@ cffi = ">=1.4.1" [package.extras] docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] -tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyparsing" -version = "3.0.7" -description = "Python parsing module" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.6.8" [package.extras] diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pytest" -version = "6.2.5" +version = "7.1.3" description = "pytest: simple powerful testing with Python" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" py = ">=1.8.2" 
-toml = "*" +tomli = ">=1.0.0" [package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] [[package]] name = "python-dateutil" @@ -1153,7 +1155,7 @@ six = ">=1.5" [[package]] name = "python-slugify" -version = "6.1.1" +version = "6.1.2" description = "A Python slugify application that also handles Unicode" category = "dev" optional = false @@ -1167,7 +1169,7 @@ unidecode = ["Unidecode (>=1.1.1)"] [[package]] name = "pytz" -version = "2021.3" +version = "2022.2.1" description = "World timezone definitions, modern and historical" category = "main" optional = false @@ -1193,11 +1195,11 @@ python-versions = ">=3.6,<4.0" prompt_toolkit = ">=2.0,<4.0" [package.extras] -docs = ["Sphinx (>=3.3,<4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)"] +docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)"] [[package]] name = "regex" -version = "2022.3.2" +version = "2022.9.13" description = "Alternative regular expression module, to replace re." category = "dev" optional = false @@ -1205,32 +1207,32 @@ python-versions = ">=3.6" [[package]] name = "reorder-python-imports" -version = "2.7.1" +version = "2.8.0" description = "Tool for reordering python imports" category = "dev" optional = false python-versions = ">=3.7" [package.dependencies] -"aspy.refactor-imports" = ">=2.2.1" +"aspy.refactor-imports" = ">=2.3.0,<3" [[package]] name = "requests" -version = "2.27.1" +version = "2.28.1" description = "Python HTTP for Humans." 
category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +python-versions = ">=3.7, <4" [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} -idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +charset-normalizer = ">=2,<3" +idna = ">=2.5,<4" urllib3 = ">=1.21.1,<1.27" [package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] -use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "restructuredtext-lint" @@ -1298,14 +1300,14 @@ requests = "*" [[package]] name = "scipy" -version = "1.8.0" +version = "1.9.1" description = "SciPy: Scientific Library for Python" category = "main" optional = false -python-versions = ">=3.8,<3.11" +python-versions = ">=3.8,<3.12" [package.dependencies] -numpy = ">=1.17.3,<1.25.0" +numpy = ">=1.18.5,<1.25.0" [[package]] name = "seaborn" @@ -1375,14 +1377,14 @@ numpy = ">=1.17" scipy = ">=0.19" [package.extras] -all = ["dask", "pytest (>=3.5)", "pytest-black", "pytest-cov", "tox", "sphinx", "sphinx-rtd-theme"] +all = ["dask", "pytest (>=3.5)", "pytest-black", "pytest-cov", "sphinx", "sphinx-rtd-theme", "tox"] docs = ["sphinx", "sphinx-rtd-theme"] tests = ["dask", "pytest (>=3.5)", "pytest-black", "pytest-cov"] tox = ["dask", "pytest (>=3.5)", "pytest-black", "pytest-cov", "tox"] [[package]] name = "sphinx" -version = "4.4.0" +version = "4.5.0" description = "Python documentation generator" category = "main" optional = false @@ -1409,8 +1411,8 @@ sphinxcontrib-serializinghtml = ">=1.1.5" [package.extras] docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "docutils-stubs", "types-typed-ast", "types-requests"] -test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"] +lint = ["docutils-stubs", "flake8 (>=3.5.0)", "isort", "mypy (>=0.931)", "types-requests", "types-typed-ast"] +test = ["cython", "html5lib", "pytest", "pytest-cov", "typed-ast"] [[package]] name = "sphinx-autobuild" @@ -1430,18 +1432,18 @@ test = ["pytest", "pytest-cov"] [[package]] name = "sphinx-autodoc-typehints" -version = "1.17.0" +version = "1.19.1" description = "Type hints (PEP 484) support for the Sphinx autodoc extension" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] -Sphinx = ">=4" +Sphinx = ">=4.5" [package.extras] -testing = ["covdefaults (>=2)", "coverage (>=6)", "diff-cover (>=6.4)", "nptyping (>=1)", "pytest (>=6)", "pytest-cov (>=3)", "sphobjinv (>=2)", "typing-extensions (>=3.5)"] -type_comments = ["typed-ast (>=1.4.0)"] +testing = ["covdefaults (>=2.2)", "coverage (>=6.3)", "diff-cover (>=6.4)", "nptyping (>=2.1.2)", "pytest (>=7.1)", "pytest-cov (>=3)", "sphobjinv (>=2)", "typing-extensions (>=4.1)"] +type_comments = ["typed-ast (>=1.5.2)"] [[package]] name = "sphinx-click" @@ -1480,7 +1482,7 @@ docutils = "<0.17" sphinx = "*" [package.extras] -dev = ["transifex-client", "sphinxcontrib-httpdomain", "bump2version"] +dev = ["bump2version", "sphinxcontrib-httpdomain", "transifex-client"] [[package]] name = "sphinxcontrib-applehelp" @@ -1491,7 +1493,7 @@ optional = false python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1503,7 +1505,7 @@ optional = false python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", 
"docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1515,8 +1517,8 @@ optional = false python-versions = ">=3.6" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest", "html5lib"] +lint = ["docutils-stubs", "flake8", "mypy"] +test = ["html5lib", "pytest"] [[package]] name = "sphinxcontrib-jsmath" @@ -1527,7 +1529,7 @@ optional = false python-versions = ">=3.5" [package.extras] -test = ["pytest", "flake8", "mypy"] +test = ["flake8", "mypy", "pytest"] [[package]] name = "sphinxcontrib-qthelp" @@ -1538,7 +1540,7 @@ optional = false python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] @@ -1550,16 +1552,16 @@ optional = false python-versions = ">=3.5" [package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] +lint = ["docutils-stubs", "flake8", "mypy"] test = ["pytest"] [[package]] name = "stevedore" -version = "3.5.0" +version = "4.0.0" description = "Manage dynamic plugins for Python applications" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" [package.dependencies] pbr = ">=2.0.0,<2.1.0 || >2.1.0" @@ -1598,7 +1600,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "toolz" -version = "0.11.2" +version = "0.12.0" description = "List processing tools and functional utilities" category = "main" optional = false @@ -1606,11 +1608,11 @@ python-versions = ">=3.5" [[package]] name = "tornado" -version = "6.1" +version = "6.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." category = "dev" optional = false -python-versions = ">= 3.5" +python-versions = ">= 3.7" [[package]] name = "typeguard" @@ -1621,8 +1623,8 @@ optional = false python-versions = ">=3.5.3" [package.extras] -doc = ["sphinx-rtd-theme", "sphinx-autodoc-typehints (>=1.2.0)"] -test = ["pytest", "typing-extensions", "mypy"] +doc = ["sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["mypy", "pytest", "typing-extensions"] [[package]] name = "types-attrs" @@ -1642,7 +1644,7 @@ python-versions = "*" [[package]] name = "types-requests" -version = "2.27.11" +version = "2.28.10" description = "Typing stubs for requests" category = "dev" optional = false @@ -1653,7 +1655,7 @@ types-urllib3 = "<1.27" [[package]] name = "types-urllib3" -version = "1.26.10" +version = "1.26.24" description = "Typing stubs for urllib3" category = "dev" optional = false @@ -1661,42 +1663,41 @@ python-versions = "*" [[package]] name = "typing-extensions" -version = "4.1.1" -description = "Backported and Experimental Type Hints for Python 3.6+" +version = "4.3.0" +description = "Backported and Experimental Type Hints for Python 3.7+" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "urllib3" -version = "1.26.8" +version = "1.26.12" description = "HTTP library with thread-safe connection pooling, file post, and more." 
category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" [package.extras] -brotli = ["brotlipy (>=0.6.0)"] -secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.13.3" +version = "20.16.5" description = "Virtual Python Environment builder" category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +python-versions = ">=3.6" [package.dependencies] -distlib = ">=0.3.1,<1" -filelock = ">=3.2,<4" -platformdirs = ">=2,<3" -six = ">=1.9.0,<2" +distlib = ">=0.3.5,<1" +filelock = ">=3.4.1,<4" +platformdirs = ">=2.4,<3" [package.extras] -docs = ["proselint (>=0.10.2)", "sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=21.3)"] -testing = ["coverage (>=4)", "coverage-enable-subprocess (>=1)", "flaky (>=3)", "pytest (>=4)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.1)", "pytest-mock (>=2)", "pytest-randomly (>=1)", "pytest-timeout (>=1)", "packaging (>=20.0)"] +docs = ["proselint (>=0.13)", "sphinx (>=5.1.1)", "sphinx-argparse (>=0.3.1)", "sphinx-rtd-theme (>=1)", "towncrier (>=21.9)"] +testing = ["coverage (>=6.2)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=21.3)", "pytest (>=7.0.1)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.6.1)", "pytest-randomly (>=3.10.3)", "pytest-timeout (>=2.1)"] [[package]] name = "wcwidth" @@ -1708,7 +1709,7 @@ python-versions = "*" [[package]] name = "wrapt" -version = "1.13.3" +version = "1.14.1" description = "Module for decorators, wrappers and monkey patching." 
category = "dev" optional = false @@ -1728,11 +1729,11 @@ Pygments = {version = "*", optional = true, markers = "extra == \"colors\""} six = "*" [package.extras] -all = ["six", "codecov", "scikit-build", "cmake", "ninja", "pybind11", "pygments", "colorama", "pytest", "pytest", "pytest-cov", "pytest", "pytest", "pytest-cov", "typing", "nbformat", "nbconvert", "jupyter-client", "ipython", "ipykernel", "pytest", "pytest-cov"] -colors = ["pygments", "colorama"] -jupyter = ["nbformat", "nbconvert", "jupyter-client", "ipython", "ipykernel"] -optional = ["pygments", "colorama", "nbformat", "nbconvert", "jupyter-client", "ipython", "ipykernel"] -tests = ["codecov", "scikit-build", "cmake", "ninja", "pybind11", "pytest", "pytest", "pytest-cov", "pytest", "pytest", "pytest-cov", "typing", "nbformat", "nbconvert", "jupyter-client", "ipython", "ipykernel", "pytest", "pytest-cov"] +all = ["cmake", "codecov", "colorama", "ipykernel", "ipython", "jupyter-client", "nbconvert", "nbformat", "ninja", "pybind11", "pygments", "pytest", "pytest", "pytest", "pytest", "pytest", "pytest-cov", "pytest-cov", "pytest-cov", "scikit-build", "six", "typing"] +colors = ["colorama", "pygments"] +jupyter = ["ipykernel", "ipython", "jupyter-client", "nbconvert", "nbformat"] +optional = ["colorama", "ipykernel", "ipython", "jupyter-client", "nbconvert", "nbformat", "pygments"] +tests = ["cmake", "codecov", "ipykernel", "ipython", "jupyter-client", "nbconvert", "nbformat", "ninja", "pybind11", "pytest", "pytest", "pytest", "pytest", "pytest", "pytest-cov", "pytest-cov", "pytest-cov", "scikit-build", "typing"] [[package]] name = "xlrd" @@ -1744,20 +1745,20 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "zipp" -version = "3.7.0" +version = "3.8.1" description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false python-versions = ">=3.7" [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"] +testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] [metadata] lock-version = "1.1" python-versions = ">=3.8.0, <3.10.0" -content-hash = "57b077deda1e88aafb91541468754da0ed431d888c3f22ceac6819f734805aa7" +content-hash = "57ce11025e1ec208a3d9406765fc223dd905e81b7047673ffcf6b67710269b41" [metadata.files] alabaster = [ @@ -1773,29 +1774,19 @@ appdirs = [ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] argcomplete = [ - {file = "argcomplete-1.12.3-py2.py3-none-any.whl", hash = "sha256:291f0beca7fd49ce285d2f10e4c1c77e9460cf823eef2de54df0c0fec88b0d81"}, - {file = "argcomplete-1.12.3.tar.gz", hash = "sha256:2c7dbffd8c045ea534921e63b0be6fe65e88599990d8dc408ac8c542b72a5445"}, + {file = "argcomplete-2.0.0-py2.py3-none-any.whl", hash = "sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e"}, + {file = "argcomplete-2.0.0.tar.gz", hash = "sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20"}, ] arrow = [ - {file = "arrow-1.2.2-py3-none-any.whl", hash = 
"sha256:d622c46ca681b5b3e3574fcb60a04e5cc81b9625112d5fb2b44220c36c892177"}, - {file = "arrow-1.2.2.tar.gz", hash = "sha256:05caf1fd3d9a11a1135b2b6f09887421153b94558e5ef4d090b567b47173ac2b"}, + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, ] "aspy.refactor-imports" = [ - {file = "aspy.refactor_imports-2.2.1-py2.py3-none-any.whl", hash = "sha256:ace9ca78abf6cfdd20ea1a321b75b20c8cc2c1af58aecb9dc4ba9d6f70f74645"}, - {file = "aspy.refactor_imports-2.2.1.tar.gz", hash = "sha256:f5b2fcbf9fd68361168588f14eda64d502d029eefe632d15094cd0683ae12984"}, -] -atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, -] -attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, -] -babel = [ - {file = "Babel-2.9.1-py2.py3-none-any.whl", hash = "sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9"}, - {file = "Babel-2.9.1.tar.gz", hash = "sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"}, + {file = "aspy.refactor_imports-2.3.0-py2.py3-none-any.whl", hash = "sha256:a60432fc0c0b948aa371da520b896ddcbbee71b1820eeda6d2c04f039bac13b9"}, + {file = "aspy.refactor_imports-2.3.0.tar.gz", hash = "sha256:5a7775b31e55a762f807c218a3f9f1a7ff1313d766605a301f2ed937cdfa242a"}, ] +attrs = [] +babel = [] bandit = [ {file = "bandit-1.7.2-py3-none-any.whl", hash = "sha256:e20402cadfd126d85b68ed4c8862959663c8c372dbbb1fca8f8e2c9f55a067ec"}, {file = "bandit-1.7.2.tar.gz", hash = "sha256:6d11adea0214a43813887bfe71a377b5a9955e4c826c8ffd341b494e3ab25260"}, @@ -1813,88 +1804,93 @@ cached-property = [ {file = "cached_property-1.5.2-py2.py3-none-any.whl", hash = "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"}, ] certifi = [ - {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, - {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, + {file = "certifi-2022.6.15.1-py3-none-any.whl", hash = "sha256:43dadad18a7f168740e66944e4fa82c6611848ff9056ad910f8f7a3e46ab89e0"}, + {file = "certifi-2022.6.15.1.tar.gz", hash = "sha256:cffdcd380919da6137f76633531a5817e3a9f268575c128249fb637e4f9e73fb"}, ] cffi = [ - {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"}, - {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"}, - {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = 
"sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"}, - {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"}, - {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"}, - {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"}, - {file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"}, - {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"}, - {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"}, - {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"}, - {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"}, - {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"}, - {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"}, - {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"}, - {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"}, - {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"}, - {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"}, + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = 
"sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = 
"cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, ] cfgv = [ {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"}, {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, ] chardet = [ - {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"}, - {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"}, -] -charset-normalizer = [ - {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, - {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, -] -click = [ - {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"}, - {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"}, + {file = "chardet-5.0.0-py3-none-any.whl", hash = 
"sha256:d3e64f022d254183001eccc5db4040520c0f23b1a3f33d6413e099eb7f126557"}, + {file = "chardet-5.0.0.tar.gz", hash = "sha256:0368df2bfd78b5fc20572bb4e9bb7fb53e2c094f60ae9993339e8671d0afb8aa"}, ] +charset-normalizer = [] +click = [] cloudpickle = [ - {file = "cloudpickle-2.0.0-py3-none-any.whl", hash = "sha256:6b2df9741d06f43839a3275c4e6632f7df6487a1f181f5f46a052d3c917c3d11"}, - {file = "cloudpickle-2.0.0.tar.gz", hash = "sha256:5cd02f3b417a783ba84a4ec3e290ff7929009fe51f6405423cfccfadd43ba4a4"}, -] -colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, + {file = "cloudpickle-2.2.0-py3-none-any.whl", hash = "sha256:7428798d5926d8fcbfd092d18d01a2a03daf8237d8fcdc8095d256b8490796f0"}, + {file = "cloudpickle-2.2.0.tar.gz", hash = "sha256:3f4219469c55453cfe4737e564b67c2a149109dabf7f242478948b895f61106f"}, ] +colorama = [] colorlog = [ - {file = "colorlog-6.6.0-py2.py3-none-any.whl", hash = "sha256:351c51e866c86c3217f08e4b067a7974a678be78f07f85fc2d55b8babde6d94e"}, - {file = "colorlog-6.6.0.tar.gz", hash = "sha256:344f73204009e4c83c5b6beb00b3c45dc70fcdae3c80db919e0a4171d006fde8"}, + {file = "colorlog-6.7.0-py2.py3-none-any.whl", hash = "sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662"}, + {file = "colorlog-6.7.0.tar.gz", hash = "sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5"}, ] commonmark = [ {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, @@ -1905,8 +1901,8 @@ cookiecutter = [ {file = "cookiecutter-1.7.3.tar.gz", hash = "sha256:6b9a4d72882e243be077a7397d0f1f76fe66cf3df91f3115dbb5330e214fa457"}, ] cookietemple = [ - {file = "cookietemple-1.4.0-py3-none-any.whl", hash = "sha256:b3b8e8d7dc193a20728805bf1a0c6cd3844e560fa806e59c7573a48c963f29a2"}, - {file = "cookietemple-1.4.0.tar.gz", hash = "sha256:33deec7ca148546f4de804c2c4fcd76e8fd5e199fe1bd0383f7a7afbc02b4512"}, + {file = "cookietemple-1.4.1-py3-none-any.whl", hash = "sha256:7091b9698c9efff62ca84e97470dd1eeb3a91e535c4c0d95816cbaba48bb7931"}, + {file = "cookietemple-1.4.1.tar.gz", hash = "sha256:c6c0d2d129d56a4e5c0f42ef550de393f14be30e3f3bd4f877b1a0e73910547e"}, ] coverage = [ {file = "coverage-5.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf"}, @@ -1963,26 +1959,26 @@ coverage = [ {file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, ] cryptography = [ - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"}, - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"}, - {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"}, - {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"}, - {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"}, + {file = "cryptography-36.0.2-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:4e2dddd38a5ba733be6a025a1475a9f45e4e41139d1321f412c6b360b19070b6"}, + {file = "cryptography-36.0.2-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:4881d09298cd0b669bb15b9cfe6166f16fc1277b4ed0d04a22f3d6430cb30f1d"}, + {file = "cryptography-36.0.2-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea634401ca02367c1567f012317502ef3437522e2fc44a3ea1844de028fa4b84"}, + {file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:7be666cc4599b415f320839e36367b273db8501127b38316f3b9f22f17a0b815"}, + {file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8241cac0aae90b82d6b5c443b853723bcc66963970c67e56e71a2609dc4b5eaf"}, + {file = "cryptography-36.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b2d54e787a884ffc6e187262823b6feb06c338084bbe80d45166a1cb1c6c5bf"}, + {file = "cryptography-36.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = 
"sha256:c2c5250ff0d36fd58550252f54915776940e4e866f38f3a7866d92b32a654b86"}, + {file = "cryptography-36.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ec6597aa85ce03f3e507566b8bcdf9da2227ec86c4266bd5e6ab4d9e0cc8dab2"}, + {file = "cryptography-36.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ca9f686517ec2c4a4ce930207f75c00bf03d94e5063cbc00a1dc42531511b7eb"}, + {file = "cryptography-36.0.2-cp36-abi3-win32.whl", hash = "sha256:f64b232348ee82f13aac22856515ce0195837f6968aeaa94a3d0353ea2ec06a6"}, + {file = "cryptography-36.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:53e0285b49fd0ab6e604f4c5d9c5ddd98de77018542e88366923f152dbeb3c29"}, + {file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:32db5cc49c73f39aac27574522cecd0a4bb7384e71198bc65a0d23f901e89bb7"}, + {file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b3d199647468d410994dbeb8cec5816fb74feb9368aedf300af709ef507e3e"}, + {file = "cryptography-36.0.2-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:da73d095f8590ad437cd5e9faf6628a218aa7c387e1fdf67b888b47ba56a17f0"}, + {file = "cryptography-36.0.2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:0a3bf09bb0b7a2c93ce7b98cb107e9170a90c51a0162a20af1c61c765b90e60b"}, + {file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8897b7b7ec077c819187a123174b645eb680c13df68354ed99f9b40a50898f77"}, + {file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82740818f2f240a5da8dfb8943b360e4f24022b093207160c77cadade47d7c85"}, + {file = "cryptography-36.0.2-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1f64a62b3b75e4005df19d3b5235abd43fa6358d5516cfc43d87aeba8d08dd51"}, + {file = "cryptography-36.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e167b6b710c7f7bc54e67ef593f8731e1f45aa35f8a8a7b72d6e42ec76afd4b3"}, + {file = "cryptography-36.0.2.tar.gz", hash = "sha256:70f8f4f7bb2ac9f340655cbac89d68c527af5bb4387522a8413e841e3e6628c9"}, ] cycler = [ {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, @@ -1993,29 +1989,23 @@ darglint = [ {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"}, ] dask = [ - {file = "dask-2022.2.1-py3-none-any.whl", hash = "sha256:cb91f3853413e857c2d8b872a3ffe189fbd55a5cc01ab61e204079240c28004d"}, - {file = "dask-2022.2.1.tar.gz", hash = "sha256:b699da18d147da84c6c0be26d724dc1ec384960bf1f23c8db4f90740c9ac0a89"}, + {file = "dask-2022.9.0-py3-none-any.whl", hash = "sha256:9238deb870ee9132b375f7892c4a9c5a919118f3c0025af27e633b9cb33ce195"}, + {file = "dask-2022.9.0.tar.gz", hash = "sha256:0c3d23638c6b3b03d6eb5de0bcdd5bb8a386965eb1691c19f9d14595d9e5eaac"}, ] deprecated = [ {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"}, ] -distlib = [ - {file = "distlib-0.3.4-py2.py3-none-any.whl", hash = "sha256:6564fe0a8f51e734df6333d08b8b94d4ea8ee6b99b5ed50613f731fd4089f34b"}, - {file = "distlib-0.3.4.zip", hash = "sha256:e4b58818180336dc9c529bfb9a0b58728ffc09ad92027a3f30b7cd91e3458579"}, -] +distlib = [] docutils = [ {file = "docutils-0.16-py2.py3-none-any.whl", hash = 
"sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, ] dparse = [ - {file = "dparse-0.5.1-py3-none-any.whl", hash = "sha256:e953a25e44ebb60a5c6efc2add4420c177f1d8404509da88da9729202f306994"}, - {file = "dparse-0.5.1.tar.gz", hash = "sha256:a1b5f169102e1c894f9a7d5ccf6f9402a836a5d24be80a986c7ce9eaed78f367"}, -] -filelock = [ - {file = "filelock-3.6.0-py3-none-any.whl", hash = "sha256:f8314284bfffbdcfa0ff3d7992b023d4c628ced6feb957351d4c48d059f56bc0"}, - {file = "filelock-3.6.0.tar.gz", hash = "sha256:9cd540a9352e432c7246a48fe4e8712b10acb1df2ad1f30e8c070b82ae1fed85"}, + {file = "dparse-0.6.0-py3-none-any.whl", hash = "sha256:3cb489bd06bfa8d285c85f7dec69d9ee8f89c29dd5f4ab48e159746dc13b78b2"}, + {file = "dparse-0.6.0.tar.gz", hash = "sha256:57068bb61859b1676c6beb10f399906eecb41a75b5d3fbc99d0311059cb67213"}, ] +filelock = [] flake8 = [ {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"}, {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"}, @@ -2035,17 +2025,14 @@ flake8-polyfill = [ {file = "flake8-polyfill-1.0.2.tar.gz", hash = "sha256:e44b087597f6da52ec6393a709e7108b2905317d0c0b744cdca6208e670d8eda"}, {file = "flake8_polyfill-1.0.2-py2.py3-none-any.whl", hash = "sha256:12be6a34ee3ab795b19ca73505e7b55826d5f6ad7230d31b18e106400169b9e9"}, ] -flake8-rst-docstrings = [ - {file = "flake8-rst-docstrings-0.2.5.tar.gz", hash = "sha256:4fe93f997dea45d9d3c8bd220f12f0b6c359948fb943b5b48021a3f927edd816"}, - {file = "flake8_rst_docstrings-0.2.5-py3-none-any.whl", hash = "sha256:b99d9041b769b857efe45a448dc8c71b1bb311f9cacbdac5de82f96498105082"}, -] +flake8-rst-docstrings = [] fonttools = [ - {file = "fonttools-4.29.1-py3-none-any.whl", hash = "sha256:1933415e0fbdf068815cb1baaa1f159e17830215f7e8624e5731122761627557"}, - {file = "fonttools-4.29.1.zip", hash = "sha256:2b18a172120e32128a80efee04cff487d5d140fe7d817deb648b2eee023a40e4"}, + {file = "fonttools-4.37.1-py3-none-any.whl", hash = "sha256:fff6b752e326c15756c819fe2fe7ceab69f96a1dbcfe8911d0941cdb49905007"}, + {file = "fonttools-4.37.1.zip", hash = "sha256:4606e1a88ee1f6699d182fea9511bd9a8a915d913eab4584e5226da1180fcce7"}, ] fsspec = [ - {file = "fsspec-2022.2.0-py3-none-any.whl", hash = "sha256:eb9c9d9aee49d23028deefffe53e87c55d3515512c63f57e893710301001449a"}, - {file = "fsspec-2022.2.0.tar.gz", hash = "sha256:20322c659538501f52f6caa73b08b2ff570b7e8ea30a86559721d090e473ad5c"}, + {file = "fsspec-2022.8.2-py3-none-any.whl", hash = "sha256:6374804a2c0d24f225a67d009ee1eabb4046ad00c793c3f6df97e426c890a1d9"}, + {file = "fsspec-2022.8.2.tar.gz", hash = "sha256:7f12b90964a98a7e921d27fb36be536ea036b73bf3b724ac0b0bd7b8e39c7c18"}, ] gitdb = [ {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, @@ -2055,385 +2042,358 @@ gitpython = [ {file = "GitPython-3.1.27-py3-none-any.whl", hash = "sha256:5b68b000463593e05ff2b261acff0ff0972df8ab1b70d3cdbd41b546c8b8fc3d"}, {file = "GitPython-3.1.27.tar.gz", hash = "sha256:1c885ce809e8ba2d88a29befeb385fcea06338d3640712b59ca623c220bb5704"}, ] -h5py = [ - {file = "h5py-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a5320837c60870911645e9a935099bdb2be6a786fcf0dac5c860f3b679e2de55"}, - {file = "h5py-3.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:98646e659bf8591a2177e12a4461dced2cad72da0ba4247643fd118db88880d2"}, - {file = "h5py-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:5996ff5adefd2d68c330a4265b6ef92e51b2fc674834a5990add5033bf109e20"}, - {file = "h5py-3.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c9a5529343a619fea777b7caa27d493595b28b5af8b005e8d1817559fcccf493"}, - {file = "h5py-3.6.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e2b49c48df05e19bb20b400b7ff7dc6f1ee36b84dc717c3771c468b33697b466"}, - {file = "h5py-3.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd9447633b0bafaf82190d9a8d56f3cb2e8d30169483aee67d800816e028190a"}, - {file = "h5py-3.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1c5acc660c458421e88c4c5fe092ce15923adfac4c732af1ac4fced683a5ea97"}, - {file = "h5py-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:35ab552c6f0a93365b3cb5664a5305f3920daa0a43deb5b2c547c52815ec46b9"}, - {file = "h5py-3.6.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:542781d50e1182b8fb619b1265dfe1c765e18215f818b0ab28b2983c28471325"}, - {file = "h5py-3.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f39242960b8d7f86f3056cc2546aa3047ff4835985f6483229af8f029e9c8db"}, - {file = "h5py-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ecedf16c613973622a334701f67edcc0249469f9daa0576e994fb20ac0405db"}, - {file = "h5py-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d8cacad89aa7daf3626fce106f7f2662ac35b14849df22d252d0d8fab9dc1c0b"}, - {file = "h5py-3.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dbaa1ed9768bf9ff04af0919acc55746e62b28333644f0251f38768313f31745"}, - {file = "h5py-3.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:954c5c39a09b5302f69f752c3bbf165d368a65c8d200f7d5655e0fa6368a75e6"}, - {file = "h5py-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:9fd8a14236fdd092a20c0bdf25c3aba3777718d266fabb0fdded4fcf252d1630"}, - {file = "h5py-3.6.0.tar.gz", hash = "sha256:8752d2814a92aba4e2b2a5922d2782d0029102d99caaf3c201a566bc0b40db29"}, -] +h5py = [] identify = [ - {file = "identify-2.4.11-py2.py3-none-any.whl", hash = "sha256:fd906823ed1db23c7a48f9b176a1d71cb8abede1e21ebe614bac7bdd688d9213"}, - {file = "identify-2.4.11.tar.gz", hash = "sha256:2986942d3974c8f2e5019a190523b0b0e2a07cb8e89bf236727fb4b26f27f8fd"}, + {file = "identify-2.5.5-py2.py3-none-any.whl", hash = "sha256:ef78c0d96098a3b5fe7720be4a97e73f439af7cf088ebf47b620aeaa10fadf97"}, + {file = "identify-2.5.5.tar.gz", hash = "sha256:322a5699daecf7c6fd60e68852f36f2ecbb6a36ff6e6e973e0d2bb6fca203ee6"}, ] idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] -imagesize = [ - {file = "imagesize-1.3.0-py2.py3-none-any.whl", hash = "sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c"}, - {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, -] -importlib-metadata = [ - {file = "importlib_metadata-4.11.2-py3-none-any.whl", hash = "sha256:d16e8c1deb60de41b8e8ed21c1a7b947b0bc62fab7e1d470bcdf331cea2e6735"}, - {file = "importlib_metadata-4.11.2.tar.gz", hash = "sha256:b36ffa925fe3139b2f6ff11d6925ffd4fa7bc47870165e3ac260ac7b4f91e6ac"}, -] +imagesize = [] +importlib-metadata = [] iniconfig = [ {file = 
"iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] -jinja2 = [ - {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, - {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, -] +jinja2 = [] jinja2-time = [ {file = "jinja2-time-0.2.0.tar.gz", hash = "sha256:d14eaa4d315e7688daa4969f616f226614350c48730bfa1692d2caebd8c90d40"}, {file = "jinja2_time-0.2.0-py2.py3-none-any.whl", hash = "sha256:d3eab6605e3ec8b7a0863df09cc1d23714908fa61aa6986a845c20ba488b4efa"}, ] kiwisolver = [ - {file = "kiwisolver-1.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1d819553730d3c2724582124aee8a03c846ec4362ded1034c16fb3ef309264e6"}, - {file = "kiwisolver-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d93a1095f83e908fc253f2fb569c2711414c0bfd451cab580466465b235b470"}, - {file = "kiwisolver-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4550a359c5157aaf8507e6820d98682872b9100ce7607f8aa070b4b8af6c298"}, - {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2210f28778c7d2ee13f3c2a20a3a22db889e75f4ec13a21072eabb5693801e84"}, - {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:82f49c5a79d3839bc8f38cb5f4bfc87e15f04cbafa5fbd12fb32c941cb529cfb"}, - {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9661a04ca3c950a8ac8c47f53cbc0b530bce1b52f516a1e87b7736fec24bfff0"}, - {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ddb500a2808c100e72c075cbb00bf32e62763c82b6a882d403f01a119e3f402"}, - {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72be6ebb4e92520b9726d7146bc9c9b277513a57a38efcf66db0620aec0097e0"}, - {file = "kiwisolver-1.3.2-cp310-cp310-win32.whl", hash = "sha256:83d2c9db5dfc537d0171e32de160461230eb14663299b7e6d18ca6dca21e4977"}, - {file = "kiwisolver-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:cba430db673c29376135e695c6e2501c44c256a81495da849e85d1793ee975ad"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4116ba9a58109ed5e4cb315bdcbff9838f3159d099ba5259c7c7fb77f8537492"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19554bd8d54cf41139f376753af1a644b63c9ca93f8f72009d50a2080f870f77"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a4cf5bbdc861987a7745aed7a536c6405256853c94abc9f3287c3fa401b174"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0007840186bacfaa0aba4466d5890334ea5938e0bb7e28078a0eb0e63b5b59d5"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec2eba188c1906b05b9b49ae55aae4efd8150c61ba450e6721f64620c50b59eb"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3dbb3cea20b4af4f49f84cffaf45dd5f88e8594d18568e0225e6ad9dec0e7967"}, - {file = "kiwisolver-1.3.2-cp37-cp37m-win32.whl", hash = "sha256:5326ddfacbe51abf9469fe668944bc2e399181a2158cb5d45e1d40856b2a0589"}, - {file = 
"kiwisolver-1.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c6572c2dab23c86a14e82c245473d45b4c515314f1f859e92608dcafbd2f19b8"}, - {file = "kiwisolver-1.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b5074fb09429f2b7bc82b6fb4be8645dcbac14e592128beeff5461dcde0af09f"}, - {file = "kiwisolver-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:22521219ca739654a296eea6d4367703558fba16f98688bd8ce65abff36eaa84"}, - {file = "kiwisolver-1.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c358721aebd40c243894298f685a19eb0491a5c3e0b923b9f887ef1193ddf829"}, - {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ba5a1041480c6e0a8b11a9544d53562abc2d19220bfa14133e0cdd9967e97af"}, - {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44e6adf67577dbdfa2d9f06db9fbc5639afefdb5bf2b4dfec25c3a7fbc619536"}, - {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d45d1c74f88b9f41062716c727f78f2a59a5476ecbe74956fafb423c5c87a76"}, - {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70adc3658138bc77a36ce769f5f183169bc0a2906a4f61f09673f7181255ac9b"}, - {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6a5431940f28b6de123de42f0eb47b84a073ee3c3345dc109ad550a3307dd28"}, - {file = "kiwisolver-1.3.2-cp38-cp38-win32.whl", hash = "sha256:ee040a7de8d295dbd261ef2d6d3192f13e2b08ec4a954de34a6fb8ff6422e24c"}, - {file = "kiwisolver-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8dc3d842fa41a33fe83d9f5c66c0cc1f28756530cd89944b63b072281e852031"}, - {file = "kiwisolver-1.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a498bcd005e8a3fedd0022bb30ee0ad92728154a8798b703f394484452550507"}, - {file = "kiwisolver-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80efd202108c3a4150e042b269f7c78643420cc232a0a771743bb96b742f838f"}, - {file = "kiwisolver-1.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f8eb7b6716f5b50e9c06207a14172cf2de201e41912ebe732846c02c830455b9"}, - {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f441422bb313ab25de7b3dbfd388e790eceb76ce01a18199ec4944b369017009"}, - {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:30fa008c172355c7768159983a7270cb23838c4d7db73d6c0f6b60dde0d432c6"}, - {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f8f6c8f4f1cff93ca5058d6ec5f0efda922ecb3f4c5fb76181f327decff98b8"}, - {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba677bcaff9429fd1bf01648ad0901cea56c0d068df383d5f5856d88221fe75b"}, - {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7843b1624d6ccca403a610d1277f7c28ad184c5aa88a1750c1a999754e65b439"}, - {file = "kiwisolver-1.3.2-cp39-cp39-win32.whl", hash = "sha256:e6f5eb2f53fac7d408a45fbcdeda7224b1cfff64919d0f95473420a931347ae9"}, - {file = "kiwisolver-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:eedd3b59190885d1ebdf6c5e0ca56828beb1949b4dfe6e5d0256a461429ac386"}, - {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dedc71c8eb9c5096037766390172c34fb86ef048b8e8958b4e484b9e505d66bc"}, - {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:bf7eb45d14fc036514c09554bf983f2a72323254912ed0c3c8e697b62c4c158f"}, - {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2b65bd35f3e06a47b5c30ea99e0c2b88f72c6476eedaf8cfbc8e66adb5479dcf"}, - {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25405f88a37c5f5bcba01c6e350086d65e7465fd1caaf986333d2a045045a223"}, - {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:bcadb05c3d4794eb9eee1dddf1c24215c92fb7b55a80beae7a60530a91060560"}, - {file = "kiwisolver-1.3.2.tar.gz", hash = "sha256:fc4453705b81d03568d5b808ad8f09c77c47534f6ac2e72e733f9ca4714aa75c"}, + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"}, + {file = "kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"}, + {file = "kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"}, + {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"}, + {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"}, + {file = "kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"}, + {file = "kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"}, + {file = "kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"}, + {file = 
"kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"}, + {file = "kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"}, + {file = "kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"}, + {file = "kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"}, + {file = "kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", 
hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"}, + {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"}, ] livereload = [ {file = "livereload-2.6.3.tar.gz", hash = "sha256:776f2f865e59fde56490a56bcc6773b6917366bce0c267c60ee8aaf1a0959869"}, ] llvmlite = [ - {file = "llvmlite-0.36.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc0f9b9644b4ab0e4a5edb17f1531d791630c88858220d3cc688d6edf10da100"}, - {file = "llvmlite-0.36.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f7918dbac02b1ebbfd7302ad8e8307d7877ab57d782d5f04b70ff9696b53c21b"}, - {file = "llvmlite-0.36.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:7768658646c418b9b3beccb7044277a608bc8c62b82a85e73c7e5c065e4157c2"}, - {file = "llvmlite-0.36.0-cp36-cp36m-win32.whl", hash = "sha256:05f807209a360d39526d98141b6f281b9c7c771c77a4d1fc22002440642c8de2"}, - {file = "llvmlite-0.36.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d1fdd63c371626c25ad834e1c6297eb76cf2f093a40dbb401a87b6476ab4e34e"}, - {file = "llvmlite-0.36.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7c4e7066447305d5095d0b0a9cae7b835d2f0fde143456b3124110eab0856426"}, - {file = "llvmlite-0.36.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:9dad7e4bb042492914292aea3f4172eca84db731f9478250240955aedba95e08"}, - {file = "llvmlite-0.36.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:1ce5bc0a638d874a08d4222be0a7e48e5df305d094c2ff8dec525ef32b581551"}, - {file = "llvmlite-0.36.0-cp37-cp37m-win32.whl", hash = "sha256:dbedff0f6d417b374253a6bab39aa4b5364f1caab30c06ba8726904776fcf1cb"}, - {file = "llvmlite-0.36.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3b17fc4b0dd17bd29d7297d054e2915fad535889907c3f65232ee21f483447c5"}, - {file = "llvmlite-0.36.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b3a77e46e6053e2a86e607e87b97651dda81e619febb914824a927bff4e88737"}, - {file = "llvmlite-0.36.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:048a7c117641c9be87b90005684e64a6f33ea0897ebab1df8a01214a10d6e79a"}, - {file = "llvmlite-0.36.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7db4b0eef93125af1c4092c64a3c73c7dc904101117ef53f8d78a1a499b8d5f4"}, - {file = "llvmlite-0.36.0-cp38-cp38-win32.whl", hash = "sha256:50b1828bde514b31431b2bba1aa20b387f5625b81ad6e12fede430a04645e47a"}, - {file = "llvmlite-0.36.0-cp38-cp38-win_amd64.whl", hash = "sha256:f608bae781b2d343e15e080c546468c5a6f35f57f0446923ea198dd21f23757e"}, - {file = "llvmlite-0.36.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6a3abc8a8889aeb06bf9c4a7e5df5bc7bb1aa0aedd91a599813809abeec80b5a"}, - {file = "llvmlite-0.36.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:705f0323d931684428bb3451549603299bb5e17dd60fb979d67c3807de0debc1"}, - {file = "llvmlite-0.36.0-cp39-cp39-manylinux2010_x86_64.whl", hash = 
"sha256:5a6548b4899facb182145147185e9166c69826fb424895f227e6b7cf924a8da1"}, - {file = "llvmlite-0.36.0-cp39-cp39-win32.whl", hash = "sha256:ff52fb9c2be66b95b0e67d56fce11038397e5be1ea410ee53f5f1175fdbb107a"}, - {file = "llvmlite-0.36.0-cp39-cp39-win_amd64.whl", hash = "sha256:1dee416ea49fd338c74ec15c0c013e5273b0961528169af06ff90772614f7f6c"}, - {file = "llvmlite-0.36.0.tar.gz", hash = "sha256:765128fdf5f149ed0b889ffbe2b05eb1717f8e20a5c87fa2b4018fbcce0fcfc9"}, + {file = "llvmlite-0.39.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6717c7a6e93c9d2c3d07c07113ec80ae24af45cde536b34363d4bcd9188091d9"}, + {file = "llvmlite-0.39.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ddab526c5a2c4ccb8c9ec4821fcea7606933dc53f510e2a6eebb45a418d3488a"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3f331a323d0f0ada6b10d60182ef06c20a2f01be21699999d204c5750ffd0b4"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c00ff204afa721b0bb9835b5bf1ba7fba210eefcec5552a9e05a63219ba0dc"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16f56eb1eec3cda3a5c526bc3f63594fc24e0c8d219375afeb336f289764c6c7"}, + {file = "llvmlite-0.39.1-cp310-cp310-win32.whl", hash = "sha256:d0bfd18c324549c0fec2c5dc610fd024689de6f27c6cc67e4e24a07541d6e49b"}, + {file = "llvmlite-0.39.1-cp310-cp310-win_amd64.whl", hash = "sha256:7ebf1eb9badc2a397d4f6a6c8717447c81ac011db00064a00408bc83c923c0e4"}, + {file = "llvmlite-0.39.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6546bed4e02a1c3d53a22a0bced254b3b6894693318b16c16c8e43e29d6befb6"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1578f5000fdce513712e99543c50e93758a954297575610f48cb1fd71b27c08a"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3803f11ad5f6f6c3d2b545a303d68d9fabb1d50e06a8d6418e6fcd2d0df00959"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50aea09a2b933dab7c9df92361b1844ad3145bfb8dd2deb9cd8b8917d59306fb"}, + {file = "llvmlite-0.39.1-cp37-cp37m-win32.whl", hash = "sha256:b1a0bbdb274fb683f993198775b957d29a6f07b45d184c571ef2a721ce4388cf"}, + {file = "llvmlite-0.39.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e172c73fccf7d6db4bd6f7de963dedded900d1a5c6778733241d878ba613980e"}, + {file = "llvmlite-0.39.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e31f4b799d530255aaf0566e3da2df5bfc35d3cd9d6d5a3dcc251663656c27b1"}, + {file = "llvmlite-0.39.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62c0ea22e0b9dffb020601bb65cb11dd967a095a488be73f07d8867f4e327ca5"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ffc84ade195abd4abcf0bd3b827b9140ae9ef90999429b9ea84d5df69c9058c"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f158e4708dda6367d21cf15afc58de4ebce979c7a1aa2f6b977aae737e2a54"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22d36591cd5d02038912321d9ab8e4668e53ae2211da5523f454e992b5e13c36"}, + {file = "llvmlite-0.39.1-cp38-cp38-win32.whl", hash = "sha256:4c6ebace910410daf0bebda09c1859504fc2f33d122e9a971c4c349c89cca630"}, + {file = "llvmlite-0.39.1-cp38-cp38-win_amd64.whl", hash = "sha256:fb62fc7016b592435d3e3a8f680e3ea8897c3c9e62e6e6cc58011e7a4801439e"}, + {file = 
"llvmlite-0.39.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa9b26939ae553bf30a9f5c4c754db0fb2d2677327f2511e674aa2f5df941789"}, + {file = "llvmlite-0.39.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4f212c018db951da3e1dc25c2651abc688221934739721f2dad5ff1dd5f90e7"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39dc2160aed36e989610fc403487f11b8764b6650017ff367e45384dff88ffbf"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ec3d70b3e507515936e475d9811305f52d049281eaa6c8273448a61c9b5b7e2"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60f8dd1e76f47b3dbdee4b38d9189f3e020d22a173c00f930b52131001d801f9"}, + {file = "llvmlite-0.39.1-cp39-cp39-win32.whl", hash = "sha256:03aee0ccd81735696474dc4f8b6be60774892a2929d6c05d093d17392c237f32"}, + {file = "llvmlite-0.39.1-cp39-cp39-win_amd64.whl", hash = "sha256:3fc14e757bc07a919221f0cbaacb512704ce5774d7fcada793f1996d6bc75f2a"}, + {file = "llvmlite-0.39.1.tar.gz", hash = "sha256:b43abd7c82e805261c425d50335be9a6c4f84264e34d6d6e475207300005d572"}, ] locket = [ - {file = "locket-0.2.1-py2.py3-none-any.whl", hash = "sha256:12b6ada59d1f50710bca9704dbadd3f447dbf8dac6664575c1281cadab8e6449"}, - {file = "locket-0.2.1.tar.gz", hash = "sha256:3e1faba403619fe201552f083f1ecbf23f550941bc51985ac6ed4d02d25056dd"}, -] -markupsafe = [ - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, - {file = 
"MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, - {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, + {file = "locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3"}, + {file = "locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632"}, ] +markupsafe = [] matplotlib = [ - {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:456cc8334f6d1124e8ff856b42d2cc1c84335375a16448189999496549f7182b"}, - {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8a77906dc2ef9b67407cec0bdbf08e3971141e535db888974a915be5e1e3efc6"}, - {file = "matplotlib-3.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e70ae6475cfd0fad3816dcbf6cac536dc6f100f7474be58d59fa306e6e768a4"}, - {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53273c5487d1c19c3bc03b9eb82adaf8456f243b97ed79d09dded747abaf1235"}, - {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3b6f3fd0d8ca37861c31e9a7cab71a0ef14c639b4c95654ea1dd153158bf0df"}, - {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c87cdaf06fd7b2477f68909838ff4176f105064a72ca9d24d3f2a29f73d393"}, - {file = "matplotlib-3.5.1-cp310-cp310-win32.whl", hash = "sha256:e2f28a07b4f82abb40267864ad7b3a4ed76f1b1663e81c7efc84a9b9248f672f"}, - {file = "matplotlib-3.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:d70a32ee1f8b55eed3fd4e892f0286df8cccc7e0475c11d33b5d0a148f5c7599"}, - {file = "matplotlib-3.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:68fa30cec89b6139dc559ed6ef226c53fd80396da1919a1b5ef672c911aaa767"}, - {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e3484d8455af3fdb0424eae1789af61f6a79da0c80079125112fd5c1b604218"}, - {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e293b16cf303fe82995e41700d172a58a15efc5331125d08246b520843ef21ee"}, - {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e3520a274a0e054e919f5b3279ee5dbccf5311833819ccf3399dab7c83e90a25"}, - {file = "matplotlib-3.5.1-cp37-cp37m-win32.whl", hash = "sha256:2252bfac85cec7af4a67e494bfccf9080bcba8a0299701eab075f48847cca907"}, - {file = "matplotlib-3.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf67e05a1b7f86583f6ebd01f69b693b9c535276f4e943292e444855870a1b8"}, - {file = "matplotlib-3.5.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6c094e4bfecd2fa7f9adffd03d8abceed7157c928c2976899de282f3600f0a3d"}, - {file = "matplotlib-3.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:506b210cc6e66a0d1c2bb765d055f4f6bc2745070fb1129203b67e85bbfa5c18"}, - {file = "matplotlib-3.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b04fc29bcef04d4e2d626af28d9d892be6aba94856cb46ed52bcb219ceac8943"}, - {file = 
"matplotlib-3.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:577ed20ec9a18d6bdedb4616f5e9e957b4c08563a9f985563a31fd5b10564d2a"}, - {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e486f60db0cd1c8d68464d9484fd2a94011c1ac8593d765d0211f9daba2bd535"}, - {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b71f3a7ca935fc759f2aed7cec06cfe10bc3100fadb5dbd9c435b04e557971e1"}, - {file = "matplotlib-3.5.1-cp38-cp38-win32.whl", hash = "sha256:d24e5bb8028541ce25e59390122f5e48c8506b7e35587e5135efcb6471b4ac6c"}, - {file = "matplotlib-3.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:778d398c4866d8e36ee3bf833779c940b5f57192fa0a549b3ad67bc4c822771b"}, - {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bb1c613908f11bac270bc7494d68b1ef6e7c224b7a4204d5dacf3522a41e2bc3"}, - {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edf5e4e1d5fb22c18820e8586fb867455de3b109c309cb4fce3aaed85d9468d1"}, - {file = "matplotlib-3.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:40e0d7df05e8efe60397c69b467fc8f87a2affeb4d562fe92b72ff8937a2b511"}, - {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a350ca685d9f594123f652ba796ee37219bf72c8e0fc4b471473d87121d6d34"}, - {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e66497cd990b1a130e21919b004da2f1dc112132c01ac78011a90a0f9229778"}, - {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:87900c67c0f1728e6db17c6809ec05c025c6624dcf96a8020326ea15378fe8e7"}, - {file = "matplotlib-3.5.1-cp39-cp39-win32.whl", hash = "sha256:b8a4fb2a0c5afbe9604f8a91d7d0f27b1832c3e0b5e365f95a13015822b4cd65"}, - {file = "matplotlib-3.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:fe8d40c434a8e2c68d64c6d6a04e77f21791a93ff6afe0dce169597c110d3079"}, - {file = "matplotlib-3.5.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34a1fc29f8f96e78ec57a5eff5e8d8b53d3298c3be6df61e7aa9efba26929522"}, - {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b19a761b948e939a9e20173aaae76070025f0024fc8f7ba08bef22a5c8573afc"}, - {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6803299cbf4665eca14428d9e886de62e24f4223ac31ab9c5d6d5339a39782c7"}, - {file = "matplotlib-3.5.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14334b9902ec776461c4b8c6516e26b450f7ebe0b3ef8703bf5cdfbbaecf774a"}, - {file = "matplotlib-3.5.1.tar.gz", hash = "sha256:b2e9810e09c3a47b73ce9cab5a72243a1258f61e7900969097a817232246ce1c"}, + {file = "matplotlib-3.5.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a206a1b762b39398efea838f528b3a6d60cdb26fe9d58b48265787e29cd1d693"}, + {file = "matplotlib-3.5.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cd45a6f3e93a780185f70f05cf2a383daed13c3489233faad83e81720f7ede24"}, + {file = "matplotlib-3.5.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d62880e1f60e5a30a2a8484432bcb3a5056969dc97258d7326ad465feb7ae069"}, + {file = "matplotlib-3.5.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ab29589cef03bc88acfa3a1490359000c18186fc30374d8aa77d33cc4a51a4a"}, + {file = "matplotlib-3.5.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2886cc009f40e2984c083687251821f305d811d38e3df8ded414265e4583f0c5"}, + {file = 
"matplotlib-3.5.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c995f7d9568f18b5db131ab124c64e51b6820a92d10246d4f2b3f3a66698a15b"}, + {file = "matplotlib-3.5.3-cp310-cp310-win32.whl", hash = "sha256:6bb93a0492d68461bd458eba878f52fdc8ac7bdb6c4acdfe43dba684787838c2"}, + {file = "matplotlib-3.5.3-cp310-cp310-win_amd64.whl", hash = "sha256:2e6d184ebe291b9e8f7e78bbab7987d269c38ea3e062eace1fe7d898042ef804"}, + {file = "matplotlib-3.5.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ea6aef5c4338e58d8d376068e28f80a24f54e69f09479d1c90b7172bad9f25b"}, + {file = "matplotlib-3.5.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:839d47b8ead7ad9669aaacdbc03f29656dc21f0d41a6fea2d473d856c39c8b1c"}, + {file = "matplotlib-3.5.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3b4fa56159dc3c7f9250df88f653f085068bcd32dcd38e479bba58909254af7f"}, + {file = "matplotlib-3.5.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:94ff86af56a3869a4ae26a9637a849effd7643858a1a04dd5ee50e9ab75069a7"}, + {file = "matplotlib-3.5.3-cp37-cp37m-win32.whl", hash = "sha256:35a8ad4dddebd51f94c5d24bec689ec0ec66173bf614374a1244c6241c1595e0"}, + {file = "matplotlib-3.5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:43e9d3fa077bf0cc95ded13d331d2156f9973dce17c6f0c8b49ccd57af94dbd9"}, + {file = "matplotlib-3.5.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:22227c976ad4dc8c5a5057540421f0d8708c6560744ad2ad638d48e2984e1dbc"}, + {file = "matplotlib-3.5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf618a825deb6205f015df6dfe6167a5d9b351203b03fab82043ae1d30f16511"}, + {file = "matplotlib-3.5.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9befa5954cdbc085e37d974ff6053da269474177921dd61facdad8023c4aeb51"}, + {file = "matplotlib-3.5.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3840c280ebc87a48488a46f760ea1c0c0c83fcf7abbe2e6baf99d033fd35fd8"}, + {file = "matplotlib-3.5.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dacddf5bfcec60e3f26ec5c0ae3d0274853a258b6c3fc5ef2f06a8eb23e042be"}, + {file = "matplotlib-3.5.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b428076a55fb1c084c76cb93e68006f27d247169f056412607c5c88828d08f88"}, + {file = "matplotlib-3.5.3-cp38-cp38-win32.whl", hash = "sha256:874df7505ba820e0400e7091199decf3ff1fde0583652120c50cd60d5820ca9a"}, + {file = "matplotlib-3.5.3-cp38-cp38-win_amd64.whl", hash = "sha256:b28de401d928890187c589036857a270a032961411934bdac4cf12dde3d43094"}, + {file = "matplotlib-3.5.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3211ba82b9f1518d346f6309df137b50c3dc4421b4ed4815d1d7eadc617f45a1"}, + {file = "matplotlib-3.5.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6fe807e8a22620b4cd95cfbc795ba310dc80151d43b037257250faf0bfcd82bc"}, + {file = "matplotlib-3.5.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c096363b206a3caf43773abebdbb5a23ea13faef71d701b21a9c27fdcef72f4"}, + {file = "matplotlib-3.5.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bcdfcb0f976e1bac6721d7d457c17be23cf7501f977b6a38f9d38a3762841f7"}, + {file = "matplotlib-3.5.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1e64ac9be9da6bfff0a732e62116484b93b02a0b4d4b19934fb4f8e7ad26ad6a"}, + {file = "matplotlib-3.5.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:73dd93dc35c85dece610cca8358003bf0760d7986f70b223e2306b4ea6d1406b"}, + {file = 
"matplotlib-3.5.3-cp39-cp39-win32.whl", hash = "sha256:879c7e5fce4939c6aa04581dfe08d57eb6102a71f2e202e3314d5fbc072fd5a0"}, + {file = "matplotlib-3.5.3-cp39-cp39-win_amd64.whl", hash = "sha256:ab8d26f07fe64f6f6736d635cce7bfd7f625320490ed5bfc347f2cdb4fae0e56"}, + {file = "matplotlib-3.5.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:99482b83ebf4eb6d5fc6813d7aacdefdd480f0d9c0b52dcf9f1cc3b2c4b3361a"}, + {file = "matplotlib-3.5.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f814504e459c68118bf2246a530ed953ebd18213dc20e3da524174d84ed010b2"}, + {file = "matplotlib-3.5.3-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57f1b4e69f438a99bb64d7f2c340db1b096b41ebaa515cf61ea72624279220ce"}, + {file = "matplotlib-3.5.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d2484b350bf3d32cae43f85dcfc89b3ed7bd2bcd781ef351f93eb6fb2cc483f9"}, + {file = "matplotlib-3.5.3.tar.gz", hash = "sha256:339cac48b80ddbc8bfd05daae0a3a73414651a8596904c2a881cfd1edb65f26c"}, ] mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] -mypy = [ - {file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"}, - {file = "mypy-0.910-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb"}, - {file = "mypy-0.910-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9"}, - {file = "mypy-0.910-cp35-cp35m-win_amd64.whl", hash = "sha256:adaeee09bfde366d2c13fe6093a7df5df83c9a2ba98638c7d76b010694db760e"}, - {file = "mypy-0.910-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ecd2c3fe726758037234c93df7e98deb257fd15c24c9180dacf1ef829da5f921"}, - {file = "mypy-0.910-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d9dd839eb0dc1bbe866a288ba3c1afc33a202015d2ad83b31e875b5905a079b6"}, - {file = "mypy-0.910-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3e382b29f8e0ccf19a2df2b29a167591245df90c0b5a2542249873b5c1d78212"}, - {file = "mypy-0.910-cp36-cp36m-win_amd64.whl", hash = "sha256:53fd2eb27a8ee2892614370896956af2ff61254c275aaee4c230ae771cadd885"}, - {file = "mypy-0.910-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b6fb13123aeef4a3abbcfd7e71773ff3ff1526a7d3dc538f3929a49b42be03f0"}, - {file = "mypy-0.910-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e4dab234478e3bd3ce83bac4193b2ecd9cf94e720ddd95ce69840273bf44f6de"}, - {file = "mypy-0.910-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:7df1ead20c81371ccd6091fa3e2878559b5c4d4caadaf1a484cf88d93ca06703"}, - {file = "mypy-0.910-cp37-cp37m-win_amd64.whl", hash = "sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a"}, - {file = "mypy-0.910-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec4e0cd079db280b6bdabdc807047ff3e199f334050db5cbb91ba3e959a67504"}, - {file = "mypy-0.910-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:119bed3832d961f3a880787bf621634ba042cb8dc850a7429f643508eeac97b9"}, - {file = "mypy-0.910-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:866c41f28cee548475f146aa4d39a51cf3b6a84246969f3759cb3e9c742fc072"}, - {file = "mypy-0.910-cp38-cp38-win_amd64.whl", hash = "sha256:ceb6e0a6e27fb364fb3853389607cf7eb3a126ad335790fa1e14ed02fba50811"}, - {file = 
"mypy-0.910-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a85e280d4d217150ce8cb1a6dddffd14e753a4e0c3cf90baabb32cefa41b59e"}, - {file = "mypy-0.910-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42c266ced41b65ed40a282c575705325fa7991af370036d3f134518336636f5b"}, - {file = "mypy-0.910-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:3c4b8ca36877fc75339253721f69603a9c7fdb5d4d5a95a1a1b899d8b86a4de2"}, - {file = "mypy-0.910-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c0df2d30ed496a08de5daed2a9ea807d07c21ae0ab23acf541ab88c24b26ab97"}, - {file = "mypy-0.910-cp39-cp39-win_amd64.whl", hash = "sha256:c6c2602dffb74867498f86e6129fd52a2770c48b7cd3ece77ada4fa38f94eba8"}, - {file = "mypy-0.910-py3-none-any.whl", hash = "sha256:ef565033fa5a958e62796867b1df10c40263ea9ded87164d67572834e57a174d"}, - {file = "mypy-0.910.tar.gz", hash = "sha256:704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150"}, -] +mypy = [] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, ] natsort = [ - {file = "natsort-8.1.0-py3-none-any.whl", hash = "sha256:f59988d2f24e77b6b56f8a8f882d5df6b3b637e09e075abc67b486d59fba1a4b"}, - {file = "natsort-8.1.0.tar.gz", hash = "sha256:c7c1f3f27c375719a4dfcab353909fe39f26c2032a062a8c80cc844eaaca0445"}, -] -nodeenv = [ - {file = "nodeenv-1.6.0-py2.py3-none-any.whl", hash = "sha256:621e6b7076565ddcacd2db0294c0381e01fd28945ab36bcf00f41c5daf63bef7"}, - {file = "nodeenv-1.6.0.tar.gz", hash = "sha256:3ef13ff90291ba2a4a7a4ff9a979b63ffdd00a464dbe04acf0ea6471517a4c2b"}, + {file = "natsort-8.2.0-py3-none-any.whl", hash = "sha256:04fe18fdd2b9e5957f19f687eb117f102ef8dde6b574764e536e91194bed4f5f"}, + {file = "natsort-8.2.0.tar.gz", hash = "sha256:57f85b72c688b09e053cdac302dd5b5b53df5f73ae20b4874fcbffd8bf783d11"}, ] +nodeenv = [] nox = [ - {file = "nox-2022.1.7-py3-none-any.whl", hash = "sha256:efee12f02d39405b16d68f60e7a06fe1fc450ae58669d6cdda8c7f48e3bae9e3"}, - {file = "nox-2022.1.7.tar.gz", hash = "sha256:b375238cebb0b9df2fab74b8d0ce1a50cd80df60ca2e13f38f539454fcd97d7e"}, + {file = "nox-2022.8.7-py3-none-any.whl", hash = "sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c"}, + {file = "nox-2022.8.7.tar.gz", hash = "sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b"}, ] nox-poetry = [ {file = "nox-poetry-0.9.0.tar.gz", hash = "sha256:ea48fa535cd048854da35af7c6c3e92046fbed9b9023bb81193fb4d2d3a47c92"}, {file = "nox_poetry-0.9.0-py3-none-any.whl", hash = "sha256:33423c855fb47e2901faf9e15937326bc20c6e356eef825903eed4f8bbda69d3"}, ] numba = [ - {file = "numba-0.53.1-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:b23de6b6837c132087d06b8b92d343edb54b885873b824a037967fbd5272ebb7"}, - {file = "numba-0.53.1-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:6545b9e9b0c112b81de7f88a3c787469a357eeff8211e90b8f45ee243d521cc2"}, - {file = "numba-0.53.1-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:8fa5c963a43855050a868106a87cd614f3c3f459951c8fc468aec263ef80d063"}, - {file = "numba-0.53.1-cp36-cp36m-win32.whl", hash = "sha256:aaa6ebf56afb0b6752607b9f3bf39e99b0efe3c1fa6849698373925ee6838fd7"}, - {file = "numba-0.53.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b08b3df38aab769df79ed948d70f0a54a3cdda49d58af65369235c204ec5d0f3"}, - {file = "numba-0.53.1-cp37-cp37m-macosx_10_14_x86_64.whl", hash = 
"sha256:bf5c463b62d013e3f709cc8277adf2f4f4d8cc6757293e29c6db121b77e6b760"}, - {file = "numba-0.53.1-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:74df02e73155f669e60dcff07c4eef4a03dbf5b388594db74142ab40914fe4f5"}, - {file = "numba-0.53.1-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5165709bf62f28667e10b9afe6df0ce1037722adab92d620f59cb8bbb8104641"}, - {file = "numba-0.53.1-cp37-cp37m-win32.whl", hash = "sha256:2e96958ed2ca7e6d967b2ce29c8da0ca47117e1de28e7c30b2c8c57386506fa5"}, - {file = "numba-0.53.1-cp37-cp37m-win_amd64.whl", hash = "sha256:276f9d1674fe08d95872d81b97267c6b39dd830f05eb992608cbede50fcf48a9"}, - {file = "numba-0.53.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:4c4c8d102512ae472af52c76ad9522da718c392cb59f4cd6785d711fa5051a2a"}, - {file = "numba-0.53.1-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:691adbeac17dbdf6ed7c759e9e33a522351f07d2065fe926b264b6b2c15fd89b"}, - {file = "numba-0.53.1-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:94aab3e0e9e8754116325ce026e1b29ae72443c706a3104cf7f3368dc3012912"}, - {file = "numba-0.53.1-cp38-cp38-win32.whl", hash = "sha256:aabeec89bb3e3162136eea492cea7ee8882ddcda2201f05caecdece192c40896"}, - {file = "numba-0.53.1-cp38-cp38-win_amd64.whl", hash = "sha256:1895ebd256819ff22256cd6fe24aa8f7470b18acc73e7917e8e93c9ac7f565dc"}, - {file = "numba-0.53.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:224d197a46a9e602a16780d87636e199e2cdef528caef084a4d8fd8909c2455c"}, - {file = "numba-0.53.1-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:aba7acb247a09d7f12bd17a8e28bbb04e8adef9fc20ca29835d03b7894e1b49f"}, - {file = "numba-0.53.1-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:bd126f1f49da6fc4b3169cf1d96f1c3b3f84a7badd11fe22da344b923a00e744"}, - {file = "numba-0.53.1-cp39-cp39-win32.whl", hash = "sha256:0ef9d1f347b251282ae46e5a5033600aa2d0dfa1ee8c16cb8137b8cd6f79e221"}, - {file = "numba-0.53.1-cp39-cp39-win_amd64.whl", hash = "sha256:17146885cbe4e89c9d4abd4fcb8886dee06d4591943dc4343500c36ce2fcfa69"}, - {file = "numba-0.53.1.tar.gz", hash = "sha256:9cd4e5216acdc66c4e9dab2dfd22ddb5bef151185c070d4a3cd8e78638aff5b0"}, + {file = "numba-0.56.2-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:e50d1de5291d1afd3d660ca149447c682d70b8d3c22f97ed9a3076e6344330b0"}, + {file = "numba-0.56.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c358fd4ef7c5efc09ee96432284d66df285bd68654e85c39cf6c570dc35429a"}, + {file = "numba-0.56.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6465f23beff2f6134f53da873d4202671cdbb02716a29f2b5f5c77102ece37c0"}, + {file = "numba-0.56.2-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:2e4ac02b1bacea083e7cab5c02ded3bb5db7bb35d9c3a0a63da4f8c86691365a"}, + {file = "numba-0.56.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea7d286519eb575f66617805582391e6483e8b33968831331ecd46fe3c7f753f"}, + {file = "numba-0.56.2-cp310-cp310-win32.whl", hash = "sha256:590112ac60ff482f1d096e7574f9a781dff2f7bc91bfe388fe7d87e52630c0ec"}, + {file = "numba-0.56.2-cp310-cp310-win_amd64.whl", hash = "sha256:e998782d1e466ce5a61cce18f23fd69ba0eeb78069fd2ad59e1a2928a29f952f"}, + {file = "numba-0.56.2-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:c011436c8e0ec7e37fbb81953537ceb659e662f7c7f9850f2638a100e857bee4"}, + {file = "numba-0.56.2-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d216a8957057afa43374eb9a11c69a874424d33e20f4270f80cef878c7efa500"}, + {file = 
"numba-0.56.2-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:2a1d81994bedc0e02f900ea4b81bdfd4929ee844f6ef9242e196c50f30f95449"}, + {file = "numba-0.56.2-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a8e35d9b86d33c2f77e50fd8a610b984f57005e1cd1a2c8267d4cfd743d8f49"}, + {file = "numba-0.56.2-cp37-cp37m-win32.whl", hash = "sha256:bb3609bb76fd5b1d3adc9a7df40f27c3eb8c7e5d2c8a536c1bdc7b09c2fdc215"}, + {file = "numba-0.56.2-cp37-cp37m-win_amd64.whl", hash = "sha256:339e519f4091fab3a446b474e041c86eedd216334f8dd7febee4b43df112179c"}, + {file = "numba-0.56.2-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:63199334b9fedf3511369738fa8ef07411abc1cd9e8cd0e474cf671133b85180"}, + {file = "numba-0.56.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f112dcfcbcbb19da33e9218611c51208499d52d74e358f2160e64ccb46e50f07"}, + {file = "numba-0.56.2-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c4467aed831297473ec94f9f3d9795de551c447bf6ea17d7ac24b6a47fb687ea"}, + {file = "numba-0.56.2-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:ae89056d1681c70b79bc9835c40672d4ba22cf4e75eacc0b8ff7f176c7233781"}, + {file = "numba-0.56.2-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:edbba200696c4be93a10020e4e0eab5ca0898dff4920f3dc2b4d39c14e40b993"}, + {file = "numba-0.56.2-cp38-cp38-win32.whl", hash = "sha256:45cefeefa78762753c2be9f64e7579a7523afff479642c3c661fb78bd740a352"}, + {file = "numba-0.56.2-cp38-cp38-win_amd64.whl", hash = "sha256:9eb56e7a23c4daa195cefba894671a45464533b0af9908483ba3de74a75fe682"}, + {file = "numba-0.56.2-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:faced8cc33ee6a272654bb3e570994e1949790ae06579ea485c25849f4e79008"}, + {file = "numba-0.56.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2713f161c51e0638dd4f92036999a523df5dcc498d39adb1559cf7cacb6f0fe9"}, + {file = "numba-0.56.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b885219eba813a4c70e68fce18339a3e575fb2b75e92d2babbcf04a40f74dee"}, + {file = "numba-0.56.2-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:d87bdb704ef219c00844844a8ac9c6f1c518f2cee9c26d1971427b6bc3d2004d"}, + {file = "numba-0.56.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6184d0c125bdf2f19ad78e71e9ea873ad6c88c55f7609f987bd4354d15c54bb3"}, + {file = "numba-0.56.2-cp39-cp39-win32.whl", hash = "sha256:61d472e42ecad262857409a00f9f24d4609b190ba1fab5fc118e7a1ee43c9d39"}, + {file = "numba-0.56.2-cp39-cp39-win_amd64.whl", hash = "sha256:36903c24088aed9a768d7e8269f4ee4d3abc5662e2aeacacf524a7c5d1707b04"}, + {file = "numba-0.56.2.tar.gz", hash = "sha256:3492f0a5d09e257fc521f5377a6c6b907eec1920d14739f0b2458b9d29946a5a"}, ] numpy = [ - {file = "numpy-1.22.3-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:92bfa69cfbdf7dfc3040978ad09a48091143cffb778ec3b03fa170c494118d75"}, - {file = "numpy-1.22.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8251ed96f38b47b4295b1ae51631de7ffa8260b5b087808ef09a39a9d66c97ab"}, - {file = "numpy-1.22.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48a3aecd3b997bf452a2dedb11f4e79bc5bfd21a1d4cc760e703c31d57c84b3e"}, - {file = "numpy-1.22.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3bae1a2ed00e90b3ba5f7bd0a7c7999b55d609e0c54ceb2b076a25e345fa9f4"}, - {file = "numpy-1.22.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:08d9b008d0156c70dc392bb3ab3abb6e7a711383c3247b410b39962263576cd4"}, - {file = "numpy-1.22.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:201b4d0552831f7250a08d3b38de0d989d6f6e4658b709a02a73c524ccc6ffce"}, - {file = "numpy-1.22.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f8c1f39caad2c896bc0018f699882b345b2a63708008be29b1f355ebf6f933fe"}, - {file = "numpy-1.22.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:568dfd16224abddafb1cbcce2ff14f522abe037268514dd7e42c6776a1c3f8e5"}, - {file = "numpy-1.22.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca688e1b9b95d80250bca34b11a05e389b1420d00e87a0d12dc45f131f704a1"}, - {file = "numpy-1.22.3-cp38-cp38-win32.whl", hash = "sha256:e7927a589df200c5e23c57970bafbd0cd322459aa7b1ff73b7c2e84d6e3eae62"}, - {file = "numpy-1.22.3-cp38-cp38-win_amd64.whl", hash = "sha256:07a8c89a04997625236c5ecb7afe35a02af3896c8aa01890a849913a2309c676"}, - {file = "numpy-1.22.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:2c10a93606e0b4b95c9b04b77dc349b398fdfbda382d2a39ba5a822f669a0123"}, - {file = "numpy-1.22.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fade0d4f4d292b6f39951b6836d7a3c7ef5b2347f3c420cd9820a1d90d794802"}, - {file = "numpy-1.22.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bfb1bb598e8229c2d5d48db1860bcf4311337864ea3efdbe1171fb0c5da515d"}, - {file = "numpy-1.22.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97098b95aa4e418529099c26558eeb8486e66bd1e53a6b606d684d0c3616b168"}, - {file = "numpy-1.22.3-cp39-cp39-win32.whl", hash = "sha256:fdf3c08bce27132395d3c3ba1503cac12e17282358cb4bddc25cc46b0aca07aa"}, - {file = "numpy-1.22.3-cp39-cp39-win_amd64.whl", hash = "sha256:639b54cdf6aa4f82fe37ebf70401bbb74b8508fddcf4797f9fe59615b8c5813a"}, - {file = "numpy-1.22.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c34ea7e9d13a70bf2ab64a2532fe149a9aced424cd05a2c4ba662fd989e3e45f"}, - {file = "numpy-1.22.3.zip", hash = "sha256:dbc7601a3b7472d559dc7b933b18b4b66f9aa7452c120e87dfb33d02008c8a18"}, + {file = "numpy-1.23.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c9f707b5bb73bf277d812ded9896f9512a43edff72712f31667d0a8c2f8e71ee"}, + {file = "numpy-1.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffcf105ecdd9396e05a8e58e81faaaf34d3f9875f137c7372450baa5d77c9a54"}, + {file = "numpy-1.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ea3f98a0ffce3f8f57675eb9119f3f4edb81888b6874bc1953f91e0b1d4f440"}, + {file = "numpy-1.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004f0efcb2fe1c0bd6ae1fcfc69cc8b6bf2407e0f18be308612007a0762b4089"}, + {file = "numpy-1.23.3-cp310-cp310-win32.whl", hash = "sha256:98dcbc02e39b1658dc4b4508442a560fe3ca5ca0d989f0df062534e5ca3a5c1a"}, + {file = "numpy-1.23.3-cp310-cp310-win_amd64.whl", hash = "sha256:39a664e3d26ea854211867d20ebcc8023257c1800ae89773cbba9f9e97bae036"}, + {file = "numpy-1.23.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1f27b5322ac4067e67c8f9378b41c746d8feac8bdd0e0ffede5324667b8a075c"}, + {file = "numpy-1.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ad3ec9a748a8943e6eb4358201f7e1c12ede35f510b1a2221b70af4bb64295c"}, + {file = "numpy-1.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdc9febce3e68b697d931941b263c59e0c74e8f18861f4064c1f712562903411"}, + {file = "numpy-1.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:301c00cf5e60e08e04d842fc47df641d4a181e651c7135c50dc2762ffe293dbd"}, + {file = "numpy-1.23.3-cp311-cp311-win32.whl", hash = "sha256:7cd1328e5bdf0dee621912f5833648e2daca72e3839ec1d6695e91089625f0b4"}, + {file = "numpy-1.23.3-cp311-cp311-win_amd64.whl", hash = "sha256:8355fc10fd33a5a70981a5b8a0de51d10af3688d7a9e4a34fcc8fa0d7467bb7f"}, + {file = "numpy-1.23.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc6e8da415f359b578b00bcfb1d08411c96e9a97f9e6c7adada554a0812a6cc6"}, + {file = "numpy-1.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:22d43376ee0acd547f3149b9ec12eec2f0ca4a6ab2f61753c5b29bb3e795ac4d"}, + {file = "numpy-1.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a64403f634e5ffdcd85e0b12c08f04b3080d3e840aef118721021f9b48fc1460"}, + {file = "numpy-1.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd9d3abe5774404becdb0748178b48a218f1d8c44e0375475732211ea47c67e"}, + {file = "numpy-1.23.3-cp38-cp38-win32.whl", hash = "sha256:f8c02ec3c4c4fcb718fdf89a6c6f709b14949408e8cf2a2be5bfa9c49548fd85"}, + {file = "numpy-1.23.3-cp38-cp38-win_amd64.whl", hash = "sha256:e868b0389c5ccfc092031a861d4e158ea164d8b7fdbb10e3b5689b4fc6498df6"}, + {file = "numpy-1.23.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09f6b7bdffe57fc61d869a22f506049825d707b288039d30f26a0d0d8ea05164"}, + {file = "numpy-1.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8c79d7cf86d049d0c5089231a5bcd31edb03555bd93d81a16870aa98c6cfb79d"}, + {file = "numpy-1.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d5420053bbb3dd64c30e58f9363d7a9c27444c3648e61460c1237f9ec3fa14"}, + {file = "numpy-1.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5422d6a1ea9b15577a9432e26608c73a78faf0b9039437b075cf322c92e98e7"}, + {file = "numpy-1.23.3-cp39-cp39-win32.whl", hash = "sha256:c1ba66c48b19cc9c2975c0d354f24058888cdc674bebadceb3cdc9ec403fb5d1"}, + {file = "numpy-1.23.3-cp39-cp39-win_amd64.whl", hash = "sha256:78a63d2df1d947bd9d1b11d35564c2f9e4b57898aae4626638056ec1a231c40c"}, + {file = "numpy-1.23.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:17c0e467ade9bda685d5ac7f5fa729d8d3e76b23195471adae2d6a6941bd2c18"}, + {file = "numpy-1.23.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91b8d6768a75247026e951dce3b2aac79dc7e78622fc148329135ba189813584"}, + {file = "numpy-1.23.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:94c15ca4e52671a59219146ff584488907b1f9b3fc232622b47e2cf832e94fb8"}, + {file = "numpy-1.23.3.tar.gz", hash = "sha256:51bf49c0cd1d52be0a240aa66f3458afc4b95d8993d2d04f0d91fa60c10af6cd"}, ] packaging = [ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] pandas = [ - {file = "pandas-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3dfb32ed50122fe8c5e7f2b8d97387edd742cc78f9ec36f007ee126cd3720907"}, - {file = "pandas-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0259cd11e7e6125aaea3af823b80444f3adad6149ff4c97fef760093598b3e34"}, - {file = "pandas-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:96e9ece5759f9b47ae43794b6359bbc54805d76e573b161ae770c1ea59393106"}, - {file = "pandas-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:508c99debccd15790d526ce6b1624b97a5e1e4ca5b871319fb0ebfd46b8f4dad"}, - {file = "pandas-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6a7bbbb7950063bfc942f8794bc3e31697c020a14f1cd8905fc1d28ec674a01"}, - {file = "pandas-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:c614001129b2a5add5e3677c3a213a9e6fd376204cb8d17c04e84ff7dfc02a73"}, - {file = "pandas-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4e1176f45981c8ccc8161bc036916c004ca51037a7ed73f2d2a9857e6dbe654f"}, - {file = "pandas-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bbb15ad79050e8b8d39ec40dd96a30cd09b886a2ae8848d0df1abba4d5502a67"}, - {file = "pandas-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6d6ad1da00c7cc7d8dd1559a6ba59ba3973be6b15722d49738b2be0977eb8a0c"}, - {file = "pandas-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:358b0bc98a5ff067132d23bf7a2242ee95db9ea5b7bbc401cf79205f11502fd3"}, - {file = "pandas-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6105af6533f8b63a43ea9f08a2ede04e8f43e49daef0209ab0d30352bcf08bee"}, - {file = "pandas-1.4.1-cp38-cp38-win32.whl", hash = "sha256:04dd15d9db538470900c851498e532ef28d4e56bfe72c9523acb32042de43dfb"}, - {file = "pandas-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b384516dbb4e6aae30e3464c2e77c563da5980440fbdfbd0968e3942f8f9d70"}, - {file = "pandas-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f02e85e6d832be37d7f16cf6ac8bb26b519ace3e5f3235564a91c7f658ab2a43"}, - {file = "pandas-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0b1a13f647e4209ed7dbb5da3497891d0045da9785327530ab696417ef478f84"}, - {file = "pandas-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:19f7c632436b1b4f84615c3b127bbd7bc603db95e3d4332ed259dc815c9aaa26"}, - {file = "pandas-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ea47ba1d6f359680130bd29af497333be6110de8f4c35b9211eec5a5a9630fa"}, - {file = "pandas-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e5a7a1e0ecaac652326af627a3eca84886da9e667d68286866d4e33f6547caf"}, - {file = "pandas-1.4.1-cp39-cp39-win32.whl", hash = "sha256:1d85d5f6be66dfd6d1d8d13b9535e342a2214260f1852654b19fa4d7b8d1218b"}, - {file = "pandas-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:3129a35d9dad1d80c234dd78f8f03141b914395d23f97cf92a366dcd19f8f8bf"}, - {file = "pandas-1.4.1.tar.gz", hash = "sha256:8db93ec98ac7cb5f8ac1420c10f5e3c43533153f253fe7fb6d891cf5aa2b80d2"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:799e6a25932df7e6b1f8dabf63de064e2205dc309abb75956126a0453fd88e97"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7cd1d69a387f7d5e1a5a06a87574d9ef2433847c0e78113ab51c84d3a8bcaeaa"}, + {file = "pandas-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:94f2ed1fd51e545ebf71da1e942fe1822ee01e10d3dd2a7276d01351333b7c6b"}, + {file = "pandas-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4591cadd06fbbbd16fafc2de6e840c1aaefeae3d5864b688004777ef1bbdede3"}, + {file = "pandas-1.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0022fe6a313df1c4869b5edc012d734c6519a6fffa3cf70930f32e6a1078e49"}, + {file = "pandas-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:785e878a6e6d8ddcdb8c181e600855402750052497d7fc6d6b508894f6b8830b"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:c4bb8b0ab9f94207d07e401d24baebfc63057246b1a5e0cd9ee50df85a656871"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:51c424ca134fdaeac9a4acd719d1ab48046afc60943a489028f0413fdbe9ef1c"}, + {file = "pandas-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ce35f947202b0b99c660221d82beb91d2e6d553d55a40b30128204e3e2c63848"}, + {file = "pandas-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee6f1848148ed3204235967613b0a32be2d77f214e9623f554511047705c1e04"}, + {file = "pandas-1.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7cc960959be28d064faefc0cb2aef854d46b827c004ebea7e79b5497ed83e7d"}, + {file = "pandas-1.4.4-cp38-cp38-win32.whl", hash = "sha256:9d805bce209714b1c1fa29bfb1e42ad87e4c0a825e4b390c56a3e71593b7e8d8"}, + {file = "pandas-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:afbddad78a98ec4d2ce08b384b81730de1ccc975b99eb663e6dac43703f36d98"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a08ceb59db499864c58a9bf85ab6219d527d91f14c0240cc25fa2c261032b2a7"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0959c41004e3d2d16f39c828d6da66ebee329836a7ecee49fb777ac9ad8a7501"}, + {file = "pandas-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87b4194f344dcd14c0f885cecb22005329b38bda10f1aaf7b9596a00ec8a4768"}, + {file = "pandas-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d2a7a3c1fea668d56bd91edbd5f2732e0af8feb9d2bf8d9bfacb2dea5fa9536"}, + {file = "pandas-1.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a981cfabf51c318a562deb4ae7deec594c07aee7cf18b4594a92c23718ec8275"}, + {file = "pandas-1.4.4-cp39-cp39-win32.whl", hash = "sha256:050aada67a5ec6699a7879e769825b510018a95fb9ac462bb1867483d0974a97"}, + {file = "pandas-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:8d4d2fe2863ecddb0ba1979bdda26c8bc2ea138f5a979abe3ba80c0fa4015c91"}, + {file = "pandas-1.4.4.tar.gz", hash = "sha256:ab6c0d738617b675183e5f28db32b5148b694ad9bba0a40c3ea26d96b431db67"}, ] partd = [ - {file = "partd-1.2.0-py3-none-any.whl", hash = "sha256:5c3a5d70da89485c27916328dc1e26232d0e270771bd4caef4a5124b6a457288"}, - {file = "partd-1.2.0.tar.gz", hash = "sha256:aa67897b84d522dcbc86a98b942afab8c6aa2f7f677d904a616b74ef5ddbc3eb"}, + {file = "partd-1.3.0-py3-none-any.whl", hash = "sha256:6393a0c898a0ad945728e34e52de0df3ae295c5aff2e2926ba7cc3c60a734a15"}, + {file = "partd-1.3.0.tar.gz", hash = "sha256:ce91abcdc6178d668bcaa431791a5a917d902341cb193f543fe445d494660485"}, ] pathspec = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, + {file = "pathspec-0.10.1-py3-none-any.whl", hash = "sha256:46846318467efc4556ccfd27816e004270a9eeeeb4d062ce5e6fc7a87c573f93"}, + {file = "pathspec-0.10.1.tar.gz", hash = "sha256:7ace6161b621d31e7902eb6b5ae148d12cfd23f4a249b9ffb6b9fee12084323d"}, ] patsy = [ {file = "patsy-0.5.2-py2.py3-none-any.whl", hash = "sha256:cc80955ae8c13a7e7c4051eda7b277c8f909f50bc7d73e124bc38e2ee3d95041"}, {file = "patsy-0.5.2.tar.gz", hash = "sha256:5053de7804676aba62783dbb0f23a2b3d74e35e5bfa238b88b7cbf148a38b69d"}, ] -pbr = [ - {file = "pbr-5.8.1-py2.py3-none-any.whl", hash = "sha256:27108648368782d07bbf1cb468ad2e2eeef29086affd14087a6d04b7de8af4ec"}, - {file = "pbr-5.8.1.tar.gz", hash = 
"sha256:66bc5a34912f408bb3925bf21231cb6f59206267b7f63f3503ef865c1a292e25"}, -] +pbr = [] pep8-naming = [ {file = "pep8-naming-0.11.1.tar.gz", hash = "sha256:a1dd47dd243adfe8a83616e27cf03164960b507530f155db94e10b36a6cd6724"}, {file = "pep8_naming-0.11.1-py2.py3-none-any.whl", hash = "sha256:f43bfe3eea7e0d73e8b5d07d6407ab47f2476ccaeff6937c84275cd30b016738"}, ] pillow = [ - {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"}, - {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"}, - {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"}, - {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"}, - {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"}, - {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"}, - {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"}, - {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"}, - {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"}, - {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"}, - {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"}, - {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"}, - {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"}, - {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"}, - {file = 
"Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"}, - {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"}, - {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"}, - {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"}, - {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"}, - {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"}, - {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"}, -] -platformdirs = [ - {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, - {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, -] + {file = "Pillow-9.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544"}, + {file = "Pillow-9.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e"}, + {file = "Pillow-9.2.0-cp310-cp310-win32.whl", hash = "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28"}, + {file = "Pillow-9.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = 
"sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a"}, + {file = "Pillow-9.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1"}, + {file = "Pillow-9.2.0-cp311-cp311-win32.whl", hash = "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf"}, + {file = "Pillow-9.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c"}, + {file = "Pillow-9.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59"}, + {file = "Pillow-9.2.0-cp37-cp37m-win32.whl", hash = "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc"}, + {file = "Pillow-9.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff"}, 
+ {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1"}, + {file = "Pillow-9.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76"}, + {file = "Pillow-9.2.0-cp38-cp38-win32.whl", hash = "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f"}, + {file = "Pillow-9.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60"}, + {file = "Pillow-9.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4"}, + {file = "Pillow-9.2.0-cp39-cp39-win32.whl", hash = "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885"}, + {file = "Pillow-9.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927"}, + {file = "Pillow-9.2.0.tar.gz", hash = 
"sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04"}, +] +platformdirs = [] pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, @@ -2442,17 +2402,11 @@ poyo = [ {file = "poyo-0.5.0-py2.py3-none-any.whl", hash = "sha256:3e2ca8e33fdc3c411cd101ca395668395dd5dc7ac775b8e809e3def9f9fe041a"}, {file = "poyo-0.5.0.tar.gz", hash = "sha256:e26956aa780c45f011ca9886f044590e2d8fd8b61db7b1c1cf4e0869f48ed4dd"}, ] -pre-commit = [ - {file = "pre_commit-2.17.0-py2.py3-none-any.whl", hash = "sha256:725fa7459782d7bec5ead072810e47351de01709be838c2ce1726b9591dad616"}, - {file = "pre_commit-2.17.0.tar.gz", hash = "sha256:c1a8040ff15ad3d648c70cc3e55b93e4d2d5b687320955505587fd79bbaed06a"}, -] -pre-commit-hooks = [ - {file = "pre_commit_hooks-4.1.0-py2.py3-none-any.whl", hash = "sha256:ba95316b79038e56ce998cdacb1ce922831ac0e41744c77bcc2b9677bf183206"}, - {file = "pre_commit_hooks-4.1.0.tar.gz", hash = "sha256:b6361865d1877c5da5ac3a944aab19ce6bd749a534d2ede28e683d07194a57e1"}, -] +pre-commit = [] +pre-commit-hooks = [] prompt-toolkit = [ - {file = "prompt_toolkit-3.0.28-py3-none-any.whl", hash = "sha256:30129d870dcb0b3b6a53efdc9d0a83ea96162ffd28ffe077e94215b233dc670c"}, - {file = "prompt_toolkit-3.0.28.tar.gz", hash = "sha256:9f1cd16b1e86c2968f2519d7fb31dd9d669916f515612c269d14e9ed52b51650"}, + {file = "prompt_toolkit-3.0.31-py3-none-any.whl", hash = "sha256:9696f386133df0fc8ca5af4895afe5d78f5fcfe5258111c2a79a1c3e41ffa96d"}, + {file = "prompt_toolkit-3.0.31.tar.gz", hash = "sha256:9ada952c9d1787f52ff6d5f3484d0b4df8952787c087edf6a1f7c2cb1ea88148"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, @@ -2478,13 +2432,10 @@ pygithub = [ {file = "PyGithub-1.55-py3-none-any.whl", hash = "sha256:2caf0054ea079b71e539741ae56c5a95e073b81fa472ce222e81667381b9601b"}, {file = "PyGithub-1.55.tar.gz", hash = "sha256:1bbfff9372047ff3f21d5cd8e07720f3dbfdaf6462fcaed9d815f528f1ba7283"}, ] -pygments = [ - {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, - {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, -] +pygments = [] pyjwt = [ - {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"}, - {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"}, + {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, + {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, ] pynacl = [ {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, @@ -2498,26 +2449,20 @@ pynacl = [ {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, ] -pyparsing = [ - {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, - {file = 
"pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, -] +pyparsing = [] pytest = [ - {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, - {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, + {file = "pytest-7.1.3-py3-none-any.whl", hash = "sha256:1377bda3466d70b55e3f5cecfa55bb7cfcf219c7964629b967c37cf0bda818b7"}, + {file = "pytest-7.1.3.tar.gz", hash = "sha256:4f365fec2dff9c1162f834d9f18af1ba13062db0c708bf7b946f8a5c76180c39"}, ] python-dateutil = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] python-slugify = [ - {file = "python-slugify-6.1.1.tar.gz", hash = "sha256:00003397f4e31414e922ce567b3a4da28cf1436a53d332c9aeeb51c7d8c469fd"}, - {file = "python_slugify-6.1.1-py2.py3-none-any.whl", hash = "sha256:8c0016b2d74503eb64761821612d58fcfc729493634b1eb0575d8f5b4aa1fbcf"}, -] -pytz = [ - {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, - {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, + {file = "python-slugify-6.1.2.tar.gz", hash = "sha256:272d106cb31ab99b3496ba085e3fea0e9e76dcde967b5e9992500d1f785ce4e1"}, + {file = "python_slugify-6.1.2-py2.py3-none-any.whl", hash = "sha256:7b2c274c308b62f4269a9ba701aa69a797e9bca41aeee5b3a9e79e36b6656927"}, ] +pytz = [] pyyaml = [ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"}, {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"}, @@ -2554,89 +2499,100 @@ questionary = [ {file = "questionary-1.10.0.tar.gz", hash = "sha256:600d3aefecce26d48d97eee936fdb66e4bc27f934c3ab6dd1e292c4f43946d90"}, ] regex = [ - {file = "regex-2022.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab69b4fe09e296261377d209068d52402fb85ef89dc78a9ac4a29a895f4e24a7"}, - {file = "regex-2022.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5bc5f921be39ccb65fdda741e04b2555917a4bced24b4df14eddc7569be3b493"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:43eba5c46208deedec833663201752e865feddc840433285fbadee07b84b464d"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c68d2c04f7701a418ec2e5631b7f3552efc32f6bcc1739369c6eeb1af55f62e0"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:caa2734ada16a44ae57b229d45091f06e30a9a52ace76d7574546ab23008c635"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef806f684f17dbd6263d72a54ad4073af42b42effa3eb42b877e750c24c76f86"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be319f4eb400ee567b722e9ea63d5b2bb31464e3cf1b016502e3ee2de4f86f5c"}, - {file = "regex-2022.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:42bb37e2b2d25d958c25903f6125a41aaaa1ed49ca62c103331f24b8a459142f"}, - {file = "regex-2022.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fbc88d3ba402b5d041d204ec2449c4078898f89c4a6e6f0ed1c1a510ef1e221d"}, - {file = "regex-2022.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:91e0f7e7be77250b808a5f46d90bf0032527d3c032b2131b63dee54753a4d729"}, - {file = "regex-2022.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:cb3652bbe6720786b9137862205986f3ae54a09dec8499a995ed58292bdf77c2"}, - {file = "regex-2022.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:878c626cbca3b649e14e972c14539a01191d79e58934e3f3ef4a9e17f90277f8"}, - {file = "regex-2022.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6df070a986fc064d865c381aecf0aaff914178fdf6874da2f2387e82d93cc5bd"}, - {file = "regex-2022.3.2-cp310-cp310-win32.whl", hash = "sha256:b549d851f91a4efb3e65498bd4249b1447ab6035a9972f7fc215eb1f59328834"}, - {file = "regex-2022.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:8babb2b5751105dc0aef2a2e539f4ba391e738c62038d8cb331c710f6b0f3da7"}, - {file = "regex-2022.3.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1977bb64264815d3ef016625adc9df90e6d0e27e76260280c63eca993e3f455f"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e73652057473ad3e6934944af090852a02590c349357b79182c1b681da2c772"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b22ff939a8856a44f4822da38ef4868bd3a9ade22bb6d9062b36957c850e404f"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:878f5d649ba1db9f52cc4ef491f7dba2d061cdc48dd444c54260eebc0b1729b9"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0008650041531d0eadecc96a73d37c2dc4821cf51b0766e374cb4f1ddc4e1c14"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:06b1df01cf2aef3a9790858af524ae2588762c8a90e784ba00d003f045306204"}, - {file = "regex-2022.3.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57484d39447f94967e83e56db1b1108c68918c44ab519b8ecfc34b790ca52bf7"}, - {file = "regex-2022.3.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:74d86e8924835f863c34e646392ef39039405f6ce52956d8af16497af4064a30"}, - {file = "regex-2022.3.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:ae17fc8103f3b63345709d3e9654a274eee1c6072592aec32b026efd401931d0"}, - {file = "regex-2022.3.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5f92a7cdc6a0ae2abd184e8dfd6ef2279989d24c85d2c85d0423206284103ede"}, - {file = "regex-2022.3.2-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:5dcc4168536c8f68654f014a3db49b6b4a26b226f735708be2054314ed4964f4"}, - {file = "regex-2022.3.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1e30762ddddb22f7f14c4f59c34d3addabc789216d813b0f3e2788d7bcf0cf29"}, - {file = "regex-2022.3.2-cp36-cp36m-win32.whl", hash = "sha256:286ff9ec2709d56ae7517040be0d6c502642517ce9937ab6d89b1e7d0904f863"}, - {file = "regex-2022.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:d326ff80ed531bf2507cba93011c30fff2dd51454c85f55df0f59f2030b1687b"}, - {file = "regex-2022.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9d828c5987d543d052b53c579a01a52d96b86f937b1777bbfe11ef2728929357"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:c87ac58b9baaf50b6c1b81a18d20eda7e2883aa9a4fb4f1ca70f2e443bfcdc57"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6c2441538e4fadd4291c8420853431a229fcbefc1bf521810fbc2629d8ae8c2"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f3356afbb301ec34a500b8ba8b47cba0b44ed4641c306e1dd981a08b416170b5"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d96eec8550fd2fd26f8e675f6d8b61b159482ad8ffa26991b894ed5ee19038b"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf668f26604e9f7aee9f8eaae4ca07a948168af90b96be97a4b7fa902a6d2ac1"}, - {file = "regex-2022.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb0e2845e81bdea92b8281a3969632686502565abf4a0b9e4ab1471c863d8f3"}, - {file = "regex-2022.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:87bc01226cd288f0bd9a4f9f07bf6827134dc97a96c22e2d28628e824c8de231"}, - {file = "regex-2022.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:09b4b6ccc61d4119342b26246ddd5a04accdeebe36bdfe865ad87a0784efd77f"}, - {file = "regex-2022.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:9557545c10d52c845f270b665b52a6a972884725aa5cf12777374e18f2ea8960"}, - {file = "regex-2022.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:0be0c34a39e5d04a62fd5342f0886d0e57592a4f4993b3f9d257c1f688b19737"}, - {file = "regex-2022.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7b103dffb9f6a47ed7ffdf352b78cfe058b1777617371226c1894e1be443afec"}, - {file = "regex-2022.3.2-cp37-cp37m-win32.whl", hash = "sha256:f8169ec628880bdbca67082a9196e2106060a4a5cbd486ac51881a4df805a36f"}, - {file = "regex-2022.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:4b9c16a807b17b17c4fa3a1d8c242467237be67ba92ad24ff51425329e7ae3d0"}, - {file = "regex-2022.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:67250b36edfa714ba62dc62d3f238e86db1065fccb538278804790f578253640"}, - {file = "regex-2022.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5510932596a0f33399b7fff1bd61c59c977f2b8ee987b36539ba97eb3513584a"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6f7ee2289176cb1d2c59a24f50900f8b9580259fa9f1a739432242e7d254f93"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d7a68fa53688e1f612c3246044157117403c7ce19ebab7d02daf45bd63913e"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf5317c961d93c1a200b9370fb1c6b6836cc7144fef3e5a951326912bf1f5a3"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad397bc7d51d69cb07ef89e44243f971a04ce1dca9bf24c992c362406c0c6573"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:297c42ede2c81f0cb6f34ea60b5cf6dc965d97fa6936c11fc3286019231f0d66"}, - {file = "regex-2022.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:af4d8cc28e4c7a2f6a9fed544228c567340f8258b6d7ea815b62a72817bbd178"}, - {file = "regex-2022.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:452519bc4c973e961b1620c815ea6dd8944a12d68e71002be5a7aff0a8361571"}, - {file = 
"regex-2022.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cb34c2d66355fb70ae47b5595aafd7218e59bb9c00ad8cc3abd1406ca5874f07"}, - {file = "regex-2022.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d146e5591cb67c5e836229a04723a30af795ef9b70a0bbd913572e14b7b940f"}, - {file = "regex-2022.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:03299b0bcaa7824eb7c0ebd7ef1e3663302d1b533653bfe9dc7e595d453e2ae9"}, - {file = "regex-2022.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:9ccb0a4ab926016867260c24c192d9df9586e834f5db83dfa2c8fffb3a6e5056"}, - {file = "regex-2022.3.2-cp38-cp38-win32.whl", hash = "sha256:f7e8f1ee28e0a05831c92dc1c0c1c94af5289963b7cf09eca5b5e3ce4f8c91b0"}, - {file = "regex-2022.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:35ed2f3c918a00b109157428abfc4e8d1ffabc37c8f9abc5939ebd1e95dabc47"}, - {file = "regex-2022.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:55820bc631684172b9b56a991d217ec7c2e580d956591dc2144985113980f5a3"}, - {file = "regex-2022.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:83f03f0bd88c12e63ca2d024adeee75234d69808b341e88343b0232329e1f1a1"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42d6007722d46bd2c95cce700181570b56edc0dcbadbfe7855ec26c3f2d7e008"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:320c2f4106962ecea0f33d8d31b985d3c185757c49c1fb735501515f963715ed"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbd3fe37353c62fd0eb19fb76f78aa693716262bcd5f9c14bb9e5aca4b3f0dc4"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17e51ad1e6131c496b58d317bc9abec71f44eb1957d32629d06013a21bc99cac"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72bc3a5effa5974be6d965ed8301ac1e869bc18425c8a8fac179fbe7876e3aee"}, - {file = "regex-2022.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e5602a9b5074dcacc113bba4d2f011d2748f50e3201c8139ac5b68cf2a76bd8b"}, - {file = "regex-2022.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:729aa8ca624c42f309397c5fc9e21db90bf7e2fdd872461aabdbada33de9063c"}, - {file = "regex-2022.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d6ecfd1970b3380a569d7b3ecc5dd70dba295897418ed9e31ec3c16a5ab099a5"}, - {file = "regex-2022.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:13bbf0c9453c6d16e5867bda7f6c0c7cff1decf96c5498318bb87f8136d2abd4"}, - {file = "regex-2022.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:58ba41e462653eaf68fc4a84ec4d350b26a98d030be1ab24aba1adcc78ffe447"}, - {file = "regex-2022.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c0446b2871335d5a5e9fcf1462f954586b09a845832263db95059dcd01442015"}, - {file = "regex-2022.3.2-cp39-cp39-win32.whl", hash = "sha256:20e6a27959f162f979165e496add0d7d56d7038237092d1aba20b46de79158f1"}, - {file = "regex-2022.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:9efa41d1527b366c88f265a227b20bcec65bda879962e3fc8a2aee11e81266d7"}, - {file = "regex-2022.3.2.tar.gz", hash = "sha256:79e5af1ff258bc0fe0bdd6f69bc4ae33935a898e3cbefbbccf22e88a27fa053b"}, + {file = "regex-2022.9.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0394265391a86e2bbaa7606e59ac71bd9f1edf8665a59e42771a9c9adbf6fd4f"}, + {file = "regex-2022.9.13-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:86df2049b18745f3cd4b0f4c4ef672bfac4b80ca488e6ecfd2bbfe68d2423a2c"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce331b076b2b013e7d7f07157f957974ef0b0881a808e8a4a4b3b5105aee5d04"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:360ffbc9357794ae41336b681dff1c0463193199dfb91fcad3ec385ea4972f46"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18e503b1e515a10282b3f14f1b3d856194ecece4250e850fad230842ed31227f"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e167d1ccd41d27b7b6655bb7a2dcb1b1eb1e0d2d662043470bd3b4315d8b2b"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4146cb7ae6029fc83b5c905ec6d806b7e5568dc14297c423e66b86294bad6c39"}, + {file = "regex-2022.9.13-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a1aec4ae549fd7b3f52ceaf67e133010e2fba1538bf4d5fc5cd162a5e058d5df"}, + {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cab548d6d972e1de584161487b2ac1aa82edd8430d1bde69587ba61698ad1cfb"}, + {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3d64e1a7e6d98a4cdc8b29cb8d8ed38f73f49e55fbaa737bdb5933db99b9de22"}, + {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:67a4c625361db04ae40ef7c49d3cbe2c1f5ff10b5a4491327ab20f19f2fb5d40"}, + {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:5d0dd8b06896423211ce18fba0c75dacc49182a1d6514c004b535be7163dca0f"}, + {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4318f69b79f9f7d84a7420e97d4bfe872dc767c72f891d4fea5fa721c74685f7"}, + {file = "regex-2022.9.13-cp310-cp310-win32.whl", hash = "sha256:26df88c9636a0c3f3bd9189dd435850a0c49d0b7d6e932500db3f99a6dd604d1"}, + {file = "regex-2022.9.13-cp310-cp310-win_amd64.whl", hash = "sha256:6fe1dd1021e0f8f3f454ce2811f1b0b148f2d25bb38c712fec00316551e93650"}, + {file = "regex-2022.9.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:83cc32a1a2fa5bac00f4abc0e6ce142e3c05d3a6d57e23bd0f187c59b4e1e43b"}, + {file = "regex-2022.9.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2effeaf50a6838f3dd4d3c5d265f06eabc748f476e8441892645ae3a697e273"}, + {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a786a55d00439d8fae4caaf71581f2aaef7297d04ee60345c3594efef5648a"}, + {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b701dbc124558fd2b1b08005eeca6c9160e209108fbcbd00091fcfac641ac7"}, + {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dab81cc4d58026861445230cfba27f9825e9223557926e7ec22156a1a140d55c"}, + {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0c5cc3d1744a67c3b433dce91e5ef7c527d612354c1f1e8576d9e86bc5c5e2"}, + {file = "regex-2022.9.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:518272f25da93e02af4f1e94985f5042cec21557ef3591027d0716f2adda5d0a"}, + {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8418ee2cb857b83881b8f981e4c636bc50a0587b12d98cb9b947408a3c484fe7"}, + {file = 
"regex-2022.9.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cfa4c956ff0a977c4823cb3b930b0a4e82543b060733628fec7ab3eb9b1abe37"}, + {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a1c4d17879dd4c4432c08a1ca1ab379f12ab54af569e945b6fc1c4cf6a74ca45"}, + {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:77c2879d3ba51e5ca6c2b47f2dcf3d04a976a623a8fc8236010a16c9e0b0a3c7"}, + {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2885ec6eea629c648ecc9bde0837ec6b92208b7f36381689937fe5d64a517e8"}, + {file = "regex-2022.9.13-cp311-cp311-win32.whl", hash = "sha256:2dda4b096a6f630d6531728a45bd12c67ec3badf44342046dc77d4897277d4f2"}, + {file = "regex-2022.9.13-cp311-cp311-win_amd64.whl", hash = "sha256:592b9e2e1862168e71d9e612bfdc22c451261967dbd46681f14e76dfba7105fd"}, + {file = "regex-2022.9.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:df8fe00b60e4717662c7f80c810ba66dcc77309183c76b7754c0dff6f1d42054"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:995e70bb8c91d1b99ed2aaf8ec44863e06ad1dfbb45d7df95f76ef583ec323a9"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad75173349ad79f9d21e0d0896b27dcb37bfd233b09047bc0b4d226699cf5c87"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7681c49da1a2d4b905b4f53d86c9ba4506e79fba50c4a664d9516056e0f7dfcc"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bc8edc5f8ef0ebb46f3fa0d02bd825bbe9cc63d59e428ffb6981ff9672f6de1"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bee775ff05c9d519195bd9e8aaaccfe3971db60f89f89751ee0f234e8aeac5"}, + {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1a901ce5cd42658ab8f8eade51b71a6d26ad4b68c7cfc86b87efc577dfa95602"}, + {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:14a7ab070fa3aec288076eed6ed828587b805ef83d37c9bfccc1a4a7cfbd8111"}, + {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d23ac6b4bf9e32fcde5fcdb2e1fd5e7370d6693fcac51ee1d340f0e886f50d1f"}, + {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:4cdbfa6d2befeaee0c899f19222e9b20fc5abbafe5e9c43a46ef819aeb7b75e5"}, + {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ab07934725e6f25c6f87465976cc69aef1141e86987af49d8c839c3ffd367c72"}, + {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d2a1371dc73e921f3c2e087c05359050f3525a9a34b476ebc8130e71bec55e97"}, + {file = "regex-2022.9.13-cp36-cp36m-win32.whl", hash = "sha256:fcbd1edff1473d90dc5cf4b52d355cf1f47b74eb7c85ba6e45f45d0116b8edbd"}, + {file = "regex-2022.9.13-cp36-cp36m-win_amd64.whl", hash = "sha256:fe428822b7a8c486bcd90b334e9ab541ce6cc0d6106993d59f201853e5e14121"}, + {file = "regex-2022.9.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d7430f041755801b712ec804aaf3b094b9b5facbaa93a6339812a8e00d7bd53a"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:079c182f99c89524069b9cd96f5410d6af437e9dca576a7d59599a574972707e"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:59bac44b5a07b08a261537f652c26993af9b1bbe2a29624473968dd42fc29d56"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a59d0377e58d96a6f11636e97992f5b51b7e1e89eb66332d1c01b35adbabfe8a"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9d68eb704b24bc4d441b24e4a12653acd07d2c39940548761e0985a08bc1fff"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0385d66e73cdd4462f3cc42c76a6576ddcc12472c30e02a2ae82061bff132c32"}, + {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db45016364eec9ddbb5af93c8740c5c92eb7f5fc8848d1ae04205a40a1a2efc6"}, + {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:03ff695518482b946a6d3d4ce9cbbd99a21320e20d94913080aa3841f880abcd"}, + {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6b32b45433df1fad7fed738fe15200b6516da888e0bd1fdd6aa5e50cc16b76bc"}, + {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:003a2e1449d425afc817b5f0b3d4c4aa9072dd5f3dfbf6c7631b8dc7b13233de"}, + {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a9eb9558e1d0f78e07082d8a70d5c4d631c8dd75575fae92105df9e19c736730"}, + {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f6e0321921d2fdc082ef90c1fd0870f129c2e691bfdc4937dcb5cd308aba95c4"}, + {file = "regex-2022.9.13-cp37-cp37m-win32.whl", hash = "sha256:3f3b4594d564ed0b2f54463a9f328cf6a5b2a32610a90cdff778d6e3e561d08b"}, + {file = "regex-2022.9.13-cp37-cp37m-win_amd64.whl", hash = "sha256:8aba0d01e3dfd335f2cb107079b07fdddb4cd7fb2d8c8a1986f9cb8ce9246c24"}, + {file = "regex-2022.9.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:944567bb08f52268d8600ee5bdf1798b2b62ea002cc692a39cec113244cbdd0d"}, + {file = "regex-2022.9.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b664a4d33ffc6be10996606dfc25fd3248c24cc589c0b139feb4c158053565e"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f06cc1190f3db3192ab8949e28f2c627e1809487e2cfc435b6524c1ce6a2f391"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c57d50d4d5eb0c862569ca3c840eba2a73412f31d9ecc46ef0d6b2e621a592b"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19a4da6f513045f5ba00e491215bd00122e5bd131847586522463e5a6b2bd65f"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a926339356fe29595f8e37af71db37cd87ff764e15da8ad5129bbaff35bcc5a6"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:091efcfdd4178a7e19a23776dc2b1fafb4f57f4d94daf340f98335817056f874"}, + {file = "regex-2022.9.13-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:880dbeb6bdde7d926b4d8e41410b16ffcd4cb3b4c6d926280fea46e2615c7a01"}, + {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:73b985c9fc09a7896846e26d7b6f4d1fd5a20437055f4ef985d44729f9f928d0"}, + {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c0b7cb9598795b01f9a3dd3f770ab540889259def28a3bf9b2fa24d52edecba3"}, + {file = 
"regex-2022.9.13-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37e5a26e76c46f54b3baf56a6fdd56df9db89758694516413757b7d127d4c57b"}, + {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:99945ddb4f379bb9831c05e9f80f02f079ba361a0fb1fba1fc3b267639b6bb2e"}, + {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dcbcc9e72a791f622a32d17ff5011326a18996647509cac0609a7fc43adc229"}, + {file = "regex-2022.9.13-cp38-cp38-win32.whl", hash = "sha256:d3102ab9bf16bf541ca228012d45d88d2a567c9682a805ae2c145a79d3141fdd"}, + {file = "regex-2022.9.13-cp38-cp38-win_amd64.whl", hash = "sha256:14216ea15efc13f28d0ef1c463d86d93ca7158a79cd4aec0f9273f6d4c6bb047"}, + {file = "regex-2022.9.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9a165a05979e212b2c2d56a9f40b69c811c98a788964e669eb322de0a3e420b4"}, + {file = "regex-2022.9.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:14c71437ffb89479c89cc7022a5ea2075a842b728f37205e47c824cc17b30a42"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee7045623a5ace70f3765e452528b4c1f2ce669ed31959c63f54de64fe2f6ff7"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6e521d9db006c5e4a0f8acfef738399f72b704913d4e083516774eb51645ad7c"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86548b8234b2be3985dbc0b385e35f5038f0f3e6251464b827b83ebf4ed90e5"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b39ee3b280e15824298b97cec3f7cbbe6539d8282cc8a6047a455b9a72c598"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6e6e61e9a38b6cc60ca3e19caabc90261f070f23352e66307b3d21a24a34aaf"}, + {file = "regex-2022.9.13-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d837ccf3bd2474feabee96cd71144e991472e400ed26582edc8ca88ce259899c"}, + {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6adfe300848d61a470ec7547adc97b0ccf86de86a99e6830f1d8c8d19ecaf6b3"}, + {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d5b003d248e6f292475cd24b04e5f72c48412231961a675edcb653c70730e79e"}, + {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:d5edd3eb877c9fc2e385173d4a4e1d792bf692d79e25c1ca391802d36ecfaa01"}, + {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:50e764ffbd08b06aa8c4e86b8b568b6722c75d301b33b259099f237c46b2134e"}, + {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d43bd402b27e0e7eae85c612725ba1ce7798f20f6fab4e8bc3de4f263294f03"}, + {file = "regex-2022.9.13-cp39-cp39-win32.whl", hash = "sha256:7fcf7f94ccad19186820ac67e2ec7e09e0ac2dac39689f11cf71eac580503296"}, + {file = "regex-2022.9.13-cp39-cp39-win_amd64.whl", hash = "sha256:322bd5572bed36a5b39952d88e072738926759422498a96df138d93384934ff8"}, + {file = "regex-2022.9.13.tar.gz", hash = "sha256:f07373b6e56a6f3a0df3d75b651a278ca7bd357a796078a26a958ea1ce0588fd"}, ] reorder-python-imports = [ - {file = "reorder_python_imports-2.7.1-py2.py3-none-any.whl", hash = "sha256:5477c008cd7a5f2dbe32a35e90d74b5a3427468731441f033034310e427143a3"}, - {file = "reorder_python_imports-2.7.1.tar.gz", hash = "sha256:1ae34422f13f5a4b4669f340774909d721bfc0a8311973c70b3a50540b595bc5"}, -] -requests = [ - {file = 
"requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, - {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, + {file = "reorder_python_imports-2.8.0-py2.py3-none-any.whl", hash = "sha256:03354608c610a25cba75a352ad86552849dcf3c90849fcb415298efc911a4ecf"}, + {file = "reorder_python_imports-2.8.0.tar.gz", hash = "sha256:435af2a6feb39de3c4b7a415079f85b4b0052d3a7ed9ea7b269b0aff725abdaf"}, ] +requests = [] restructuredtext-lint = [ {file = "restructuredtext_lint-1.4.0.tar.gz", hash = "sha256:1b235c0c922341ab6c530390892eb9e92f90b9b75046063e047cacfb0f050c45"}, ] @@ -2679,31 +2635,7 @@ safety = [ {file = "safety-1.10.3-py2.py3-none-any.whl", hash = "sha256:5f802ad5df5614f9622d8d71fedec2757099705c2356f862847c58c6dfe13e84"}, {file = "safety-1.10.3.tar.gz", hash = "sha256:30e394d02a20ac49b7f65292d19d38fa927a8f9582cdfd3ad1adbbc66c641ad5"}, ] -scipy = [ - {file = "scipy-1.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:87b01c7d5761e8a266a0fbdb9d88dcba0910d63c1c671bdb4d99d29f469e9e03"}, - {file = "scipy-1.8.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:ae3e327da323d82e918e593460e23babdce40d7ab21490ddf9fc06dec6b91a18"}, - {file = "scipy-1.8.0-cp310-cp310-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:16e09ef68b352d73befa8bcaf3ebe25d3941fe1a58c82909d5589856e6bc8174"}, - {file = "scipy-1.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c17a1878d00a5dd2797ccd73623ceca9d02375328f6218ee6d921e1325e61aff"}, - {file = "scipy-1.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937d28722f13302febde29847bbe554b89073fbb924a30475e5ed7b028898b5f"}, - {file = "scipy-1.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:8f4d059a97b29c91afad46b1737274cb282357a305a80bdd9e8adf3b0ca6a3f0"}, - {file = "scipy-1.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:38aa39b6724cb65271e469013aeb6f2ce66fd44f093e241c28a9c6bc64fd79ed"}, - {file = "scipy-1.8.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:559a8a4c03a5ba9fe3232f39ed24f86457e4f3f6c0abbeae1fb945029f092720"}, - {file = "scipy-1.8.0-cp38-cp38-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:f4a6d3b9f9797eb2d43938ac2c5d96d02aed17ef170c8b38f11798717523ddba"}, - {file = "scipy-1.8.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:92b2c2af4183ed09afb595709a8ef5783b2baf7f41e26ece24e1329c109691a7"}, - {file = "scipy-1.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a279e27c7f4566ef18bab1b1e2c37d168e365080974758d107e7d237d3f0f484"}, - {file = "scipy-1.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad5be4039147c808e64f99c0e8a9641eb5d2fa079ff5894dcd8240e94e347af4"}, - {file = "scipy-1.8.0-cp38-cp38-win32.whl", hash = "sha256:3d9dd6c8b93a22bf9a3a52d1327aca7e092b1299fb3afc4f89e8eba381be7b59"}, - {file = "scipy-1.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:5e73343c5e0d413c1f937302b2e04fb07872f5843041bcfd50699aef6e95e399"}, - {file = "scipy-1.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:de2e80ee1d925984c2504812a310841c241791c5279352be4707cdcd7c255039"}, - {file = "scipy-1.8.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c2bae431d127bf0b1da81fc24e4bba0a84d058e3a96b9dd6475dfcb3c5e8761e"}, - {file = "scipy-1.8.0-cp39-cp39-macosx_12_0_universal2.macosx_10_9_x86_64.whl", hash = "sha256:723b9f878095ed994756fa4ee3060c450e2db0139c5ba248ee3f9628bd64e735"}, - 
{file = "scipy-1.8.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:011d4386b53b933142f58a652aa0f149c9b9242abd4f900b9f4ea5fbafc86b89"}, - {file = "scipy-1.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6f0cd9c0bd374ef834ee1e0f0999678d49dcc400ea6209113d81528958f97c7"}, - {file = "scipy-1.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3720d0124aced49f6f2198a6900304411dbbeed12f56951d7c66ebef05e3df6"}, - {file = "scipy-1.8.0-cp39-cp39-win32.whl", hash = "sha256:3d573228c10a3a8c32b9037be982e6440e411b443a6267b067cac72f690b8d56"}, - {file = "scipy-1.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:bb7088e89cd751acf66195d2f00cf009a1ea113f3019664032d9075b1e727b6c"}, - {file = "scipy-1.8.0.tar.gz", hash = "sha256:31d4f2d6b724bc9a98e527b5849b8a7e589bf1ea630c33aa563eda912c9ff0bd"}, -] +scipy = [] seaborn = [ {file = "seaborn-0.11.2-py3-none-any.whl", hash = "sha256:85a6baa9b55f81a0623abddc4a26b334653ff4c6b18c418361de19dbba0ef283"}, {file = "seaborn-0.11.2.tar.gz", hash = "sha256:cf45e9286d40826864be0e3c066f98536982baf701a7caa386511792d61ff4f6"}, @@ -2729,16 +2661,16 @@ sparse = [ {file = "sparse-0.13.0.tar.gz", hash = "sha256:685dc994aa770ee1b23f2d5392819c8429f27958771f8dceb2c4fb80210d5915"}, ] sphinx = [ - {file = "Sphinx-4.4.0-py3-none-any.whl", hash = "sha256:5da895959511473857b6d0200f56865ed62c31e8f82dd338063b84ec022701fe"}, - {file = "Sphinx-4.4.0.tar.gz", hash = "sha256:6caad9786055cb1fa22b4a365c1775816b876f91966481765d7d50e9f0dd35cc"}, + {file = "Sphinx-4.5.0-py3-none-any.whl", hash = "sha256:ebf612653238bcc8f4359627a9b7ce44ede6fdd75d9d30f68255c7383d3a6226"}, + {file = "Sphinx-4.5.0.tar.gz", hash = "sha256:7bf8ca9637a4ee15af412d1a1d9689fec70523a68ca9bb9127c2f3eeb344e2e6"}, ] sphinx-autobuild = [ {file = "sphinx-autobuild-2021.3.14.tar.gz", hash = "sha256:de1ca3b66e271d2b5b5140c35034c89e47f263f2cd5db302c9217065f7443f05"}, {file = "sphinx_autobuild-2021.3.14-py3-none-any.whl", hash = "sha256:8fe8cbfdb75db04475232f05187c776f46f6e9e04cacf1e49ce81bdac649ccac"}, ] sphinx-autodoc-typehints = [ - {file = "sphinx_autodoc_typehints-1.17.0-py3-none-any.whl", hash = "sha256:081daf53077b4ae1c28347d6d858e13e63aefe3b4aacef79fd717dd60687b470"}, - {file = "sphinx_autodoc_typehints-1.17.0.tar.gz", hash = "sha256:51c7b3f5cb9ccd15d0b52088c62df3094f1abd9612930340365c26def8629a14"}, + {file = "sphinx_autodoc_typehints-1.19.1-py3-none-any.whl", hash = "sha256:9be46aeeb1b315eb5df1f3a7cb262149895d16c7d7dcd77b92513c3c3a1e85e6"}, + {file = "sphinx_autodoc_typehints-1.19.1.tar.gz", hash = "sha256:6c841db55e0e9be0483ff3962a2152b60e79306f4288d8c4e7e86ac84486a5ea"}, ] sphinx-click = [ {file = "sphinx-click-3.1.0.tar.gz", hash = "sha256:36dbf271b1d2600fb05bd598ddeed0b6b6acf35beaf8bc9d507ba7716b232b0e"}, @@ -2776,10 +2708,7 @@ sphinxcontrib-serializinghtml = [ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"}, {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"}, ] -stevedore = [ - {file = "stevedore-3.5.0-py3-none-any.whl", hash = "sha256:a547de73308fd7e90075bb4d301405bebf705292fa90a90fc3bcf9133f58616c"}, - {file = "stevedore-3.5.0.tar.gz", hash = "sha256:f40253887d8712eaa2bb0ea3830374416736dc8ec0e22f5a65092c1174c44335"}, -] +stevedore = [] text-unidecode = [ {file = "text-unidecode-1.3.tar.gz", hash = 
"sha256:bad6603bb14d279193107714b288be206cac565dfa49aa5b105294dd5c4aab93"}, {file = "text_unidecode-1.3-py2.py3-none-any.whl", hash = "sha256:1311f10e8b895935241623731c2ba64f4c455287888b18189350b67134a822e8"}, @@ -2797,52 +2726,10 @@ tomlkit = [ {file = "tomlkit-0.7.2.tar.gz", hash = "sha256:d7a454f319a7e9bd2e249f239168729327e4dd2d27b17dc68be264ad1ce36754"}, ] toolz = [ - {file = "toolz-0.11.2-py3-none-any.whl", hash = "sha256:a5700ce83414c64514d82d60bcda8aabfde092d1c1a8663f9200c07fdcc6da8f"}, - {file = "toolz-0.11.2.tar.gz", hash = "sha256:6b312d5e15138552f1bda8a4e66c30e236c831b612b2bf0005f8a1df10a4bc33"}, -] -tornado = [ - {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, - {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, - {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, - {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, - {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, - {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, - {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = 
"sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, - {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, - {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, - {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, - {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, - {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, - {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, - {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, - {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, - {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, - {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, - {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, + {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"}, + {file = "toolz-0.12.0.tar.gz", hash = "sha256:88c570861c440ee3f2f6037c4654613228ff40c93a6c25e0eba70d17282c6194"}, ] +tornado = [] typeguard = [ {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, @@ -2855,82 +2742,24 @@ types-pkg-resources = [ {file = "types_pkg_resources-0.1.3-py2.py3-none-any.whl", hash = "sha256:0cb9972cee992249f93fff1a491bf2dc3ce674e5a1926e27d4f0866f7d9b6d9c"}, ] types-requests = [ - {file = 
"types-requests-2.27.11.tar.gz", hash = "sha256:6a7ed24b21780af4a5b5e24c310b2cd885fb612df5fd95584d03d87e5f2a195a"}, - {file = "types_requests-2.27.11-py3-none-any.whl", hash = "sha256:506279bad570c7b4b19ac1f22e50146538befbe0c133b2cea66a9b04a533a859"}, + {file = "types-requests-2.28.10.tar.gz", hash = "sha256:97d8f40aa1ffe1e58c3726c77d63c182daea9a72d9f1fa2cafdea756b2a19f2c"}, + {file = "types_requests-2.28.10-py3-none-any.whl", hash = "sha256:45b485725ed58752f2b23461252f1c1ad9205b884a1e35f786bb295525a3e16a"}, ] types-urllib3 = [ - {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, - {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"}, -] -typing-extensions = [ - {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"}, - {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"}, -] -urllib3 = [ - {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, - {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, + {file = "types-urllib3-1.26.24.tar.gz", hash = "sha256:a1b3aaea7dda3eb1b51699ee723aadd235488e4dc4648e030f09bc429ecff42f"}, + {file = "types_urllib3-1.26.24-py3-none-any.whl", hash = "sha256:cf7918503d02d3576e503bbfb419b0e047c4617653bba09624756ab7175e15c9"}, ] +typing-extensions = [] +urllib3 = [] virtualenv = [ - {file = "virtualenv-20.13.3-py2.py3-none-any.whl", hash = "sha256:dd448d1ded9f14d1a4bfa6bfc0c5b96ae3be3f2d6c6c159b23ddcfd701baa021"}, - {file = "virtualenv-20.13.3.tar.gz", hash = "sha256:e9dd1a1359d70137559034c0f5433b34caf504af2dc756367be86a5a32967134"}, + {file = "virtualenv-20.16.5-py3-none-any.whl", hash = "sha256:d07dfc5df5e4e0dbc92862350ad87a36ed505b978f6c39609dc489eadd5b0d27"}, + {file = "virtualenv-20.16.5.tar.gz", hash = "sha256:227ea1b9994fdc5ea31977ba3383ef296d7472ea85be9d6732e42a91c04e80da"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] -wrapt = [ - {file = "wrapt-1.13.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229"}, - {file = "wrapt-1.13.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_i686.whl", hash = 
"sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80"}, - {file = "wrapt-1.13.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca"}, - {file = "wrapt-1.13.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056"}, - {file = "wrapt-1.13.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096"}, - {file = "wrapt-1.13.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33"}, - {file = "wrapt-1.13.3-cp310-cp310-win32.whl", hash = "sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f"}, - {file = "wrapt-1.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3"}, - {file = "wrapt-1.13.3-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755"}, - {file = "wrapt-1.13.3-cp35-cp35m-win32.whl", hash = "sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851"}, - {file = "wrapt-1.13.3-cp35-cp35m-win_amd64.whl", hash = "sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13"}, - {file = "wrapt-1.13.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade"}, - {file = "wrapt-1.13.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf"}, - {file = "wrapt-1.13.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125"}, - {file = "wrapt-1.13.3-cp36-cp36m-win32.whl", hash = "sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36"}, - {file = "wrapt-1.13.3-cp36-cp36m-win_amd64.whl", hash = "sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10"}, - {file = "wrapt-1.13.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709"}, - {file = "wrapt-1.13.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2"}, - {file = "wrapt-1.13.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b"}, - {file = "wrapt-1.13.3-cp37-cp37m-win32.whl", hash = "sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829"}, - {file = "wrapt-1.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea"}, - {file = "wrapt-1.13.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554"}, - {file = "wrapt-1.13.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b"}, - {file = "wrapt-1.13.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce"}, - {file = "wrapt-1.13.3-cp38-cp38-win32.whl", hash = "sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79"}, - {file = "wrapt-1.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb"}, - {file = "wrapt-1.13.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32"}, - {file = "wrapt-1.13.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e"}, - {file = "wrapt-1.13.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640"}, - {file = "wrapt-1.13.3-cp39-cp39-win32.whl", hash = "sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374"}, - {file = "wrapt-1.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb"}, - {file = "wrapt-1.13.3.tar.gz", hash = "sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185"}, -] +wrapt = [] xdoctest = [ {file = "xdoctest-0.15.10-py3-none-any.whl", hash = "sha256:7666bd0511df59275dfe94ef94b0fde9654afd14f00bf88902fdc9bcee77d527"}, {file = "xdoctest-0.15.10.tar.gz", hash = "sha256:5f16438f2b203860e75ec594dbc38020df7524db0b41bb88467ea0a6030e6685"}, @@ -2939,7 +2768,4 @@ xlrd = [ {file = "xlrd-1.2.0-py2.py3-none-any.whl", hash = 
"sha256:e551fb498759fa3a5384a94ccd4c3c02eb7c00ea424426e212ac0c57be9dfbde"}, {file = "xlrd-1.2.0.tar.gz", hash = "sha256:546eb36cee8db40c3eaa46c351e67ffee6eeb5fa2650b71bc4c758a29a1b29b2"}, ] -zipp = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, -] +zipp = [] diff --git a/pyproject.toml b/pyproject.toml index 95e5306b..4fc1a9d3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,7 +37,7 @@ seaborn = "^0.11.2" bandit = "1.7.2" [tool.poetry.dev-dependencies] -pytest = "^6.2.3" +pytest = "^7.1.3" coverage = {extras = ["toml"], version = "^5.3"} safety = "^1.9.0" typeguard = "^2.12.0" @@ -63,7 +63,7 @@ types-requests = "^2.25.2" types-attrs = "^19.1.0" sphinx-rtd-dark-mode = "^1.2.3" Jinja2 = "^3.0.1" -mypy = "^0.910" +mypy = "^0.971" matplotlib = "^3.5.1" nox = "^2022.1.7" cookietemple = "^1.3.11" From 0a21e4a5e74292b347fdf8584022a3c97125cb3c Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:15:59 +0200 Subject: [PATCH 46/58] updated pre-commit yaml --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4f706840..8c29e794 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -41,11 +41,11 @@ repos: types: [text] stages: [commit, push, manual] - repo: https://github.com/pre-commit/mirrors-prettier - rev: v2.3.0 + rev: v3.0.0-alpha.0 hooks: - id: prettier - repo: https://github.com/pycqa/isort - rev: 5.8.0 + rev: 5.10.1 hooks: - id: isort name: isort (python) From 059437513c1972635fd2d9bd35583ba2f1d1d4fa Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:40:06 +0200 Subject: [PATCH 47/58] bumped version of black to support newer click --- poetry.lock | 146 +++++++++++-------------------------------------- pyproject.toml | 2 +- 2 files changed, 33 insertions(+), 115 deletions(-) diff --git a/poetry.lock b/poetry.lock index 067d2f3d..7af473ff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -126,26 +126,24 @@ chardet = ">=3.0.2" [[package]] name = "black" -version = "21.10b0" +version = "22.8.0" description = "The uncompromising code formatter." category = "dev" optional = false python-versions = ">=3.6.2" [package.dependencies] -click = ">=7.1.2" +click = ">=8.0.0" mypy-extensions = ">=0.4.3" -pathspec = ">=0.9.0,<1" +pathspec = ">=0.9.0" platformdirs = ">=2" -regex = ">=2020.1.8" -tomli = ">=0.2.6,<2.0.0" -typing-extensions = ">=3.10.0.0" +tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} [package.extras] colorama = ["colorama (>=0.4.3)"] d = ["aiohttp (>=3.7.4)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -python2 = ["typed-ast (>=1.4.3)"] uvloop = ["uvloop (>=0.15.2)"] [[package]] @@ -1197,14 +1195,6 @@ prompt_toolkit = ">=2.0,<4.0" [package.extras] docs = ["Sphinx (>=3.3,<4.0)", "sphinx-autobuild (>=2020.9.1,<2021.0.0)", "sphinx-autodoc-typehints (>=1.11.1,<2.0.0)", "sphinx-copybutton (>=0.3.1,<0.4.0)", "sphinx-rtd-theme (>=0.5.0,<0.6.0)"] -[[package]] -name = "regex" -version = "2022.9.13" -description = "Alternative regular expression module, to replace re." 
-category = "dev" -optional = false -python-versions = ">=3.6" - [[package]] name = "reorder-python-imports" version = "2.8.0" @@ -1584,11 +1574,11 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "tomli" -version = "1.2.3" +version = "2.0.1" description = "A lil' TOML parser" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "tomlkit" @@ -1758,7 +1748,7 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "1.1" python-versions = ">=3.8.0, <3.10.0" -content-hash = "57ce11025e1ec208a3d9406765fc223dd905e81b7047673ffcf6b67710269b41" +content-hash = "7b73563131b544a6ebecfd78e37fccb86da3815dd206993ebf4d5ec4030edcb7" [metadata.files] alabaster = [ @@ -1796,8 +1786,29 @@ binaryornot = [ {file = "binaryornot-0.4.4.tar.gz", hash = "sha256:359501dfc9d40632edc9fac890e19542db1a287bbcfa58175b66658392018061"}, ] black = [ - {file = "black-21.10b0-py3-none-any.whl", hash = "sha256:6eb7448da9143ee65b856a5f3676b7dda98ad9abe0f87fce8c59291f15e82a5b"}, - {file = "black-21.10b0.tar.gz", hash = "sha256:a9952229092e325fe5f3dae56d81f639b23f7131eb840781947e4b2886030f33"}, + {file = "black-22.8.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ce957f1d6b78a8a231b18e0dd2d94a33d2ba738cd88a7fe64f53f659eea49fdd"}, + {file = "black-22.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5107ea36b2b61917956d018bd25129baf9ad1125e39324a9b18248d362156a27"}, + {file = "black-22.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8166b7bfe5dcb56d325385bd1d1e0f635f24aae14b3ae437102dedc0c186747"}, + {file = "black-22.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd82842bb272297503cbec1a2600b6bfb338dae017186f8f215c8958f8acf869"}, + {file = "black-22.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d839150f61d09e7217f52917259831fe2b689f5c8e5e32611736351b89bb2a90"}, + {file = "black-22.8.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a05da0430bd5ced89176db098567973be52ce175a55677436a271102d7eaa3fe"}, + {file = "black-22.8.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a098a69a02596e1f2a58a2a1c8d5a05d5a74461af552b371e82f9fa4ada8342"}, + {file = "black-22.8.0-cp36-cp36m-win_amd64.whl", hash = "sha256:5594efbdc35426e35a7defa1ea1a1cb97c7dbd34c0e49af7fb593a36bd45edab"}, + {file = "black-22.8.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a983526af1bea1e4cf6768e649990f28ee4f4137266921c2c3cee8116ae42ec3"}, + {file = "black-22.8.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b2c25f8dea5e8444bdc6788a2f543e1fb01494e144480bc17f806178378005e"}, + {file = "black-22.8.0-cp37-cp37m-win_amd64.whl", hash = "sha256:78dd85caaab7c3153054756b9fe8c611efa63d9e7aecfa33e533060cb14b6d16"}, + {file = "black-22.8.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:cea1b2542d4e2c02c332e83150e41e3ca80dc0fb8de20df3c5e98e242156222c"}, + {file = "black-22.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5b879eb439094751185d1cfdca43023bc6786bd3c60372462b6f051efa6281a5"}, + {file = "black-22.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a12e4e1353819af41df998b02c6742643cfef58282915f781d0e4dd7a200411"}, + {file = "black-22.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3a73f66b6d5ba7288cd5d6dad9b4c9b43f4e8a4b789a94bf5abfb878c663eb3"}, + {file = "black-22.8.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:e981e20ec152dfb3e77418fb616077937378b322d7b26aa1ff87717fb18b4875"}, + {file = "black-22.8.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8ce13ffed7e66dda0da3e0b2eb1bdfc83f5812f66e09aca2b0978593ed636b6c"}, + {file = "black-22.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:32a4b17f644fc288c6ee2bafdf5e3b045f4eff84693ac069d87b1a347d861497"}, + {file = "black-22.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ad827325a3a634bae88ae7747db1a395d5ee02cf05d9aa7a9bd77dfb10e940c"}, + {file = "black-22.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53198e28a1fb865e9fe97f88220da2e44df6da82b18833b588b1883b16bb5d41"}, + {file = "black-22.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:bc4d4123830a2d190e9cc42a2e43570f82ace35c3aeb26a512a2102bce5af7ec"}, + {file = "black-22.8.0-py3-none-any.whl", hash = "sha256:d2c21d439b2baf7aa80d6dd4e3659259be64c6f49dfd0f32091063db0e006db4"}, + {file = "black-22.8.0.tar.gz", hash = "sha256:792f7eb540ba9a17e8656538701d3eb1afcb134e3b45b71f20b25c77a8db7e6e"}, ] cached-property = [ {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, @@ -2498,96 +2509,6 @@ questionary = [ {file = "questionary-1.10.0-py3-none-any.whl", hash = "sha256:fecfcc8cca110fda9d561cb83f1e97ecbb93c613ff857f655818839dac74ce90"}, {file = "questionary-1.10.0.tar.gz", hash = "sha256:600d3aefecce26d48d97eee936fdb66e4bc27f934c3ab6dd1e292c4f43946d90"}, ] -regex = [ - {file = "regex-2022.9.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0394265391a86e2bbaa7606e59ac71bd9f1edf8665a59e42771a9c9adbf6fd4f"}, - {file = "regex-2022.9.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:86df2049b18745f3cd4b0f4c4ef672bfac4b80ca488e6ecfd2bbfe68d2423a2c"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce331b076b2b013e7d7f07157f957974ef0b0881a808e8a4a4b3b5105aee5d04"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:360ffbc9357794ae41336b681dff1c0463193199dfb91fcad3ec385ea4972f46"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18e503b1e515a10282b3f14f1b3d856194ecece4250e850fad230842ed31227f"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e167d1ccd41d27b7b6655bb7a2dcb1b1eb1e0d2d662043470bd3b4315d8b2b"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4146cb7ae6029fc83b5c905ec6d806b7e5568dc14297c423e66b86294bad6c39"}, - {file = "regex-2022.9.13-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a1aec4ae549fd7b3f52ceaf67e133010e2fba1538bf4d5fc5cd162a5e058d5df"}, - {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cab548d6d972e1de584161487b2ac1aa82edd8430d1bde69587ba61698ad1cfb"}, - {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3d64e1a7e6d98a4cdc8b29cb8d8ed38f73f49e55fbaa737bdb5933db99b9de22"}, - {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:67a4c625361db04ae40ef7c49d3cbe2c1f5ff10b5a4491327ab20f19f2fb5d40"}, - {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:5d0dd8b06896423211ce18fba0c75dacc49182a1d6514c004b535be7163dca0f"}, - {file = "regex-2022.9.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:4318f69b79f9f7d84a7420e97d4bfe872dc767c72f891d4fea5fa721c74685f7"}, - {file = "regex-2022.9.13-cp310-cp310-win32.whl", hash = "sha256:26df88c9636a0c3f3bd9189dd435850a0c49d0b7d6e932500db3f99a6dd604d1"}, - {file = "regex-2022.9.13-cp310-cp310-win_amd64.whl", hash = "sha256:6fe1dd1021e0f8f3f454ce2811f1b0b148f2d25bb38c712fec00316551e93650"}, - {file = "regex-2022.9.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:83cc32a1a2fa5bac00f4abc0e6ce142e3c05d3a6d57e23bd0f187c59b4e1e43b"}, - {file = "regex-2022.9.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2effeaf50a6838f3dd4d3c5d265f06eabc748f476e8441892645ae3a697e273"}, - {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59a786a55d00439d8fae4caaf71581f2aaef7297d04ee60345c3594efef5648a"}, - {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b701dbc124558fd2b1b08005eeca6c9160e209108fbcbd00091fcfac641ac7"}, - {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dab81cc4d58026861445230cfba27f9825e9223557926e7ec22156a1a140d55c"}, - {file = "regex-2022.9.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0c5cc3d1744a67c3b433dce91e5ef7c527d612354c1f1e8576d9e86bc5c5e2"}, - {file = "regex-2022.9.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:518272f25da93e02af4f1e94985f5042cec21557ef3591027d0716f2adda5d0a"}, - {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8418ee2cb857b83881b8f981e4c636bc50a0587b12d98cb9b947408a3c484fe7"}, - {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cfa4c956ff0a977c4823cb3b930b0a4e82543b060733628fec7ab3eb9b1abe37"}, - {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a1c4d17879dd4c4432c08a1ca1ab379f12ab54af569e945b6fc1c4cf6a74ca45"}, - {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:77c2879d3ba51e5ca6c2b47f2dcf3d04a976a623a8fc8236010a16c9e0b0a3c7"}, - {file = "regex-2022.9.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d2885ec6eea629c648ecc9bde0837ec6b92208b7f36381689937fe5d64a517e8"}, - {file = "regex-2022.9.13-cp311-cp311-win32.whl", hash = "sha256:2dda4b096a6f630d6531728a45bd12c67ec3badf44342046dc77d4897277d4f2"}, - {file = "regex-2022.9.13-cp311-cp311-win_amd64.whl", hash = "sha256:592b9e2e1862168e71d9e612bfdc22c451261967dbd46681f14e76dfba7105fd"}, - {file = "regex-2022.9.13-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:df8fe00b60e4717662c7f80c810ba66dcc77309183c76b7754c0dff6f1d42054"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:995e70bb8c91d1b99ed2aaf8ec44863e06ad1dfbb45d7df95f76ef583ec323a9"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad75173349ad79f9d21e0d0896b27dcb37bfd233b09047bc0b4d226699cf5c87"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7681c49da1a2d4b905b4f53d86c9ba4506e79fba50c4a664d9516056e0f7dfcc"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bc8edc5f8ef0ebb46f3fa0d02bd825bbe9cc63d59e428ffb6981ff9672f6de1"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:b7bee775ff05c9d519195bd9e8aaaccfe3971db60f89f89751ee0f234e8aeac5"}, - {file = "regex-2022.9.13-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1a901ce5cd42658ab8f8eade51b71a6d26ad4b68c7cfc86b87efc577dfa95602"}, - {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:14a7ab070fa3aec288076eed6ed828587b805ef83d37c9bfccc1a4a7cfbd8111"}, - {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d23ac6b4bf9e32fcde5fcdb2e1fd5e7370d6693fcac51ee1d340f0e886f50d1f"}, - {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:4cdbfa6d2befeaee0c899f19222e9b20fc5abbafe5e9c43a46ef819aeb7b75e5"}, - {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ab07934725e6f25c6f87465976cc69aef1141e86987af49d8c839c3ffd367c72"}, - {file = "regex-2022.9.13-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d2a1371dc73e921f3c2e087c05359050f3525a9a34b476ebc8130e71bec55e97"}, - {file = "regex-2022.9.13-cp36-cp36m-win32.whl", hash = "sha256:fcbd1edff1473d90dc5cf4b52d355cf1f47b74eb7c85ba6e45f45d0116b8edbd"}, - {file = "regex-2022.9.13-cp36-cp36m-win_amd64.whl", hash = "sha256:fe428822b7a8c486bcd90b334e9ab541ce6cc0d6106993d59f201853e5e14121"}, - {file = "regex-2022.9.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d7430f041755801b712ec804aaf3b094b9b5facbaa93a6339812a8e00d7bd53a"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:079c182f99c89524069b9cd96f5410d6af437e9dca576a7d59599a574972707e"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59bac44b5a07b08a261537f652c26993af9b1bbe2a29624473968dd42fc29d56"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a59d0377e58d96a6f11636e97992f5b51b7e1e89eb66332d1c01b35adbabfe8a"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9d68eb704b24bc4d441b24e4a12653acd07d2c39940548761e0985a08bc1fff"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0385d66e73cdd4462f3cc42c76a6576ddcc12472c30e02a2ae82061bff132c32"}, - {file = "regex-2022.9.13-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:db45016364eec9ddbb5af93c8740c5c92eb7f5fc8848d1ae04205a40a1a2efc6"}, - {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:03ff695518482b946a6d3d4ce9cbbd99a21320e20d94913080aa3841f880abcd"}, - {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6b32b45433df1fad7fed738fe15200b6516da888e0bd1fdd6aa5e50cc16b76bc"}, - {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:003a2e1449d425afc817b5f0b3d4c4aa9072dd5f3dfbf6c7631b8dc7b13233de"}, - {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a9eb9558e1d0f78e07082d8a70d5c4d631c8dd75575fae92105df9e19c736730"}, - {file = "regex-2022.9.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f6e0321921d2fdc082ef90c1fd0870f129c2e691bfdc4937dcb5cd308aba95c4"}, - {file = "regex-2022.9.13-cp37-cp37m-win32.whl", hash = "sha256:3f3b4594d564ed0b2f54463a9f328cf6a5b2a32610a90cdff778d6e3e561d08b"}, - {file = "regex-2022.9.13-cp37-cp37m-win_amd64.whl", hash = "sha256:8aba0d01e3dfd335f2cb107079b07fdddb4cd7fb2d8c8a1986f9cb8ce9246c24"}, - {file = 
"regex-2022.9.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:944567bb08f52268d8600ee5bdf1798b2b62ea002cc692a39cec113244cbdd0d"}, - {file = "regex-2022.9.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0b664a4d33ffc6be10996606dfc25fd3248c24cc589c0b139feb4c158053565e"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f06cc1190f3db3192ab8949e28f2c627e1809487e2cfc435b6524c1ce6a2f391"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c57d50d4d5eb0c862569ca3c840eba2a73412f31d9ecc46ef0d6b2e621a592b"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19a4da6f513045f5ba00e491215bd00122e5bd131847586522463e5a6b2bd65f"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a926339356fe29595f8e37af71db37cd87ff764e15da8ad5129bbaff35bcc5a6"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:091efcfdd4178a7e19a23776dc2b1fafb4f57f4d94daf340f98335817056f874"}, - {file = "regex-2022.9.13-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:880dbeb6bdde7d926b4d8e41410b16ffcd4cb3b4c6d926280fea46e2615c7a01"}, - {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:73b985c9fc09a7896846e26d7b6f4d1fd5a20437055f4ef985d44729f9f928d0"}, - {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c0b7cb9598795b01f9a3dd3f770ab540889259def28a3bf9b2fa24d52edecba3"}, - {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37e5a26e76c46f54b3baf56a6fdd56df9db89758694516413757b7d127d4c57b"}, - {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:99945ddb4f379bb9831c05e9f80f02f079ba361a0fb1fba1fc3b267639b6bb2e"}, - {file = "regex-2022.9.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dcbcc9e72a791f622a32d17ff5011326a18996647509cac0609a7fc43adc229"}, - {file = "regex-2022.9.13-cp38-cp38-win32.whl", hash = "sha256:d3102ab9bf16bf541ca228012d45d88d2a567c9682a805ae2c145a79d3141fdd"}, - {file = "regex-2022.9.13-cp38-cp38-win_amd64.whl", hash = "sha256:14216ea15efc13f28d0ef1c463d86d93ca7158a79cd4aec0f9273f6d4c6bb047"}, - {file = "regex-2022.9.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9a165a05979e212b2c2d56a9f40b69c811c98a788964e669eb322de0a3e420b4"}, - {file = "regex-2022.9.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:14c71437ffb89479c89cc7022a5ea2075a842b728f37205e47c824cc17b30a42"}, - {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee7045623a5ace70f3765e452528b4c1f2ce669ed31959c63f54de64fe2f6ff7"}, - {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6e521d9db006c5e4a0f8acfef738399f72b704913d4e083516774eb51645ad7c"}, - {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b86548b8234b2be3985dbc0b385e35f5038f0f3e6251464b827b83ebf4ed90e5"}, - {file = "regex-2022.9.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b39ee3b280e15824298b97cec3f7cbbe6539d8282cc8a6047a455b9a72c598"}, - {file = "regex-2022.9.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6e6e61e9a38b6cc60ca3e19caabc90261f070f23352e66307b3d21a24a34aaf"}, - {file = 
"regex-2022.9.13-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d837ccf3bd2474feabee96cd71144e991472e400ed26582edc8ca88ce259899c"}, - {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6adfe300848d61a470ec7547adc97b0ccf86de86a99e6830f1d8c8d19ecaf6b3"}, - {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d5b003d248e6f292475cd24b04e5f72c48412231961a675edcb653c70730e79e"}, - {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:d5edd3eb877c9fc2e385173d4a4e1d792bf692d79e25c1ca391802d36ecfaa01"}, - {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:50e764ffbd08b06aa8c4e86b8b568b6722c75d301b33b259099f237c46b2134e"}, - {file = "regex-2022.9.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d43bd402b27e0e7eae85c612725ba1ce7798f20f6fab4e8bc3de4f263294f03"}, - {file = "regex-2022.9.13-cp39-cp39-win32.whl", hash = "sha256:7fcf7f94ccad19186820ac67e2ec7e09e0ac2dac39689f11cf71eac580503296"}, - {file = "regex-2022.9.13-cp39-cp39-win_amd64.whl", hash = "sha256:322bd5572bed36a5b39952d88e072738926759422498a96df138d93384934ff8"}, - {file = "regex-2022.9.13.tar.gz", hash = "sha256:f07373b6e56a6f3a0df3d75b651a278ca7bd357a796078a26a958ea1ce0588fd"}, -] reorder-python-imports = [ {file = "reorder_python_imports-2.8.0-py2.py3-none-any.whl", hash = "sha256:03354608c610a25cba75a352ad86552849dcf3c90849fcb415298efc911a4ecf"}, {file = "reorder_python_imports-2.8.0.tar.gz", hash = "sha256:435af2a6feb39de3c4b7a415079f85b4b0052d3a7ed9ea7b269b0aff725abdaf"}, @@ -2717,10 +2638,7 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] -tomli = [ - {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, - {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, -] +tomli = [] tomlkit = [ {file = "tomlkit-0.7.2-py2.py3-none-any.whl", hash = "sha256:173ad840fa5d2aac140528ca1933c29791b79a374a0861a80347f42ec9328117"}, {file = "tomlkit-0.7.2.tar.gz", hash = "sha256:d7a454f319a7e9bd2e249f239168729327e4dd2d27b17dc68be264ad1ce36754"}, diff --git a/pyproject.toml b/pyproject.toml index 4fc1a9d3..91f0b75f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,7 @@ sphinx = "^4.0.2" sphinx-autobuild = "^2021.3.14" pre-commit = "^2.11.1" flake8 = "^3.8.4" -black = ">=20.8b1, <=21.10b0" +black = ">=20.8b1" flake8-bandit = "^2.1.2" flake8-bugbear = "^21.4.3" flake8-docstrings = "^1.5.0" From 09475f036feb175904593cc8076c0656dafdc642 Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:40:41 +0200 Subject: [PATCH 48/58] typo fixed --- batchglm/utils/plotting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/batchglm/utils/plotting.py b/batchglm/utils/plotting.py index defe6375..6148c11e 100644 --- a/batchglm/utils/plotting.py +++ b/batchglm/utils/plotting.py @@ -24,7 +24,7 @@ def _input_checks( def _cast(data: Union[np.ndarray, dask.array.core.Array]) -> Tuple[np.ndarray, np.ndarray]: if isinstance(data, dask.array.core.Array): - to_return = data.coompute() + to_return = data.compute() elif isinstance(data, np.ndarray): to_return = data else: From 0e53b114345fc66fd782e0190c76d964863fbfd4 Mon Sep 17 00:00:00 2001 From: 
picciama Date: Tue, 13 Sep 2022 11:41:14 +0200 Subject: [PATCH 49/58] added dask support to typehint --- tests/numpy/test_accuracy.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/numpy/test_accuracy.py b/tests/numpy/test_accuracy.py index f514560a..7a42cf73 100644 --- a/tests/numpy/test_accuracy.py +++ b/tests/numpy/test_accuracy.py @@ -1,6 +1,8 @@ import logging import unittest +from typing import Union +import dask.array import numpy as np from utils import get_estimator, get_generated_model @@ -29,7 +31,12 @@ def eval_estimation(self, estimator: EstimatorGlm): std_thres_location = 1 std_thres_scale = 1 - def deviation_theta(true: np.ndarray, pred: np.ndarray, mean_thres: float, std_thres: float) -> bool: + def deviation_theta( + true: Union[np.ndarray, dask.array.core.Array], + pred: Union[np.ndarray, dask.array.core.Array], + mean_thres: float, + std_thres: float, + ) -> bool: relative_deviation = (pred - true) / true mean = np.mean(relative_deviation) std = np.std(relative_deviation) From 5cd467607c2bebc6b5193f7b31459c456314373b Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:43:20 +0200 Subject: [PATCH 50/58] make constraints a mandatory argument --- batchglm/models/base_glm/model.py | 2 +- batchglm/models/base_glm/utils.py | 4 ++-- batchglm/models/glm_beta/utils.py | 4 ++-- batchglm/models/glm_nb/utils.py | 2 +- batchglm/models/glm_norm/utils.py | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/batchglm/models/base_glm/model.py b/batchglm/models/base_glm/model.py index 36aa8e33..b142eb44 100644 --- a/batchglm/models/base_glm/model.py +++ b/batchglm/models/base_glm/model.py @@ -8,7 +8,7 @@ from ...utils.input import InputDataGLM from .external import pkg_constants -from .utils import generate_sample_description, parse_constraints, parse_design +from .utils import generate_sample_description logger = logging.getLogger(__name__) diff --git a/batchglm/models/base_glm/utils.py b/batchglm/models/base_glm/utils.py index cc45e07f..7c66b879 100644 --- a/batchglm/models/base_glm/utils.py +++ b/batchglm/models/base_glm/utils.py @@ -145,7 +145,7 @@ def parse_constraints( def closedform_glm_mean( x: Union[np.ndarray, scipy.sparse.csr_matrix, dask.array.core.Array], dmat: Union[np.ndarray, dask.array.core.Array], - constraints: Optional[Union[np.ndarray, dask.array.core.Array]] = None, + constraints: Union[np.ndarray, dask.array.core.Array], size_factors: Optional[np.ndarray] = None, link_fn: Optional[Callable] = None, inv_link_fn: Optional[Callable] = None, @@ -188,7 +188,7 @@ def apply_fun(grouping): def closedform_glm_scale( x: Union[np.ndarray, scipy.sparse.csr_matrix, dask.array.core.Array], design_scale: Union[np.ndarray, dask.array.core.Array], - constraints: Optional[Union[np.ndarray, dask.array.core.Array]] = None, + constraints: Union[np.ndarray, dask.array.core.Array], size_factors: Optional[np.ndarray] = None, groupwise_means: Optional[np.ndarray] = None, link_fn: Optional[Callable] = None, diff --git a/batchglm/models/glm_beta/utils.py b/batchglm/models/glm_beta/utils.py index 1c8d8ded..9f8dcb77 100644 --- a/batchglm/models/glm_beta/utils.py +++ b/batchglm/models/glm_beta/utils.py @@ -9,7 +9,7 @@ def closedform_beta_glm_logitmean( x: Union[np.ndarray, scipy.sparse.csr_matrix], design_loc: np.ndarray, - constraints_loc, + constraints_loc: np.ndarray, size_factors=None, link_fn=lambda x: np.log(1 / (1 / x - 1)), inv_link_fn=lambda x: 1 / (1 + np.exp(-x)), @@ -39,7 +39,7 @@ def closedform_beta_glm_logitmean( def 
closedform_beta_glm_logsamplesize( x: Union[np.ndarray, scipy.sparse.csr_matrix], design_scale: np.ndarray, - constraints=None, + constraints: np.ndarray, size_factors=None, groupwise_means=None, link_fn=np.log, diff --git a/batchglm/models/glm_nb/utils.py b/batchglm/models/glm_nb/utils.py index 2a0c78cc..dd292582 100644 --- a/batchglm/models/glm_nb/utils.py +++ b/batchglm/models/glm_nb/utils.py @@ -43,7 +43,7 @@ def closedform_nb_glm_logmu( def closedform_nb_glm_logphi( x: Union[np.ndarray, scipy.sparse.csr_matrix, dask.array.core.Array], design_scale: Union[np.ndarray, dask.array.core.Array], - constraints: Optional[Union[np.ndarray, dask.array.core.Array]] = None, + constraints: Union[np.ndarray, dask.array.core.Array], size_factors: Optional[np.ndarray] = None, groupwise_means: Optional[np.ndarray] = None, link_fn: Callable = np.log, diff --git a/batchglm/models/glm_norm/utils.py b/batchglm/models/glm_norm/utils.py index ab572851..a5763988 100644 --- a/batchglm/models/glm_norm/utils.py +++ b/batchglm/models/glm_norm/utils.py @@ -12,7 +12,7 @@ def closedform_norm_glm_mean( x: Union[np.ndarray, scipy.sparse.csr_matrix], design_loc: np.ndarray, - constraints_loc, + constraints_loc: np.ndarray, size_factors=None, link_fn=lambda x: x, inv_link_fn=lambda x: x, @@ -42,7 +42,7 @@ def closedform_norm_glm_mean( def closedform_norm_glm_logsd( x: Union[np.ndarray, scipy.sparse.csr_matrix], design_scale: np.ndarray, - constraints=None, + constraints: np.ndarray, size_factors=None, groupwise_means=None, link_fn=np.log, From 8730ba73ecd7d4b34c861bffcc47ea897154eedd Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 13 Sep 2022 11:47:21 +0200 Subject: [PATCH 51/58] fix mypy related problems + black reformatting --- batchglm/external/edgeR/aveLogCPM.py | 2 +- batchglm/external/edgeR/calcNormFactors.py | 18 +++---- batchglm/external/edgeR/estimateDisp.py | 52 +++++++++------------ batchglm/external/edgeR/estimator.py | 6 +-- batchglm/external/edgeR/glm_one_group.py | 18 ++++--- batchglm/external/edgeR/limma/fitFDist.py | 9 ++-- batchglm/external/edgeR/qr_decomposition.py | 7 +-- batchglm/external/edgeR/wleb.py | 20 ++++---- 8 files changed, 66 insertions(+), 66 deletions(-) diff --git a/batchglm/external/edgeR/aveLogCPM.py b/batchglm/external/edgeR/aveLogCPM.py index 5954bbb7..5d625656 100644 --- a/batchglm/external/edgeR/aveLogCPM.py +++ b/batchglm/external/edgeR/aveLogCPM.py @@ -12,7 +12,7 @@ def calculate_avg_log_cpm( size_factors: Optional[np.ndarray] = None, dispersion: Union[np.ndarray, float] = 0.05, prior_count: int = 2, - weights: Optional[np.ndarray] = None, + weights: Optional[Union[np.ndarray, float]] = None, maxit: int = 50, tolerance: float = 1e-10, chunk_size_cells=1e6, diff --git a/batchglm/external/edgeR/calcNormFactors.py b/batchglm/external/edgeR/calcNormFactors.py index fbed536d..da10e809 100644 --- a/batchglm/external/edgeR/calcNormFactors.py +++ b/batchglm/external/edgeR/calcNormFactors.py @@ -4,20 +4,20 @@ from scipy.stats import rankdata -def calc_size_factors(x: np.ndarray, method: Optional[str] = None, *args, **kwargs): +def calc_size_factors(x: np.ndarray, method: Optional[str] = None, **kwargs): assert ~np.any(np.isnan(x)), "Counts matrix must not contain NaN!" 
x = x[:, np.sum(x, axis=0) > 0] if method is None: size_factors = np.ones((x.shape[1], 1), dtype=float) elif method.lower() == "tmm": - size_factors = _calc_factor_tmm(data=x, *args, **kwargs) + size_factors = _calc_factor_tmm(data=x, **kwargs) elif method.lower() == "tmmwsp": - size_factors = _calc_factor_tmmwsp(data=x, *args, **kwargs) + size_factors = _calc_factor_tmmwsp(data=x, **kwargs) elif method.lower() == "rle": size_factors = _calc_factor_rle(data=x) elif method == "upperquartile": - size_factors = _calc_factor_quantile(data=x, *args, **kwargs) + size_factors = _calc_factor_quantile(data=x, **kwargs) else: raise ValueError(f"Method {method} not recognized.") @@ -57,9 +57,9 @@ def _calc_factor_tmm( if ref_idx is None: f75 = _calc_factor_quantile(data, p=0.75) if np.median(f75) < 1e-20: - ref_idx = np.argmax(np.sum(np.sqrt(data), axis=1)) + ref_idx = np.sum(np.sqrt(data), axis=1).argmax() else: - ref_idx = np.argmin(np.abs(f75 - np.mean(f75))) + ref_idx = np.abs(f75 - np.mean(f75)).argmin() sample_sums = np.sum(data, axis=1, keepdims=True) sum_normalized_data = data / sample_sums @@ -102,7 +102,7 @@ def _calc_factor_tmm( # In this case, return unity if np.isnan(size_factor_i): size_factor_i = 0 - size_factors[i] = 2 ** size_factor_i + size_factors[i] = 2**size_factor_i return size_factors @@ -116,7 +116,7 @@ def _calc_factor_tmmwsp( ): # TMM with pairing of singleton positive counts between the obs and ref libraries if ref_idx is None: - ref_idx = np.argmax(np.sum(np.sqrt(data), axis=1)) + ref_idx = np.sum(np.sqrt(data), axis=1).argmax() eps = 1e-14 sample_sums = np.sum(data, axis=1, keepdims=True) @@ -196,5 +196,5 @@ def _calc_factor_tmmwsp( size_factor_i = np.sum(w * m) / np.sum(w) else: size_factor_i = np.mean(m) - size_factors[i] = 2 ** size_factor_i + size_factors[i] = 2**size_factor_i return size_factors diff --git a/batchglm/external/edgeR/estimateDisp.py b/batchglm/external/edgeR/estimateDisp.py index 37342548..b2512023 100644 --- a/batchglm/external/edgeR/estimateDisp.py +++ b/batchglm/external/edgeR/estimateDisp.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union import dask.array import numpy as np @@ -15,11 +15,10 @@ def estimate_disp( - model: Optional[NBModel] = None, - x: Optional[np.ndarray] = None, + x: Union[NBModel, np.ndarray], design: Optional[np.ndarray] = None, design_loc_names: Optional[np.ndarray] = None, - norm_factors: Optional[np.ndarray] = None, + size_factors: Optional[np.ndarray] = None, group=None, # prior_df=None, # TODO trend_method="loess", @@ -67,24 +66,13 @@ def estimate_disp( :param weights: optional numeric matrix giving observation weights """ - if model is None: - if x is None: - raise AssertionError("Provide x when no model is specified.") + if isinstance(x, np.ndarray): if design is None: - raise AssertionError("Provide design when no model is specified.") - - if norm_factors is None: - sum_counts_observation = x.sum(axis=1) - if norm_factors is None: - size_factors = np.log(sum_counts_observation) - else: - size_factors = np.log(sum_counts_observation * norm_factors) - - selected_features = x.sum(axis=0) >= min_rowsum - x_filtered = x[:, selected_features] - + raise AssertionError("Provide design when x is not a model already.") + if size_factors is None: + size_factors = np.log(x.sum(axis=1)) input_data = InputDataGLM( - data=x_filtered, + data=x, design_loc=design, design_loc_names=design_loc_names, size_factors=size_factors, @@ -94,12 +82,14 @@ def estimate_disp( ) model = NBModel(input_data) else: - 
selected_features = ... - x = model.x.copy() + model = x + x_all = model.x.copy() + selected_features = x_all.sum(axis=0) >= min_rowsum + model.x = x_all[:, selected_features] # Spline points spline_pts = np.linspace(start=grid_range[0], stop=grid_range[1], num=grid_length) - spline_disp = 0.1 * 2 ** spline_pts + spline_disp = 0.1 * 2**spline_pts l0 = np.zeros((model.num_features, grid_length)) # Identify which observations have means of zero (weights aren't needed here). @@ -165,7 +155,7 @@ def estimate_disp( # Calculate common dispersion overall = maximize_interpolant(spline_pts, l0.sum(axis=0, keepdims=True)) # (1, spline_pts) - common_dispersion = 0.1 * 2 ** overall + common_dispersion = 0.1 * 2**overall print(f"Common dispersion is {common_dispersion}.") @@ -185,8 +175,8 @@ def estimate_disp( overall=False, individual=False, ) - disp_trend = 0.1 * 2 ** trend - trended_dispersion = np.full(x.shape[1], disp_trend[np.argmin(avg_log_cpm[selected_features])]) + disp_trend = 0.1 * 2**trend + trended_dispersion = np.full(x_all.shape[1], disp_trend[np.argmin(avg_log_cpm[selected_features])]) trended_dispersion[selected_features] = disp_trend print("DONE.") else: @@ -210,10 +200,10 @@ def estimate_disp( prior_n = prior_df / (model.num_observations - n_loc_params) # Initiate featurewise dispersions - if trend_method is not None: + if trend_method is not None and trended_dispersion is not None: featurewise_dispersion = trended_dispersion.copy() else: - featurewise_dispersion = np.full(x.shape[1], common_dispersion) + featurewise_dispersion = np.full(x_all.shape[1], common_dispersion) # Checking if the shrinkage is near-infinite. too_large = prior_n > 1e6 @@ -235,15 +225,15 @@ def estimate_disp( m0=m0, ) if not robust or len(too_large) == 1: - featurewise_dispersion[selected_features] = 0.1 * 2 ** out_individual + featurewise_dispersion[selected_features] = 0.1 * 2**out_individual else: featurewise_dispersion[selected_features][~too_large] = 0.1 * 2 ** out_individual[~too_large] print("DONE.") if robust: temp_df = prior_df temp_n = prior_n - prior_df = np.full(x.shape[1], np.inf) - prior_n = np.full(x.shape[1], np.inf) + prior_df = np.full(x_all.shape[1], np.inf) + prior_n = np.full(x_all.shape[1], np.inf) prior_df[selected_features] = temp_df prior_n[selected_features] = temp_n diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py index 62cca5cd..fb302dfb 100644 --- a/batchglm/external/edgeR/estimator.py +++ b/batchglm/external/edgeR/estimator.py @@ -90,10 +90,10 @@ def train_oneway(self, maxit: int, tolerance: float): group_model = model.model.__class__( InputDataGLM( data=model.x[obs_group], - design_loc=dloc[np.ix_(obs_group, [i])], + design_loc=dloc[np.ix_(obs_group, np.ndarray([i]))], design_loc_names=model.design_loc_names[[i]], size_factors=sf, - design_scale=dscale[np.ix_(obs_group, [0])], + design_scale=dscale[np.ix_(obs_group, np.ndarray([0]))], design_scale_names=model.design_scale_names[[0]], as_dask=isinstance(model.x, dask.array.core.Array), chunk_size_cells=model.chunk_size_cells, @@ -116,7 +116,7 @@ def train_oneway(self, maxit: int, tolerance: float): theta_location = np.linalg.solve(unique_design, theta_location) model.theta_location = theta_location - def train_levenberg(self, maxit: int, tolerance: int = 1e-6): + def train_levenberg(self, maxit: int, tolerance: float = 1e-6): model = self._model_container max_x = np.max(model.x, axis=0).compute() diff --git a/batchglm/external/edgeR/glm_one_group.py b/batchglm/external/edgeR/glm_one_group.py 
index ca232cd1..54686780 100644 --- a/batchglm/external/edgeR/glm_one_group.py +++ b/batchglm/external/edgeR/glm_one_group.py @@ -11,12 +11,15 @@ def get_single_group_start( - x: np.ndarray, - sf: Optional[np.ndarray] = None, + x: Union[np.ndarray, dask.array.core.Array], + sf: Optional[Union[np.ndarray, dask.array.core.Array, float]] = None, weights: Optional[Union[np.ndarray, float]] = None, ) -> np.ndarray: if weights is None: - weights = np.ones_like(x) + weights = 1.0 + if isinstance(weights, float): + weights = np.full(x.shape, weights) + if weights.shape != x.shape: raise ValueError("Shape of weights must be idential to shape of model.x") @@ -24,10 +27,13 @@ def get_single_group_start( if sf is None: sf = np.log(1.0) + elif isinstance(sf, dask.array.core.Array): + sf = sf.compute() + if not isinstance(sf, (np.ndarray, float)): + raise TypeError("sf must be of type np.ndarray, dask.array.core.Array or None") if isinstance(x, dask.array.core.Array): x = x.compute() - if isinstance(sf, dask.array.core.Array): - sf = sf.compute() + theta_location = np.sum(np.where(x > low_value, x / np.exp(sf) * weights, 0), axis=0, keepdims=True) with np.errstate(divide="ignore", invalid="ignore"): theta_location = np.log(theta_location / total_weights) @@ -37,7 +43,7 @@ def get_single_group_start( def fit_single_group( model: BaseModelContainer, maxit: int = 50, - tolerance: int = 1e-10, + tolerance: float = 1e-10, ): """ Setting up initial values for beta as the log of the mean of the ratio of counts to offsets. diff --git a/batchglm/external/edgeR/limma/fitFDist.py b/batchglm/external/edgeR/limma/fitFDist.py index a8414fcc..e7d0a0a8 100644 --- a/batchglm/external/edgeR/limma/fitFDist.py +++ b/batchglm/external/edgeR/limma/fitFDist.py @@ -1,4 +1,5 @@ import logging +from typing import Optional import numpy as np import patsy @@ -9,7 +10,7 @@ logger = logging.getLogger(__name__) -def fit_f_dist(x: np.ndarray, df1: np.ndarray, covariate: np.ndarray): +def fit_f_dist(x: np.ndarray, df1: np.ndarray, covariate: Optional[np.ndarray]): """ Moment estimation of the parameters of a scaled F-distribution. The numerator degrees of freedom is given, the scale factor and denominator df is to be estimated. 
@@ -25,7 +26,7 @@ def fit_f_dist(x: np.ndarray, df1: np.ndarray, covariate: np.ndarray): # Check covariate if covariate is None: - spline_df = np.ones_like(x) + spline_df = 1 else: assert len(x) == len(df1) == len(covariate), "All inputs must have the same length" if np.any(np.isnan(covariate)): @@ -55,11 +56,11 @@ def fit_f_dist(x: np.ndarray, df1: np.ndarray, covariate: np.ndarray): # Set df for spline trend if covariate is not None: spline_df = 1 + int(n_ok >= 3) + int(n_ok >= 6) + int(n_ok >= 30) - spline_df = np.min((np.min(spline_df), len(np.unique(covariate)))) + spline_df = np.minimum(spline_df, len(np.unique(covariate))) # If covariate takes only one unique value or insufficient # observations, recall with NULL covariate if spline_df < 2: - scale, df2 = fit_f_dist(x=x, df1=df1) + scale, df2 = fit_f_dist(x=x, df1=df1, covariate=None) scale = np.full(n, scale) return scale, df2 diff --git a/batchglm/external/edgeR/qr_decomposition.py b/batchglm/external/edgeR/qr_decomposition.py index 5bb87fba..63b5f0f4 100644 --- a/batchglm/external/edgeR/qr_decomposition.py +++ b/batchglm/external/edgeR/qr_decomposition.py @@ -1,3 +1,4 @@ +import dask.array import numpy as np from scipy.linalg.lapack import dgeqrf, dormqr, dtrtrs @@ -90,9 +91,9 @@ def get_levenberg_start(model: _ModelGLM, disp: np.ndarray, use_null: bool): sum_norm_x = np.sum(model.x * weights / sf_exp, axis=0) # shape (n_features,) sum_weights = np.sum(weights, axis=0) # shape (n_features,) - values = np.broadcast_to( - np.log(sum_norm_x / sum_weights), (n_obs, n_features) - ).compute() # shape(n_obs, n_features) + values = np.broadcast_to(np.log(sum_norm_x / sum_weights), (n_obs, n_features)) + if isinstance(values, dask.array.core.Array): + values = values.compute() # shape(n_obs, n_features) for j in range(n_features): qr.solve(values[:, j]) diff --git a/batchglm/external/edgeR/wleb.py b/batchglm/external/edgeR/wleb.py index 4bcec901..a74ab08c 100644 --- a/batchglm/external/edgeR/wleb.py +++ b/batchglm/external/edgeR/wleb.py @@ -1,4 +1,4 @@ -from typing import Any +from typing import Any, Optional, Union import dask.array import numpy as np @@ -11,8 +11,8 @@ def wleb( theta: Any, loglik: Any, prior_n: int = 5, - covariate: np.ndarray = None, - trend_method: str = "loess", + covariate: Optional[np.ndarray] = None, + trend_method: Optional[str] = "loess", span: Any = None, overall: bool = True, trend: bool = True, @@ -42,8 +42,8 @@ def wleb( :return: Tuple(out_span, out_overall, m0, out_trend, out_individual) """ n_features, n_theta = loglik.shape - if covariate is None: - trend_method = "none" + if covariate is None and trend_method is not None: + raise ValueError("covariate cannot be None if trend_method is given.") if span is None: if n_features < 50: @@ -61,10 +61,10 @@ def wleb( # calculate trended prior if m0 is None: - if trend_method == "none": + if trend_method is None: m0 = np.broadcast_to(np.sum(loglik, axis=0), loglik.shape) elif trend_method == "loess": - m0, _ = loess(loglik, covariate, span=out_span) + m0, _ = loess(span=out_span, y=loglik, x=covariate) else: raise NotImplementedError(f"Method {trend_method} is not yet implemented.") @@ -80,7 +80,7 @@ def wleb( return out_span, out_overall, m0, out_trend, out_individual -def loess(y: np.ndarray, x: np.ndarray, span: float): +def loess(span: float, y: np.ndarray, x: Optional[Union[np.ndarray, dask.array.core.Array]] = None): """ Wrapper around loess as implemented in edgeR. This calls the C++ function loess_by_col. 
@@ -88,8 +88,10 @@ def loess(y: np.ndarray, x: np.ndarray, span: float): n_features = y.shape[0] if x is None: x = np.arange(n_features) - if isinstance(x, dask.array.core.Array): + elif isinstance(x, dask.array.core.Array): x = x.compute() + if not isinstance(x, np.ndarray): + raise TypeError("x must be of type np.ndarray, None or dask.array.core.Array") order = np.argsort(x, kind="stable") y = y[order] From a0636ca5af2467c44cc04f32c893ae5dabc4be52 Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 20 Sep 2022 11:42:34 +0200 Subject: [PATCH 52/58] index using np.ndarray for mypy to succeed --- batchglm/external/edgeR/estimator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py index fb302dfb..016052a6 100644 --- a/batchglm/external/edgeR/estimator.py +++ b/batchglm/external/edgeR/estimator.py @@ -90,10 +90,10 @@ def train_oneway(self, maxit: int, tolerance: float): group_model = model.model.__class__( InputDataGLM( data=model.x[obs_group], - design_loc=dloc[np.ix_(obs_group, np.ndarray([i]))], + design_loc=dloc[np.ix_(obs_group, np.array([i]))], design_loc_names=model.design_loc_names[[i]], size_factors=sf, - design_scale=dscale[np.ix_(obs_group, np.ndarray([0]))], + design_scale=dscale[np.ix_(obs_group, np.array([0]))], design_scale_names=model.design_scale_names[[0]], as_dask=isinstance(model.x, dask.array.core.Array), chunk_size_cells=model.chunk_size_cells, From 5856631e9234fc9269f89aa83a703ace7209a20d Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 20 Sep 2022 11:43:25 +0200 Subject: [PATCH 53/58] return all, not just prior_df --- batchglm/external/edgeR/prior_df.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/batchglm/external/edgeR/prior_df.py b/batchglm/external/edgeR/prior_df.py index dbf2d485..fc466dd2 100644 --- a/batchglm/external/edgeR/prior_df.py +++ b/batchglm/external/edgeR/prior_df.py @@ -47,5 +47,4 @@ def calculate_prior_df( s2[df_residual == 0] = 0.0 s2 = np.maximum(s2, 0) - df_prior, _, _ = squeeze_var(s2, df=df_residual, covariate=avg_log_cpm, robust=robust, winsor_tail_p=winsor_tail_p) - return df_prior + return squeeze_var(s2, df=df_residual, covariate=avg_log_cpm, robust=robust, winsor_tail_p=winsor_tail_p) From d961166332c8296e1e8eff5397e8131ca7b152e1 Mon Sep 17 00:00:00 2001 From: picciama Date: Tue, 20 Sep 2022 11:43:46 +0200 Subject: [PATCH 54/58] minor refactoring and bugfixing --- batchglm/external/edgeR/estimateDisp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/batchglm/external/edgeR/estimateDisp.py b/batchglm/external/edgeR/estimateDisp.py index b2512023..78fb728e 100644 --- a/batchglm/external/edgeR/estimateDisp.py +++ b/batchglm/external/edgeR/estimateDisp.py @@ -85,7 +85,7 @@ def estimate_disp( model = x x_all = model.x.copy() selected_features = x_all.sum(axis=0) >= min_rowsum - model.x = x_all[:, selected_features] + model._x = x_all[:, selected_features] # Spline points spline_pts = np.linspace(start=grid_range[0], stop=grid_range[1], num=grid_length) @@ -165,7 +165,7 @@ def estimate_disp( sf = model.size_factors if sf is not None and isinstance(sf, dask.array.core.Array): sf = sf.compute() - avg_log_cpm = calculate_avg_log_cpm(x, size_factors=sf, dispersion=common_dispersion[0], weights=weights) + avg_log_cpm = calculate_avg_log_cpm(x_all, size_factors=sf, dispersion=common_dispersion[0], weights=weights) span, _, m0, trend, _ = wleb( theta=spline_pts, loglik=l0, @@ -176,7 +176,7 @@ def 
estimate_disp( individual=False, ) disp_trend = 0.1 * 2**trend - trended_dispersion = np.full(x_all.shape[1], disp_trend[np.argmin(avg_log_cpm[selected_features])]) + trended_dispersion = np.full(x_all.shape[1], disp_trend[np.argmin(avg_log_cpm[0, selected_features])]) trended_dispersion[selected_features] = disp_trend print("DONE.") else: @@ -193,7 +193,7 @@ def estimate_disp( # Calculate prior.df print("Calculating featurewise dispersion...") if prior_df is None: # - prior_df = calculate_prior_df( + prior_df, _, _ = calculate_prior_df( model, avg_log_cpm[0, selected_features], robust=robust, winsor_tail_p=winsor_tail_p, dispersion=disp_trend ) n_loc_params = model.design_loc.shape[1] @@ -217,7 +217,7 @@ def estimate_disp( theta=spline_pts, loglik=l0, prior_n=temp_n, - covariate=avg_log_cpm[selected_features], + covariate=avg_log_cpm[0, selected_features], trend_method=trend_method, span=span, overall=False, From 4a230b566f9e920b0beda0c6b19863fe97c5eb0d Mon Sep 17 00:00:00 2001 From: picciama Date: Thu, 22 Sep 2022 11:13:43 +0200 Subject: [PATCH 55/58] fixed pre-commit hooks and mypy --- batchglm/external/edgeR/glmQLFit.py | 74 +++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 batchglm/external/edgeR/glmQLFit.py diff --git a/batchglm/external/edgeR/glmQLFit.py b/batchglm/external/edgeR/glmQLFit.py new file mode 100644 index 00000000..708731e9 --- /dev/null +++ b/batchglm/external/edgeR/glmQLFit.py @@ -0,0 +1,74 @@ +from typing import Optional, Tuple, Union + +import numpy as np + +from .aveLogCPM import calculate_avg_log_cpm +from .external import InputDataGLM, NBEstimator, NBModel +from .prior_df import calculate_prior_df + + +def glm_ql_fit( + x: Union[NBModel, np.ndarray], + design: Optional[np.ndarray] = None, + design_loc_names: Optional[np.ndarray] = None, + dispersion: Optional[np.ndarray] = None, + offset: Optional[np.ndarray] = None, + lib_size: Optional[np.ndarray] = None, + size_factors: Optional[np.ndarray] = None, + tol: float = 1e-6, # TODO + weights: Optional[np.ndarray] = None, + abundance_trend: bool = True, + ave_log_cpm: Optional[np.ndarray] = None, + robust: bool = False, + winsor_tail_p: Tuple[float, float] = (0.05, 0.1), + **input_data_kwargs, +): + """ + Fit a GLM and compute quasi-likelihood dispersions for each gene. + """ + # Original method docstring: + # Fits a GLM and computes quasi-likelihood dispersions for each gene. + # Davis McCarthy, Gordon Smyth, Yunshun Chen, Aaron Lun. + # Originally part of glmQLFTest, as separate function 15 September 2014. Last modified 4 April 2020. + + if isinstance(x, np.ndarray): + if design is None: + raise AssertionError("Provide design when x is not a model already.") + if size_factors is None: + size_factors = np.log(x.sum(axis=1)) + input_data = InputDataGLM( + data=x, + design_loc=design, + design_loc_names=design_loc_names, + size_factors=size_factors, + design_scale=np.ones((x.shape[0], 1)), + design_scale_names=np.array(["Intercept"]), + **input_data_kwargs, + ) + model = NBModel(input_data) + else: + model = x + + estimator = NBEstimator(model, dispersion=dispersion) + estimator.train(maxit=250, tolerance=tol) + # glmfit = glmFit(y, design=design, dispersion=dispersion, offset=offset, lib.size=lib.size, weights=weights,...) + + # Setting up the abundances. 
+ if abundance_trend: + if ave_log_cpm is None: + pass + # big TODO + # ave_log_cpm = calculate_avg_log_cpm(x=model.x, size_factors=TODO, dispersion=dispersion, weights=weights) + # ave_log_cpm = aveLogCPM(y, lib.size=lib.size, weights=weights, dispersion=dispersion) + # glmfit$AveLogCPM <- AveLogCPM + else: + ave_log_cpm = None + + return calculate_prior_df( + model=x, + avg_log_cpm=ave_log_cpm, + robust=robust, + winsor_tail_p=winsor_tail_p, + dispersion=dispersion, + tolerance=tol, + ) From fbc5e9100a580f87f688d5a9356bfa3ea37dcdd8 Mon Sep 17 00:00:00 2001 From: picciama Date: Thu, 22 Sep 2022 11:14:14 +0200 Subject: [PATCH 56/58] increased the param clipping limits --- batchglm/models/glm_nb/model.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/batchglm/models/glm_nb/model.py b/batchglm/models/glm_nb/model.py index 36c17df1..c5a23730 100644 --- a/batchglm/models/glm_nb/model.py +++ b/batchglm/models/glm_nb/model.py @@ -57,20 +57,20 @@ def phi(self) -> Union[np.ndarray, dask.array.core.Array]: def bounds(self, sf, dmax, dtype) -> Tuple[Dict[str, Any], Dict[str, Any]]: bounds_min = { - "theta_location": np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, - "theta_scale": np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, - "eta_loc": np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, - "eta_scale": np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, + "theta_location": -1e8, # np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, + "theta_scale": -1e8, # np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, + "eta_loc": -1e8, # np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, + "eta_scale": -1e8, # np.log(np.nextafter(0, np.inf, dtype=dtype)) / sf, "loc": np.nextafter(0, np.inf, dtype=dtype), "scale": np.nextafter(0, np.inf, dtype=dtype), "likelihood": dtype(0), - "ll": np.log(np.nextafter(0, np.inf, dtype=dtype)), + "ll": -1e8, # np.log(np.nextafter(0, np.inf, dtype=dtype)), } bounds_max = { - "theta_location": np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, - "theta_scale": np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, - "eta_loc": np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, - "eta_scale": np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, + "theta_location": 1e10, # np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, + "theta_scale": 1e10, # np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, + "eta_loc": 1e10, # np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, + "eta_scale": 1e10, # np.nextafter(np.log(dmax), -np.inf, dtype=dtype) / sf, "loc": np.nextafter(dmax, -np.inf, dtype=dtype) / sf, "scale": np.nextafter(dmax, -np.inf, dtype=dtype) / sf, "likelihood": dtype(1), From 13a78005eeb7c21b5708a049d6ac88ed5529f0cf Mon Sep 17 00:00:00 2001 From: picciama Date: Fri, 7 Oct 2022 13:21:17 +0200 Subject: [PATCH 57/58] fix mypy / batchglm API after merging develoment --- batchglm/external/edgeR/aveLogCPM.py | 10 +++---- batchglm/external/edgeR/estimateDisp.py | 29 ++++++++++++------- batchglm/external/edgeR/estimator.py | 23 +++++++++------ batchglm/external/edgeR/external.py | 4 +-- batchglm/external/edgeR/glmQLFit.py | 21 ++++++++------ batchglm/external/edgeR/glm_one_group.py | 4 +-- batchglm/external/edgeR/limma/fitFDist.py | 6 ++-- batchglm/external/edgeR/limma/squeezeVar.py | 10 ++++++- batchglm/external/edgeR/nbinomDeviance.py | 4 +-- batchglm/external/edgeR/prior_df.py | 32 ++++++++++++--------- batchglm/external/edgeR/qr_decomposition.py | 4 +-- 11 files changed, 89 insertions(+), 58 deletions(-) diff --git 
a/batchglm/external/edgeR/aveLogCPM.py b/batchglm/external/edgeR/aveLogCPM.py index 5d625656..9020b81d 100644 --- a/batchglm/external/edgeR/aveLogCPM.py +++ b/batchglm/external/edgeR/aveLogCPM.py @@ -47,16 +47,16 @@ def calculate_avg_log_cpm( InputDataGLM( data=x, design_loc=np.ones((x.shape[0], 1)), - design_loc_names=np.array(["Intercept"]), + design_loc_names=["Intercept"], size_factors=adjusted_size_factors, design_scale=np.ones((x.shape[0], 1)), - design_scale_names=np.array(["Intercept"]), + design_scale_names=["Intercept"], as_dask=isinstance(x, dask.array.core.Array), chunk_size_cells=chunk_size_cells, chunk_size_genes=chunk_size_genes, ) ) - avg_cpm_model = ModelContainer( + avg_cpm_model_container = ModelContainer( model=avg_cpm_model, init_theta_location=get_single_group_start(avg_cpm_model.x, avg_cpm_model.size_factors), init_theta_scale=np.log(1 / dispersion), @@ -64,8 +64,8 @@ def calculate_avg_log_cpm( dtype=x.dtype, ) - fit_single_group(avg_cpm_model, maxit=maxit, tolerance=tolerance) - output = (avg_cpm_model.theta_location + np.log(1e6)) / np.log(2) + fit_single_group(avg_cpm_model_container, maxit=maxit, tolerance=tolerance) + output = (avg_cpm_model_container.theta_location + np.log(1e6)) / np.log(2) return output diff --git a/batchglm/external/edgeR/estimateDisp.py b/batchglm/external/edgeR/estimateDisp.py index 78fb728e..62418018 100644 --- a/batchglm/external/edgeR/estimateDisp.py +++ b/batchglm/external/edgeR/estimateDisp.py @@ -1,4 +1,4 @@ -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import dask.array import numpy as np @@ -17,7 +17,7 @@ def estimate_disp( x: Union[NBModel, np.ndarray], design: Optional[np.ndarray] = None, - design_loc_names: Optional[np.ndarray] = None, + design_loc_names: Optional[List[str]] = None, size_factors: Optional[np.ndarray] = None, group=None, # prior_df=None, # TODO @@ -26,9 +26,9 @@ def estimate_disp( span=None, # TODO min_rowsum: int = 5, # TODO grid_length: int = 21, # TODO - grid_range: tuple = (-10, 10), # TODO + grid_range: Tuple[float, float] = (-10.0, 10.0), # TODO robust: bool = False, # TODO - winsor_tail_p: tuple = (0.05, 0.1), # TODO + winsor_tail_p: Tuple[float, float] = (0.05, 0.1), # TODO tol: float = 1e-6, # TODO weights=None, # TODO adjust: bool = True, @@ -66,6 +66,9 @@ def estimate_disp( :param weights: optional numeric matrix giving observation weights """ + # define return values: + trended_dispersion: Optional[np.ndarray] = None + if isinstance(x, np.ndarray): if design is None: raise AssertionError("Provide design when x is not a model already.") @@ -77,7 +80,7 @@ def estimate_disp( design_loc_names=design_loc_names, size_factors=size_factors, design_scale=np.ones((x.shape[0], 1)), - design_scale_names=np.array(["Intercept"]), + design_scale_names=["Intercept"], **input_data_kwargs, ) model = NBModel(input_data) @@ -125,10 +128,10 @@ def estimate_disp( if len(coefs_new) == design_new.shape[0]: continue design_new = design_new[:, coefs_new] - new_dloc_names = model.design_loc_names[coefs_new] + new_dloc_names = [model.design_loc_names[i] for i in coefs_new] subgroup_x = model.x - if isinstance(model.x, dask.array.core.Array): + if isinstance(subgroup_x, dask.array.core.Array): subgroup_x = subgroup_x.compute() sf = model.size_factors if sf is not None: @@ -141,7 +144,7 @@ def estimate_disp( design_loc_names=new_dloc_names, size_factors=sf, design_scale=model.design_scale[not_zero_obs_in_group], - design_scale_names=np.array(["Intercept"]), + design_scale_names=["Intercept"], 
as_dask=isinstance(model.x, dask.array.core.Array), chunk_size_cells=1000000, chunk_size_genes=1000000, @@ -183,7 +186,6 @@ def estimate_disp( avg_log_cpm = None m0 = np.broadcast_to(l0.mean(axis=0), shape=(model.x.shape[1], len(spline_pts))) disp_trend = common_dispersion - trended_dispersion = None # Are tagwise dispersions required? if not tagwise: @@ -192,9 +194,14 @@ def estimate_disp( avg_log_cpm = avg_log_cpm.compute() # Calculate prior.df print("Calculating featurewise dispersion...") - if prior_df is None: # + if prior_df is None: prior_df, _, _ = calculate_prior_df( - model, avg_log_cpm[0, selected_features], robust=robust, winsor_tail_p=winsor_tail_p, dispersion=disp_trend + model=model, + robust=robust, + dispersion=disp_trend, + winsor_tail_p=winsor_tail_p, + avg_log_cpm=avg_log_cpm[0, selected_features], + tolerance=tol, ) n_loc_params = model.design_loc.shape[1] prior_n = prior_df / (model.num_observations - n_loc_params) diff --git a/batchglm/external/edgeR/estimator.py b/batchglm/external/edgeR/estimator.py index 016052a6..0057aec8 100644 --- a/batchglm/external/edgeR/estimator.py +++ b/batchglm/external/edgeR/estimator.py @@ -6,7 +6,7 @@ from scipy.linalg import cho_solve, cholesky from .c_utils import nb_deviance -from .external import BaseModelContainer, EstimatorGlm, InputDataGLM, ModelContainer, NBModel, init_par +from .external import EstimatorGlm, InputDataGLM, ModelContainer, NBModel, NumpyModelContainer, init_par from .glm_one_group import fit_single_group, get_single_group_start from .qr_decomposition import get_levenberg_start @@ -20,9 +20,9 @@ class Estimator: _train_loc: bool = False _train_scale: bool = False - _model_container: BaseModelContainer + _model_container: NumpyModelContainer - def __init__(self, model_container: BaseModelContainer, dtype: str): + def __init__(self, model_container: NumpyModelContainer, dtype: str): """ Performs initialisation and creates a new estimator. :param model_container: @@ -87,7 +87,7 @@ def train_oneway(self, maxit: int, tolerance: float): dscale = model.design_scale if isinstance(model.design_loc, dask.array.core.Array): dscale = dscale.compute() - group_model = model.model.__class__( + _group_model = model.model.__class__( InputDataGLM( data=model.x[obs_group], design_loc=dloc[np.ix_(obs_group, np.array([i]))], @@ -101,8 +101,8 @@ def train_oneway(self, maxit: int, tolerance: float): ) ) group_model = ModelContainer( - model=group_model, - init_theta_location=get_single_group_start(group_model.x, group_model.size_factors), + model=_group_model, + init_theta_location=get_single_group_start(_group_model.x, _group_model.size_factors), init_theta_scale=model.theta_scale, chunk_size_genes=model.chunk_size_genes, dtype=model.theta_location.dtype, @@ -375,7 +375,7 @@ class NBEstimator(Estimator): def __init__( self, model: NBModel, - dispersion: float, + dispersion: Union[float, np.ndarray], dtype: str = "float64", ): """ @@ -386,7 +386,14 @@ def __init__( :param dtype: Numerical precision. 
""" init_theta_location = np.zeros((model.xh_loc.shape[1], model.num_features), dtype=model.cast_dtype) - init_theta_scale = np.full((1, model.num_features), np.log(1 / dispersion)) + if isinstance(dispersion, float): + init_theta_scale = np.full((1, model.num_features), np.log(1 / dispersion)) + elif isinstance(dispersion, np.ndarray): + if dispersion.shape != (1, model.num_features): + raise ValueError( + f"Shape mismatch (dispersion): Given: {dispersion.shape} Expected: (1, {model.num_features}))" + ) + init_theta_scale = dispersion self._train_loc = True self._train_scale = False # This is fixed as edgeR doesn't fit the scale parameter _model_container = ModelContainer( diff --git a/batchglm/external/edgeR/external.py b/batchglm/external/edgeR/external.py index 3498bee2..900543c3 100644 --- a/batchglm/external/edgeR/external.py +++ b/batchglm/external/edgeR/external.py @@ -1,7 +1,7 @@ -from batchglm.models.base_glm import _ModelGLM +from batchglm.models.base_glm import ModelGLM from batchglm.models.glm_nb.model import Model as NBModel from batchglm.models.glm_nb.utils import init_par -from batchglm.train.numpy.base_glm import BaseModelContainer, EstimatorGlm +from batchglm.train.numpy.base_glm import EstimatorGlm, NumpyModelContainer from batchglm.train.numpy.glm_nb import ModelContainer from batchglm.utils.input import InputDataGLM diff --git a/batchglm/external/edgeR/glmQLFit.py b/batchglm/external/edgeR/glmQLFit.py index 708731e9..53a835b0 100644 --- a/batchglm/external/edgeR/glmQLFit.py +++ b/batchglm/external/edgeR/glmQLFit.py @@ -1,17 +1,18 @@ -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import numpy as np from .aveLogCPM import calculate_avg_log_cpm -from .external import InputDataGLM, NBEstimator, NBModel +from .estimator import NBEstimator +from .external import InputDataGLM, NBModel from .prior_df import calculate_prior_df def glm_ql_fit( x: Union[NBModel, np.ndarray], + dispersion: Union[np.ndarray, float], design: Optional[np.ndarray] = None, - design_loc_names: Optional[np.ndarray] = None, - dispersion: Optional[np.ndarray] = None, + design_loc_names: Optional[List[str]] = None, offset: Optional[np.ndarray] = None, lib_size: Optional[np.ndarray] = None, size_factors: Optional[np.ndarray] = None, @@ -42,15 +43,17 @@ def glm_ql_fit( design_loc_names=design_loc_names, size_factors=size_factors, design_scale=np.ones((x.shape[0], 1)), - design_scale_names=np.array(["Intercept"]), + design_scale_names=["Intercept"], **input_data_kwargs, ) model = NBModel(input_data) - else: + elif isinstance(x, NBModel): model = x + else: + raise TypeError(f"Type for argument x not understood: {type(x)}. Valid types are NBModel, np.ndarray") - estimator = NBEstimator(model, dispersion=dispersion) - estimator.train(maxit=250, tolerance=tol) + # estimator = NBEstimator(model, dispersion=dispersion) + # estimator.train(maxit=250, tolerance=tol) # glmfit = glmFit(y, design=design, dispersion=dispersion, offset=offset, lib.size=lib.size, weights=weights,...) # Setting up the abundances. 
@@ -65,7 +68,7 @@ def glm_ql_fit( ave_log_cpm = None return calculate_prior_df( - model=x, + model=model, avg_log_cpm=ave_log_cpm, robust=robust, winsor_tail_p=winsor_tail_p, diff --git a/batchglm/external/edgeR/glm_one_group.py b/batchglm/external/edgeR/glm_one_group.py index 54686780..a5c155cd 100644 --- a/batchglm/external/edgeR/glm_one_group.py +++ b/batchglm/external/edgeR/glm_one_group.py @@ -4,7 +4,7 @@ import dask.array import numpy as np -from .external import BaseModelContainer +from .external import NumpyModelContainer low_value = 1e-10 logger = logging.getLogger(__name__) @@ -41,7 +41,7 @@ def get_single_group_start( def fit_single_group( - model: BaseModelContainer, + model: NumpyModelContainer, maxit: int = 50, tolerance: float = 1e-10, ): diff --git a/batchglm/external/edgeR/limma/fitFDist.py b/batchglm/external/edgeR/limma/fitFDist.py index e7d0a0a8..cf553994 100644 --- a/batchglm/external/edgeR/limma/fitFDist.py +++ b/batchglm/external/edgeR/limma/fitFDist.py @@ -1,5 +1,5 @@ import logging -from typing import Optional +from typing import Optional, Tuple import numpy as np import patsy @@ -184,5 +184,7 @@ def trigamma_inverse(x: np.ndarray): return y -def fit_f_dist_robustly(var: np.ndarray, df1: np.ndarray, covariate: np.ndarray, winsor_tail_p: np.ndarray): +def fit_f_dist_robustly( + var: np.ndarray, df1: np.ndarray, winsor_tail_p: Tuple[float, float], covariate: Optional[np.ndarray] = None +): pass diff --git a/batchglm/external/edgeR/limma/squeezeVar.py b/batchglm/external/edgeR/limma/squeezeVar.py index e9c79548..2443053f 100644 --- a/batchglm/external/edgeR/limma/squeezeVar.py +++ b/batchglm/external/edgeR/limma/squeezeVar.py @@ -1,9 +1,17 @@ +from typing import Optional, Tuple + import numpy as np from .fitFDist import fit_f_dist, fit_f_dist_robustly -def squeeze_var(var: np.ndarray, df: np.ndarray, covariate: np.ndarray, robust: bool, winsor_tail_p: np.ndarray): +def squeeze_var( + var: np.ndarray, + df: np.ndarray, + robust: bool, + winsor_tail_p: Tuple[float, float], + covariate: Optional[np.ndarray] = None, +): """ This method is a python version of limma's squeezeVar function. """ diff --git a/batchglm/external/edgeR/nbinomDeviance.py b/batchglm/external/edgeR/nbinomDeviance.py index 5d0f6c28..c422ccb8 100644 --- a/batchglm/external/edgeR/nbinomDeviance.py +++ b/batchglm/external/edgeR/nbinomDeviance.py @@ -2,10 +2,10 @@ import numpy as np -from .external import BaseModelContainer +from .external import NumpyModelContainer -def nb_deviance(model: BaseModelContainer, idx=...): +def nb_deviance(model: NumpyModelContainer, idx=...): """ Python version of the method implemented in a C++ function in edgeR. 
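Note on the quantity the nb_deviance wrapper above computes: it is the featurewise negative-binomial deviance between observed counts and fitted means, which prior_df.py then turns into the variance factors fed to the empirical-Bayes squeezing step. The following is only a minimal NumPy sketch of the textbook unit deviance (size = 1/dispersion); the function name is illustrative and the sketch omits the numerical safeguards (e.g. Poisson and low-count limits) that the edgeR C++ routine is expected to apply, so treat it as orientation, not as the ported implementation.

import numpy as np

def nb_unit_deviance_sketch(y: np.ndarray, mu: np.ndarray, dispersion: float) -> np.ndarray:
    """Textbook NB unit deviance with size = 1 / dispersion; convention 0 * log(0) = 0."""
    size = 1.0 / dispersion
    with np.errstate(divide="ignore", invalid="ignore"):
        term_obs = np.where(y > 0, y * np.log(y / mu), 0.0)   # observed-vs-fitted count term
    term_fit = (y + size) * np.log((y + size) / (mu + size))  # shrinkage toward the NB mean
    return 2.0 * (term_obs - term_fit)

Summed over observations per feature, this gives the residual deviance that the squeeze_var call in prior_df.py operates on.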
diff --git a/batchglm/external/edgeR/prior_df.py b/batchglm/external/edgeR/prior_df.py index fc466dd2..b163a67e 100644 --- a/batchglm/external/edgeR/prior_df.py +++ b/batchglm/external/edgeR/prior_df.py @@ -1,19 +1,21 @@ +from typing import Optional, Tuple, Union + import dask.array import numpy as np from .c_utils import nb_deviance from .estimator import NBEstimator -from .external import BaseModelContainer +from .external import NBModel from .limma import squeeze_var from .residDF import resid_df def calculate_prior_df( - model: BaseModelContainer, - avg_log_cpm: np.ndarray, + model: NBModel, robust: bool, - winsor_tail_p: np.ndarray, - dispersion: np.ndarray, + dispersion: Union[np.ndarray, float], + winsor_tail_p: Tuple[float, float], + avg_log_cpm: Optional[np.ndarray] = None, tolerance: float = 1e-10, ): """ @@ -23,23 +25,25 @@ def calculate_prior_df( estimator = NBEstimator(model, dispersion=dispersion) estimator.train(maxit=250, tolerance=tolerance) - zerofit = (model.x < 1e-4) & (np.nan_to_num(model.location) < 1e-4) + fitted_model = estimator._model_container + loc = fitted_model.location + scale = fitted_model.scale + x = fitted_model.x + dloc = fitted_model.design_loc + + zerofit = (x < 1e-4) & (np.nan_to_num(loc) < 1e-4) if isinstance(zerofit, dask.array.core.Array): zerofit = zerofit.compute() # shape (obs, features) - dloc = model.design_loc - if isinstance(model.design_loc, dask.array.core.Array): + if isinstance(dloc, dask.array.core.Array): dloc = dloc.compute() df_residual = resid_df(zerofit, dloc) # Empirical Bayes squeezing of the quasi-likelihood variance factors - x = model.x - if isinstance(model.x, dask.array.core.Array): + if isinstance(x, dask.array.core.Array): x = x.compute() - loc = model.location - if isinstance(model.location, dask.array.core.Array): + if isinstance(loc, dask.array.core.Array): loc = loc.compute() - scale = model.scale - if isinstance(model.scale, dask.array.core.Array): + if isinstance(scale, dask.array.core.Array): scale = scale.compute() with np.errstate(divide="ignore"): diff --git a/batchglm/external/edgeR/qr_decomposition.py b/batchglm/external/edgeR/qr_decomposition.py index 63b5f0f4..d26b375e 100644 --- a/batchglm/external/edgeR/qr_decomposition.py +++ b/batchglm/external/edgeR/qr_decomposition.py @@ -2,7 +2,7 @@ import numpy as np from scipy.linalg.lapack import dgeqrf, dormqr, dtrtrs -from .external import _ModelGLM +from .external import ModelGLM class QRDecomposition: @@ -61,7 +61,7 @@ def solve(self, y): raise RuntimeError("failed to solve the triangular system") -def get_levenberg_start(model: _ModelGLM, disp: np.ndarray, use_null: bool): +def get_levenberg_start(model: ModelGLM, disp: np.ndarray, use_null: bool): """ Parameter initialisation of location parameters using QR decomposition. This method is a python version of the C++ code in edgeR. 
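To make the QR-based start values above easier to follow: get_levenberg_start builds, per feature, the log of the weighted, size-factor-normalized mean counts (the values array, np.log(sum_norm_x / sum_weights) broadcast across observations) and then solves the least-squares system design_loc @ theta ≈ values column by column via qr.solve. Below is a plain-NumPy sketch of that idea, assuming a full-rank design and using np.linalg.qr in place of the LAPACK routines (dgeqrf/dormqr/dtrtrs) the port drives directly; the helper name is illustrative and not part of batchglm.

import numpy as np

def qr_start_sketch(design_loc: np.ndarray, log_means: np.ndarray) -> np.ndarray:
    """design_loc: (n_obs, n_par); log_means: (n_obs, n_features).

    Returns least-squares start coefficients of shape (n_par, n_features).
    """
    q, r = np.linalg.qr(design_loc)             # thin QR; assumes full column rank
    return np.linalg.solve(r, q.T @ log_means)  # back-substitution for all features at once

The ported class keeps the factorization around and solves one feature at a time, which matches the loop over qr.solve(values[:, j]) in the hunk above.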
From dd63af45aecb52d943fcde92d108d0e882ff92b3 Mon Sep 17 00:00:00 2001 From: picciama Date: Sat, 8 Oct 2022 10:16:14 +0200 Subject: [PATCH 58/58] added missing import and init file --- batchglm/external/edgeR/__init__.py | 0 batchglm/train/base/external.py | 1 + 2 files changed, 1 insertion(+) create mode 100644 batchglm/external/edgeR/__init__.py create mode 100644 batchglm/train/base/external.py diff --git a/batchglm/external/edgeR/__init__.py b/batchglm/external/edgeR/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/batchglm/train/base/external.py b/batchglm/train/base/external.py new file mode 100644 index 00000000..1f38c7e5 --- /dev/null +++ b/batchglm/train/base/external.py @@ -0,0 +1 @@ +from batchglm.models.base_glm import ModelGLM
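One convention worth keeping in mind when reading the estimateDisp.py changes in this series: candidate dispersions live on a log2 grid and are encoded as 0.1 * 2**grid_point, so every value returned by maximize_interpolant or wleb on that grid is mapped back to a dispersion the same way. The short NumPy sketch below only restates the mapping already present in the diffs, using the estimate_disp defaults; it introduces no new API.

import numpy as np

grid_range, grid_length = (-10.0, 10.0), 21                     # estimate_disp defaults
spline_pts = np.linspace(grid_range[0], grid_range[1], grid_length)
spline_disp = 0.1 * 2 ** spline_pts                             # candidate dispersions, ~9.8e-5 to 102.4

# Interpolated optima on the grid are converted back identically, e.g.
#   common_dispersion   = 0.1 * 2 ** overall
#   disp_trend          = 0.1 * 2 ** trend
#   featurewise values  = 0.1 * 2 ** out_individual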