diff --git a/.github/workflows/docs-tests.yml b/.github/workflows/docs-tests.yml index c0d6bc0..67d1372 100644 --- a/.github/workflows/docs-tests.yml +++ b/.github/workflows/docs-tests.yml @@ -22,16 +22,16 @@ jobs: env: OS: ubuntu-latest - PYTHON-VERSION: "3.9" + PYTHON-VERSION: "3.13" runs-on: ubuntu-latest steps: - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.13" - name: Install Pandoc run: sudo apt-get install pandoc @@ -40,7 +40,7 @@ jobs: run: python -m pip install --upgrade pip - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get pip cache location id: pip-cache @@ -57,10 +57,10 @@ jobs: print(f'text={now.year}/{now.month}-part{1 + now.day // 8}')" >> $GITHUB_OUTPUT - name: Load pip cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} - key: doctests-${{ runner.os }}-Python3.9-${{ steps.date.outputs.text }}-pip-${{ hashFiles('**/setup.py', './.github/workflows/unit-tests.yml') }} + key: doctests-${{ runner.os }}-Python3.13-${{ steps.date.outputs.text }}-pip-${{ hashFiles('**/setup.py', './.github/workflows/unit-tests.yml') }} - name: Install spellcheck library run: sudo apt-get install libenchant-2-2 diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 1cffbc1..c8618c0 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@master - name: Set up Python 3.9 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.9 diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 2fbb73e..7f56fa4 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -21,7 +21,7 @@ jobs: fail-fast: false matrix: os: [windows-latest, ubuntu-latest, macos-latest] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] env: OS: ${{ matrix.os }} diff --git a/.gitignore b/.gitignore index 1224580..128b34b 100644 --- a/.gitignore +++ b/.gitignore @@ -179,6 +179,7 @@ cython_debug/ .idea/**/usage.statistics.xml .idea/**/dictionaries .idea/**/shelf +.vscode/* # Generated files .idea/**/contentModel.xml @@ -244,3 +245,6 @@ fabric.properties # Test files *_scratch.ipynb + +# Backup files +*.bak \ No newline at end of file diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 6a7ad9c..03701c1 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -7,9 +7,9 @@ version: 2 # Set the version of Python and other tools you might need build: - os: ubuntu-20.04 + os: ubuntu-24.04 tools: - python: "3.8" + python: "3.13" # Build documentation in the docs/ directory with Sphinx sphinx: diff --git a/docs/conf.py b/docs/conf.py index 4b2daa0..529c8c7 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,18 +13,35 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # -import pkg_resources +import codecs +import os.path +import sys + +# go up a dir and include that guy = +p = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +sys.path.insert(0, p) -import endaq # -- Project information ----------------------------------------------------- +def get_version(rel_path): + """ Read the version number directly from the source. 
""" + with codecs.open(rel_path, 'r') as fp: + for line in fp: + if line.startswith('__version__'): + delim = '"' if '"' in line else "'" + return line.split(delim)[1] + else: + raise RuntimeError("Unable to find version string.") + + project = 'enDAQ' copyright = '2021, Mide Technology Corp.' author = '' # The full version, including alpha/beta/rc tags -release = pkg_resources.get_distribution("endaq").version +release = get_version(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'endaq', '__init__.py'))) + # The short X.Y version version = '.'.join(release.split(".")[:2]) @@ -44,6 +61,7 @@ 'sphinx.ext.ifconfig', 'sphinx.ext.githubpages', 'sphinx_plotly_directive', + 'sphinxcontrib.spelling', 'nbsphinx', ] @@ -64,7 +82,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -100,7 +118,9 @@ "github_url": "https://github.com/MideTechnology/endaq-python", "twitter_url": "https://twitter.com/enDAQ_sensors", "collapse_navigation": True, - "google_analytics_id": "G-E9QXH4H5LP", + "analytics": { + "google_analytics_id": "G-E9QXH4H5LP", + } } # Add any paths that contain custom static files (such as style sheets) here, diff --git a/docs/endaq/ide_usage.rst b/docs/endaq/ide_usage.rst index 324392e..7099c83 100644 --- a/docs/endaq/ide_usage.rst +++ b/docs/endaq/ide_usage.rst @@ -37,6 +37,68 @@ parameters: doc3 = get_doc("tests/test.ide", start="5s", end="10s") +Accessing measurement data in a Dataset/IDE file +------------------------------------------------ +An enDAQ device consists of many different sensors, and enDAQ devices record their measurement data into separate +Channels that correspond to the sensor taking the measurement. This is done because each Channel samples at a different +rate, so while Channel 59 (the Control Pad Pressure/Temperature/Humidity sensor) samples at 10 Hz, Channel 8 (the main +analog accelerometer channel) may sample at 20000 Hz. Channels themselves consist of different subchannels, which may be +different axes (X, Y, Z) or completely different measurements like temperature and pressure. All subchannels in a +channel are sampled at approximately the same time. + +The Channel data are stored in the ``channels`` property of a Dataset, which is returned from the :py:func:`~endaq.ide.get_doc() function. The easiest way to access this is to convert it to a +Pandas DataFrame using :py:func:`~endaq.ide.to_pandas(doc)`. Visit `our internal documentation `_ +for some quick tips on Pandas, or go `straight to the source `_. + +.. code:: python3 + + import endaq.ide + # Read in a doc + doc2 = endaq.ide.get_doc("https://drive.google.com/file/d/1t3JqbZGhuZbIK9agH24YZIdVE26-NOF5/view?usp=sharing") + # List the available Channels + print(f"{doc2.channels=}") + # Convert the Control Pad Pressure/Temperature/Humidity Channel (Channel 59) to a Pandas DataFrame + control_pad_data = endaq.ide.to_pandas(doc2.channels[59]) + # Print the subchannel names + print(f"{control_pad_data.columns=}") + # Print the max and min temperatures seen + print(f"Max Temp={control_pad_data['Control Pad Temperature'].max()}, Min Temp={control_pad_data['Control Pad Temperature'].min()}") + +The output of the above code is: + +.. 
code-block:: + + doc2.channels={32: , 80: , 36: , 70: , 59: , 76: } + control_pad_data.columns=Index(['Control Pad Pressure', 'Control Pad Temperature'], dtype='object') + Max Temp=24.899999618530273, Min Temp=24.260000228881836 + +Note that by default, :py:func:`~endaq.ide.to_pandas(doc)` uses ``datetime`` for the index format, meaning the +measurements are accessed based on the absolute time they were recorded. Users often prefer to access the data using +``timedelta``, the amount of time since the recording started. Using this, to get the duration of the Control Pad data +and the average of the first 5 seconds, we could use: + +.. code:: python3 + + import endaq.ide + import pandas as pd + # Read in a doc + doc2 = endaq.ide.get_doc("https://drive.google.com/file/d/1t3JqbZGhuZbIK9agH24YZIdVE26-NOF5/view?usp=sharing") + # Convert the Control Pad Pressure/Temperature/Humidity Channel (Channel 59) to a Pandas DataFrame + control_pad_data = endaq.ide.to_pandas(doc2.channels[59], time_mode='timedelta') + # Print the time duration + print(f"Duration={control_pad_data.index[-1]-control_pad_data.index[0]}") + # Print the mean of the first 5 seconds + print(f"{control_pad_data[pd.Timedelta(seconds=0):pd.Timedelta(seconds=5)].mean()}") + +The output of the above code is: + +.. code-block:: + + Duration=0 days 00:00:17.931518 + Control Pad Pressure 101728.414991 + Control Pad Temperature 24.607073 + dtype: float64 + Summarizing IDE files: :py:func:`endaq.ide.get_channel_table()` --------------------------------------------------------------- diff --git a/docs/requirements.txt b/docs/requirements.txt index baa9f31..4fa17c4 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,13 @@ -Sphinx>=5.0.2 -sphinxcontrib-applehelp==1.0.2 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.0 +Sphinx>=8.1.3 +sphinxcontrib-applehelp==2.0.0 +sphinxcontrib-devhelp==2.0.0 +sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 +sphinxcontrib-qthelp==2.0.0 +sphinxcontrib-serializinghtml==2.0.0 +sphinxcontrib-spelling==8.0.1 -pydata-sphinx-theme==0.7.1 +pydata-sphinx-theme==0.16.1 sphinx-plotly-directive==0.1.3 -nbsphinx==0.8.8 -ipython==8.10 +nbsphinx==0.9.7 +ipython==9.2.0 diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 69285ba..af58671 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -5,14 +5,29 @@ sinusoids centric enDAQ +Mide NumPy SciPy Pandas Plotly +Matplotlib +Colab +Jupyter +Seaborn Tukey Chebyshev Butterworth Fourier +Heatmaps +ide +calc +endaq +accel +str +psd +pkl +kth +GetDataBuilder basename pathname @@ -24,6 +39,7 @@ subchannel subchannels DataFrame dataframe +dataframes ndarray dropdown iterable @@ -36,6 +52,7 @@ periodogram periodograms spectrogram spectrograms +spectrums resample resampled resampling @@ -43,3 +60,5 @@ SNE integrations quaternion quaternions +Welch +kurtosis \ No newline at end of file diff --git a/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb b/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb index 03a172c..a073667 100644 --- a/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb +++ b/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb @@ -14,7 +14,7 @@ "## Introduction\n", "This notebook serves as an introduction to Python for a mechanical engineer looking to plot and analyze some acceleration data in a CSV file. 
Being a Colab, this tool can freely be used without installing anything.\n", "\n", - "For more information on making the swith to Python see [enDAQ's blog, Why and How to Get Started in Python for a MATLAB User](https://blog.endaq.com/why-and-how-to-get-started-in-python-for-a-matlab-user).\n", + "For more information on making the switch to Python see [enDAQ's blog, Why and How to Get Started in Python for a MATLAB User](https://blog.endaq.com/why-and-how-to-get-started-in-python-for-a-matlab-user).\n", "\n", "This is part of our webinar series on Python for Mechanical Engineers:\n", "\n", @@ -1493,7 +1493,7 @@ }, "source": [ "### FFT from PSD\n", - "Here we can use the output of a PSD and convet it to a typical DFT. This has the benefit of allowing you to explicitely define the frequency bin width." + "Here we can use the output of a PSD and convert it to a typical DFT. This has the benefit of allowing you to explicitly define the frequency bin width." ] }, { @@ -1887,4 +1887,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb index 5459757..01ee202 100644 --- a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb +++ b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb @@ -14,7 +14,7 @@ "\n", "1. [Get Started with Python](https://colab.research.google.com/drive/1_pcGtgJleapV9tz5WfuRuqfWryjqhPHy#scrollTo=ikUJITDDIp19)\n", " * Blog: [Get Started with Python: Why and How Mechanical Engineers Should Make the Switch](https://blog.endaq.com/get-started-with-python-why-how-mechanical-engineers-should-make-the-switch)\n", - "2. **Introduction to Numpy & Pandas**\n", + "2. **Introduction to NumPy & Pandas**\n", " * [Watch Recording of This](https://info.endaq.com/pandas-and-numpy-for-data-analysis-webinar)\n", "3. [Introduction to Plotly](https://colab.research.google.com/drive/1pag2pKQQW5amWgRykAH8uMAPqHA2yUfU?usp=sharing)\n", "4. [Introduction of the enDAQ Library](https://colab.research.google.com/drive/1WAtQ8JJC_ny0fki7eUABACMA-isZzKB6)\n", @@ -505,7 +505,7 @@ "id": "cv0nw1SnLb5x" }, "source": [ - "Logspace is the equivalent of rasing a base by a linspaced array." + "Logspace is the equivalent of raising a base by a linspaced array." ] }, { @@ -7821,7 +7821,7 @@ "source": [ "### Installation\n", "\n", - "The code is live on [GitHub](https://github.com/MideTechnology/endaq-python), [PyPI](https://pypi.org/project/endaq/), and cleaner documentation is in process that will eventually live on a subdomain of endaq.com.\n", + "The code is live on [GitHub](https://github.com/MideTechnology/endaq-python), [PyPI](https://pypi.org/project/endaq/), and cleaner documentation lives at [docs.endaq.com](https://docs.endaq.com/en/latest/).\n", "\n", "It can easily be installed with pip." ] diff --git a/docs/webinars/Webinar_Introduction_Plotly.ipynb b/docs/webinars/Webinar_Introduction_Plotly.ipynb index 0ca2356..735c3a6 100644 --- a/docs/webinars/Webinar_Introduction_Plotly.ipynb +++ b/docs/webinars/Webinar_Introduction_Plotly.ipynb @@ -345,7 +345,7 @@ }, "source": [ "### [ggplot](https://plotnine.readthedocs.io/en/stable/)\n", - "Introduces a \"grammer of graphics\" logic to plotting data which allows explicit mapping of data to the visual representation. This is something plotly express excels at." + "Introduces a \"grammar of graphics\" logic to plotting data which allows explicit mapping of data to the visual representation. 
This is something plotly express excels at." ] }, { @@ -1304,7 +1304,7 @@ "id": "u_pZeZ5An8PG" }, "source": [ - "Now let's get crazy and customize all the \"common\" settings. But note that there are a LOT of different parameters that can be explicitely defined. Remember, Plotly has very thorough documentation, so check it out!\n", + "Now let's get crazy and customize all the \"common\" settings. But note that there are a LOT of different parameters that can be explicitly defined. Remember, Plotly has very thorough documentation, so check it out!\n", "* [Figure Layout](https://plotly.com/python/reference/layout/)\n", "* [X Axis](https://plotly.com/python/reference/layout/xaxis/)\n", "* [Y Axis](https://plotly.com/python/reference/layout/yaxis/)\n", diff --git a/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb b/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb index 1e583e4..2ce7614 100644 --- a/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb +++ b/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb @@ -17,7 +17,7 @@ "\n", "1. [Get Started with Python](https://colab.research.google.com/drive/1_pcGtgJleapV9tz5WfuRuqfWryjqhPHy#scrollTo=ikUJITDDIp19)\n", " * Blog: [Get Started with Python: Why and How Mechanical Engineers Should Make the Switch](https://blog.endaq.com/get-started-with-python-why-how-mechanical-engineers-should-make-the-switch)\n", - "2. [Introduction to Numpy & Pandas for Data Analysis](https://colab.research.google.com/drive/1O-VwAdRoSlcrineAk0Jkd_fcw7mFGHa4#scrollTo=ce97q1ZcBiwj)\n", + "2. [Introduction to NumPy & Pandas for Data Analysis](https://colab.research.google.com/drive/1O-VwAdRoSlcrineAk0Jkd_fcw7mFGHa4#scrollTo=ce97q1ZcBiwj)\n", "3. [Introduction to Plotly for Plotting Data](https://colab.research.google.com/drive/1pag2pKQQW5amWgRykAH8uMAPqHA2yUfU)\n", "4. [Introduction of the enDAQ Library](https://colab.research.google.com/drive/1WAtQ8JJC_ny0fki7eUABACMA-isZzKB6)\n", " - There are lots of examples in this!\n", @@ -1082,7 +1082,7 @@ "id": "env80d2NrsQ8" }, "source": [ - "Now we will use the [FFTW algorithm](http://www.fftw.org/) which is available in the [pyFFTW library](https://hgomersall.github.io/pyFFTW/index.html) under a GPL license (which makes it potentially difficult for us to use because we use the more premissive MIT license).\n", + "Now we will use the [FFTW algorithm](http://www.fftw.org/) which is available in the [pyFFTW library](https://hgomersall.github.io/pyFFTW/index.html) under a GPL license (which makes it potentially difficult for us to use because we use the more permissive MIT license).\n", "\n", "First let's download it." ] @@ -1117,7 +1117,7 @@ "id": "M70BmBjKsRvV" }, "source": [ - "Now let's use it in a function which allows for a drop-in replacement to the Numpy code. This algorithm is generally regarded as the fatest for computing discrete Fourier transforms - so we'll put it to the test!" + "Now let's use it in a function which allows for a drop-in replacement to the Numpy code. This algorithm is generally regarded as the fastest for computing discrete Fourier transforms - so we'll put it to the test!" ] }, { @@ -1585,7 +1585,7 @@ "id": "5bXza1s0Ep-Q" }, "source": [ - "So what does this mean!? FFTW is the fastest as expected, but only if we first structure the data in a more efficient way. But typically you will not have the data structured in this \"optimal\" way for FFTW which means the time it takes to restucture it is real.\n", + "So what does this mean!? 
FFTW is the fastest as expected, but only if we first structure the data in a more efficient way. But typically you will not have the data structured in this \"optimal\" way for FFTW which means the time it takes to restructure it is real.\n", "\n", "Long story short for *this audience*, using Welch's method is fastest!" ] @@ -4466,7 +4466,7 @@ "\n", "This is currently available, [see docs](https://docs.endaq.com/en/latest/endaq/batch.html), but we are working on a few bug fixes and improved functionality. This module allows you to batch process many *IDE* (only works for our sensors for now).\n", "\n", - "In a seperate document I first executed the following code to gather all the .IDE files I wanted to analyze (I hide the actual folder name).\n", + "In a separate document I first executed the following code to gather all the .IDE files I wanted to analyze (I hide the actual folder name).\n", "~~~python\n", "import glob\n", "directory = r\"C:\\Users\\shanly\\enDAQ-Notebooks\\...\"+\"\\\\\"\n", @@ -5450,7 +5450,7 @@ "id": "iFv3nr5DPBhj" }, "source": [ - "That's pretty cool! But we'll notice that the animation moves outside the inital bounds pretty quickly. So let's first find an easy way to calculate these metrics of max/min/median per frequency bin." + "That's pretty cool! But we'll notice that the animation moves outside the initial bounds pretty quickly. So let's first find an easy way to calculate these metrics of max/min/median per frequency bin." ] }, { diff --git a/endaq/calc/rotation.py b/endaq/calc/rotation.py index e3117ca..abb19e8 100644 --- a/endaq/calc/rotation.py +++ b/endaq/calc/rotation.py @@ -31,7 +31,7 @@ def _validate_euler_mode(mode: str) -> Tuple[str, List[str]]: if not set(_mode).issubset({'x', 'y', 'z'}): raise ValueError(f'Modes other than xyz (such as xyz, xyx, zxz) must ' f'separated with one of " ", "-" or "_". Mode ' - f'{mode} is not a valid euler angle mode.') + f'{mode} is not a valid Euler angle mode.') mode_list = list(_mode) if not ( @@ -63,18 +63,18 @@ def _validate_euler_mode(mode: str) -> Tuple[str, List[str]]: def quaternion_to_euler(df: pd.DataFrame, mode: str = 'x-y-z') -> pd.DataFrame: """ - Convert quaternion data in the dataframe ``df`` to euler angles. This can + Convert quaternion data in the dataframe ``df`` to Euler angles. This can be done with either intrinsic or extrinsic rotations, determined automatically based on ``mode``. - :param df: The input quaternions to convert. Must have columns labelled + :param df: The input quaternions to convert. Must have columns labeled 'X', 'Y', 'Z', and 'W'. :param mode: The order of the axes to rotate. The default is intrinsic rotation about x-y-z. - :return: A dataframe with the euler-angles of the quaternion data. + :return: A dataframe with the Euler-angles of the quaternion data. .. seealso:: - - `SciPy's documentation on converting into euler angles `_ + - `SciPy's documentation on converting into Euler angles `_ - `Wikipedia's article on Euler angles `_ """ diff --git a/endaq/plot/plots.py b/endaq/plot/plots.py index 69afcad..7996570 100644 --- a/endaq/plot/plots.py +++ b/endaq/plot/plots.py @@ -259,11 +259,9 @@ def gen_map(df_map: pd.DataFrame, (defaults to ground speed). :param df_map: The pandas dataframe containing the recording data. 
- :param mapbox_access_token: The access token (or API key) needed to be able to plot against a map using Mapbox, - `create a free account here `_ - - * If no access token is provided, a `"stamen-terrain"` tile will be used, - `see Plotly for more information `_ + :param mapbox_access_token: Deprecated, the access token is no longer needed, plots are now made through MapLibre, + `to learn more, see `_ + `see Plotly for more information `_ :param lat: The dataframe column title to use for latitude :param lon: The dataframe column title to use for longitude :param color_by_column: The dataframe column title to color the plotted points by. @@ -321,22 +319,17 @@ def gen_map(df_map: pd.DataFrame, zoom = determine_plotly_map_zoom(lats=df_map[lat], lons=df_map[lon]) center = get_center_of_coordinates(lats=df_map[lat], lons=df_map[lon]) - px.set_mapbox_access_token(mapbox_access_token) - - fig = px.scatter_mapbox( + fig = px.scatter_map( df_map, lat=lat, lon=lon, color=color_by_column, hover_data=hover_data, size_max=size_max, - zoom=zoom + zoom_offset, + zoom=int(zoom + zoom_offset), center=center, ) - if mapbox_access_token is None: - fig.update_layout(mapbox_style="stamen-terrain") - return fig.update_layout(margin={"r": 20, "t": 20, "l": 20, "b": 0}) @@ -658,7 +651,7 @@ def spectrum_over_time( * `Peak`: per timestamp the peak frequency is determined and plotted against time * `Lines`: the value in each frequency bin is plotted against time :param var_column: the column name in the dataframe that defines the different variables, default is `"variable"` - :param var_to_process: the variable value in the `var_column` to filter the input df down to, + :param var_to_process: the variable value in the `var_column` to filter the input `df` down to, if none is provided (the default) this function will filter to the first value :param time_column: the column name in the dataframe that defines the timestamps, default is `"timestamp"` :param freq_column: the column name in the dataframe that defines the frequency, default is `"frequency (Hz)"` diff --git a/endaq/plot/utilities.py b/endaq/plot/utilities.py index babf47f..f1e62a3 100644 --- a/endaq/plot/utilities.py +++ b/endaq/plot/utilities.py @@ -4,7 +4,8 @@ import plotly.graph_objects as go import numpy as np import typing -from typing import Union +from typing import Union, Optional +import copy def define_theme(template_name: str = "endaq_cloud", default_plotly_template: str = 'plotly_dark', @@ -44,7 +45,7 @@ def define_theme(template_name: str = "endaq_cloud", default_plotly_template: st pio.templates[template_name]['layout']['colorscale']['diverging'] = [[0.0, '#6914F0'], [0.5, '#f7f7f7'], [1.0, '#EE7F27']] - plot_types = ['contour', 'heatmap', 'heatmapgl', 'histogram2d', 'histogram2dcontour', 'surface'] + plot_types = ['contour', 'heatmap', 'histogram2d', 'histogram2dcontour', 'surface'] for p in plot_types: pio.templates[template_name]['data'][p][0].colorscale = colorbar @@ -151,8 +152,8 @@ def get_center_of_coordinates(lats: np.ndarray, lons: np.ndarray, as_list: bool on the formatting of this return value """ # Create Copy to Not Change Source Data - lats = np.copy(lats) - lons = np.copy(lons) + lats = copy.deepcopy(lats) + lons = copy.deepcopy(lons) # Convert coordinates to radians if given in degrees if as_degrees: @@ -197,7 +198,7 @@ def determine_plotly_map_zoom( margin: float = 1.2, ) -> float: """ - Finds optimal zoom for a plotly mapbox. Must be passed (``lons`` & ``lats``) or ``lonlats``. + Finds optimal zoom for a plotly map. 
Must be passed (``lons`` & ``lats``) or ``lonlats``. Originally based on the following post: https://stackoverflow.com/questions/63787612/plotly-automatic-zooming-for-mapbox-maps diff --git a/setup.py b/setup.py index a19b21b..48bda63 100644 --- a/setup.py +++ b/setup.py @@ -65,14 +65,14 @@ def get_version(rel_path): long_description_content_type='text/markdown', url='https://github.com/MideTechnology/endaq-python', license='MIT', - classifiers=['Development Status :: 4 - Beta', + classifiers=['Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Topic :: Scientific/Engineering', ], keywords='ebml binary ide mide endaq', diff --git a/tests/plot/test_plots.py b/tests/plot/test_plots.py index 6cf0870..60f5d4b 100644 --- a/tests/plot/test_plots.py +++ b/tests/plot/test_plots.py @@ -106,7 +106,7 @@ def test_map(): lon="Longitude", color_by_column="Ground Speed" ) - assert fig['data'][0]['subplot'] == 'mapbox' + assert fig['data'][0]['subplot'] == 'map' def test_table_plot(generate_dataframe):
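
Note (not part of the patch): for reviewers unfamiliar with the Plotly change in endaq/plot/plots.py above, below is a minimal, self-contained sketch of the px.scatter_mapbox -> px.scatter_map migration. It assumes Plotly 5.24+ (where scatter_map is available); the DataFrame column names and coordinate values are made up purely for illustration and do not come from the patch.

    # Standalone sketch of the MapLibre-based px.scatter_map call used in gen_map().
    # Sample data below is illustrative only.
    import pandas as pd
    import plotly.express as px

    df_map = pd.DataFrame({
        "Latitude":     [42.358, 42.360, 42.362],
        "Longitude":    [-71.058, -71.056, -71.054],
        "Ground Speed": [1.2, 2.4, 3.1],
    })

    # No Mapbox access token is required; tiles are rendered via MapLibre.
    fig = px.scatter_map(      # formerly px.scatter_mapbox
        df_map,
        lat="Latitude",
        lon="Longitude",
        color="Ground Speed",
        zoom=12,               # the patch casts the computed zoom to int before passing it here
    )
    fig.update_layout(margin={"r": 20, "t": 20, "l": 20, "b": 0})

    # The trace is attached to a "map" subplot rather than "mapbox",
    # which is what the updated assertion in tests/plot/test_plots.py checks.
    assert fig.data[0].subplot == "map"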