From f40c615b07ee66effa933f36a4015899b2c0806f Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Fri, 23 May 2025 23:52:26 -0400 Subject: [PATCH 1/9] Updating for python 3.13. Includes updates for plotly 6 to remove heatmapgl and update mapbox to map, also fixed GPS plotting issue --- .github/workflows/unit-tests.yml | 2 +- endaq/plot/plots.py | 17 +++++------------ endaq/plot/utilities.py | 11 ++++++----- setup.py | 2 +- tests/plot/test_plots.py | 2 +- 5 files changed, 14 insertions(+), 20 deletions(-) diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 2fbb73e7..7f56fa44 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -21,7 +21,7 @@ jobs: fail-fast: false matrix: os: [windows-latest, ubuntu-latest, macos-latest] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] env: OS: ${{ matrix.os }} diff --git a/endaq/plot/plots.py b/endaq/plot/plots.py index 69afcadc..ff1324a8 100644 --- a/endaq/plot/plots.py +++ b/endaq/plot/plots.py @@ -259,11 +259,9 @@ def gen_map(df_map: pd.DataFrame, (defaults to ground speed). :param df_map: The pandas dataframe containing the recording data. - :param mapbox_access_token: The access token (or API key) needed to be able to plot against a map using Mapbox, - `create a free account here `_ - - * If no access token is provided, a `"stamen-terrain"` tile will be used, - `see Plotly for more information `_ + :param mapbox_access_token: Deprecated, the access token is nolonger needed, plots are now made through MapLibre, + `to learn more, see `_ + `see Plotly for more information `_ :param lat: The dataframe column title to use for latitude :param lon: The dataframe column title to use for longitude :param color_by_column: The dataframe column title to color the plotted points by. 
@@ -321,22 +319,17 @@ def gen_map(df_map: pd.DataFrame, zoom = determine_plotly_map_zoom(lats=df_map[lat], lons=df_map[lon]) center = get_center_of_coordinates(lats=df_map[lat], lons=df_map[lon]) - px.set_mapbox_access_token(mapbox_access_token) - - fig = px.scatter_mapbox( + fig = px.scatter_map( df_map, lat=lat, lon=lon, color=color_by_column, hover_data=hover_data, size_max=size_max, - zoom=zoom + zoom_offset, + zoom=int(zoom + zoom_offset), center=center, ) - if mapbox_access_token is None: - fig.update_layout(mapbox_style="stamen-terrain") - return fig.update_layout(margin={"r": 20, "t": 20, "l": 20, "b": 0}) diff --git a/endaq/plot/utilities.py b/endaq/plot/utilities.py index babf47f4..f1e62a3c 100644 --- a/endaq/plot/utilities.py +++ b/endaq/plot/utilities.py @@ -4,7 +4,8 @@ import plotly.graph_objects as go import numpy as np import typing -from typing import Union +from typing import Union, Optional +import copy def define_theme(template_name: str = "endaq_cloud", default_plotly_template: str = 'plotly_dark', @@ -44,7 +45,7 @@ def define_theme(template_name: str = "endaq_cloud", default_plotly_template: st pio.templates[template_name]['layout']['colorscale']['diverging'] = [[0.0, '#6914F0'], [0.5, '#f7f7f7'], [1.0, '#EE7F27']] - plot_types = ['contour', 'heatmap', 'heatmapgl', 'histogram2d', 'histogram2dcontour', 'surface'] + plot_types = ['contour', 'heatmap', 'histogram2d', 'histogram2dcontour', 'surface'] for p in plot_types: pio.templates[template_name]['data'][p][0].colorscale = colorbar @@ -151,8 +152,8 @@ def get_center_of_coordinates(lats: np.ndarray, lons: np.ndarray, as_list: bool on the formatting of this return value """ # Create Copy to Not Change Source Data - lats = np.copy(lats) - lons = np.copy(lons) + lats = copy.deepcopy(lats) + lons = copy.deepcopy(lons) # Convert coordinates to radians if given in degrees if as_degrees: @@ -197,7 +198,7 @@ def determine_plotly_map_zoom( margin: float = 1.2, ) -> float: """ - Finds optimal zoom for a 
plotly mapbox. Must be passed (``lons`` & ``lats``) or ``lonlats``. + Finds optimal zoom for a plotly map. Must be passed (``lons`` & ``lats``) or ``lonlats``. Originally based on the following post: https://stackoverflow.com/questions/63787612/plotly-automatic-zooming-for-mapbox-maps diff --git a/setup.py b/setup.py index a19b21ba..7227ca3f 100644 --- a/setup.py +++ b/setup.py @@ -68,11 +68,11 @@ def get_version(rel_path): classifiers=['Development Status :: 4 - Beta', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Topic :: Scientific/Engineering', ], keywords='ebml binary ide mide endaq', diff --git a/tests/plot/test_plots.py b/tests/plot/test_plots.py index 6cf08705..60f5d4b7 100644 --- a/tests/plot/test_plots.py +++ b/tests/plot/test_plots.py @@ -106,7 +106,7 @@ def test_map(): lon="Longitude", color_by_column="Ground Speed" ) - assert fig['data'][0]['subplot'] == 'mapbox' + assert fig['data'][0]['subplot'] == 'map' def test_table_plot(generate_dataframe): From 80ddb725cfa66562ecde4e5a57cbbbde7385f255 Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Sat, 24 May 2025 10:43:42 -0400 Subject: [PATCH 2/9] replacing pkg_resource, updating github action versions, adding debug for xmlrpc error --- .github/workflows/docs-tests.yml | 13 ++++++++++--- docs/conf.py | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docs-tests.yml b/.github/workflows/docs-tests.yml index c0d6bc03..fe780f2a 100644 --- a/.github/workflows/docs-tests.yml +++ b/.github/workflows/docs-tests.yml @@ -29,7 +29,7 @@ jobs: steps: - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.9" @@ -40,7 +40,7 @@ jobs: run: python -m pip 
install --upgrade pip - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get pip cache location id: pip-cache @@ -57,7 +57,7 @@ jobs: print(f'text={now.year}/{now.month}-part{1 + now.day // 8}')" >> $GITHUB_OUTPUT - name: Load pip cache - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} key: doctests-${{ runner.os }}-Python3.9-${{ steps.date.outputs.text }}-pip-${{ hashFiles('**/setup.py', './.github/workflows/unit-tests.yml') }} @@ -76,3 +76,10 @@ jobs: - name: Run docs spell checker run: sphinx-build -b spelling docs docs/_spellcheck + + - name: print debug info + if: ${{ always() }} + run: + ls /tmp/ + ls ./tmp/ + cat /tmp/sphinx-err-975cxlqc.log diff --git a/docs/conf.py b/docs/conf.py index 4b2daa09..8e0b5f5d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # -import pkg_resources +from importlib import metadata import endaq @@ -24,7 +24,7 @@ author = '' # The full version, including alpha/beta/rc tags -release = pkg_resources.get_distribution("endaq").version +release = metadata.version("endaq") # The short X.Y version version = '.'.join(release.split(".")[:2]) From bdd95b560ca20636d1e9c63efef732b6066f5cc7 Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Sat, 24 May 2025 10:52:56 -0400 Subject: [PATCH 3/9] Working on xmlrpc issue, disabling unit tests for a moment --- .github/workflows/docs-tests.yml | 9 ++++++--- .github/workflows/unit-tests.yml | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docs-tests.yml b/.github/workflows/docs-tests.yml index fe780f2a..07991e86 100644 --- a/.github/workflows/docs-tests.yml +++ b/.github/workflows/docs-tests.yml @@ -79,7 +79,10 @@ jobs: - name: print debug info if: ${{ always() }} - run: + run: | ls /tmp/ - ls ./tmp/ - cat /tmp/sphinx-err-975cxlqc.log + for file in /tmp/*.log; do + echo "---- Contents of 
$file ----" + cat "$file" + echo "" # extra newline between files + done diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 7f56fa44..4f244eda 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -5,11 +5,11 @@ on: push: branches: - main - - development +# - development pull_request: branches: - main - - development +# - development workflow_dispatch: From a3f92217f395c9eae83ac8c30fe810f4ac658ef2 Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Sat, 24 May 2025 17:06:07 -0400 Subject: [PATCH 4/9] Updated the doc tests to run on Python3.13, fixed a bunch of typos. Tested locally, which doesn't mean much --- .readthedocs.yaml | 4 ++-- docs/conf.py | 15 +++++++++++++-- docs/requirements.txt | 19 ++++++++++--------- docs/spelling_wordlist.txt | 19 +++++++++++++++++++ ...tro_Python_Acceleration_CSV_Analysis.ipynb | 6 +++--- ...ebinar_Introduction_NumPy_and_Pandas.ipynb | 4 ++-- .../Webinar_Introduction_Plotly.ipynb | 4 ++-- .../Webinar_enDAQ_Custom_Analysis.ipynb | 12 ++++++------ endaq/calc/rotation.py | 10 +++++----- endaq/plot/plots.py | 4 ++-- 10 files changed, 64 insertions(+), 33 deletions(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 6a7ad9ce..03701c14 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -7,9 +7,9 @@ version: 2 # Set the version of Python and other tools you might need build: - os: ubuntu-20.04 + os: ubuntu-24.04 tools: - python: "3.8" + python: "3.13" # Build documentation in the docs/ directory with Sphinx sphinx: diff --git a/docs/conf.py b/docs/conf.py index 8e0b5f5d..8a5f671d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,6 +13,15 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
# +import sys +print(f"{sys.path=}") +print(f"{__file__=}") +# go up a dir and include that guy = +p = "/".join(__file__.split("/")[:-2]) +sys.path.append(p) +print(f"{sys.path=}") + + from importlib import metadata import endaq @@ -24,7 +33,8 @@ author = '' # The full version, including alpha/beta/rc tags -release = metadata.version("endaq") +# release = metadata.version("endaq") +release = '1.5.3' # The short X.Y version version = '.'.join(release.split(".")[:2]) @@ -44,6 +54,7 @@ 'sphinx.ext.ifconfig', 'sphinx.ext.githubpages', 'sphinx_plotly_directive', + 'sphinxcontrib.spelling', 'nbsphinx', ] @@ -64,7 +75,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. diff --git a/docs/requirements.txt b/docs/requirements.txt index baa9f312..4fa17c41 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,12 +1,13 @@ -Sphinx>=5.0.2 -sphinxcontrib-applehelp==1.0.2 -sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==2.0.0 +Sphinx>=8.1.3 +sphinxcontrib-applehelp==2.0.0 +sphinxcontrib-devhelp==2.0.0 +sphinxcontrib-htmlhelp==2.1.0 sphinxcontrib-jsmath==1.0.1 -sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.5 +sphinxcontrib-qthelp==2.0.0 +sphinxcontrib-serializinghtml==2.0.0 +sphinxcontrib-spelling==8.0.1 -pydata-sphinx-theme==0.7.1 +pydata-sphinx-theme==0.16.1 sphinx-plotly-directive==0.1.3 -nbsphinx==0.8.8 -ipython==8.10 +nbsphinx==0.9.7 +ipython==9.2.0 diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt index 69285ba1..af58671c 100644 --- a/docs/spelling_wordlist.txt +++ b/docs/spelling_wordlist.txt @@ -5,14 +5,29 @@ sinusoids centric enDAQ +Mide NumPy SciPy Pandas Plotly +Matplotlib +Colab +Jupyter +Seaborn Tukey Chebyshev Butterworth Fourier +Heatmaps +ide +calc +endaq 
+accel +str +psd +pkl +kth +GetDataBuilder basename pathname @@ -24,6 +39,7 @@ subchannel subchannels DataFrame dataframe +dataframes ndarray dropdown iterable @@ -36,6 +52,7 @@ periodogram periodograms spectrogram spectrograms +spectrums resample resampled resampling @@ -43,3 +60,5 @@ SNE integrations quaternion quaternions +Welch +kurtosis \ No newline at end of file diff --git a/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb b/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb index 03a172c1..a073667f 100644 --- a/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb +++ b/docs/webinars/Webinar_Intro_Python_Acceleration_CSV_Analysis.ipynb @@ -14,7 +14,7 @@ "## Introduction\n", "This notebook serves as an introduction to Python for a mechanical engineer looking to plot and analyze some acceleration data in a CSV file. Being a Colab, this tool can freely be used without installing anything.\n", "\n", - "For more information on making the swith to Python see [enDAQ's blog, Why and How to Get Started in Python for a MATLAB User](https://blog.endaq.com/why-and-how-to-get-started-in-python-for-a-matlab-user).\n", + "For more information on making the switch to Python see [enDAQ's blog, Why and How to Get Started in Python for a MATLAB User](https://blog.endaq.com/why-and-how-to-get-started-in-python-for-a-matlab-user).\n", "\n", "This is part of our webinar series on Python for Mechanical Engineers:\n", "\n", @@ -1493,7 +1493,7 @@ }, "source": [ "### FFT from PSD\n", - "Here we can use the output of a PSD and convet it to a typical DFT. This has the benefit of allowing you to explicitely define the frequency bin width." + "Here we can use the output of a PSD and convert it to a typical DFT. This has the benefit of allowing you to explicitly define the frequency bin width." 
] }, { @@ -1887,4 +1887,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb index 54597578..7fc81291 100644 --- a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb +++ b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb @@ -14,7 +14,7 @@ "\n", "1. [Get Started with Python](https://colab.research.google.com/drive/1_pcGtgJleapV9tz5WfuRuqfWryjqhPHy#scrollTo=ikUJITDDIp19)\n", " * Blog: [Get Started with Python: Why and How Mechanical Engineers Should Make the Switch](https://blog.endaq.com/get-started-with-python-why-how-mechanical-engineers-should-make-the-switch)\n", - "2. **Introduction to Numpy & Pandas**\n", + "2. **Introduction to NumPy & Pandas**\n", " * [Watch Recording of This](https://info.endaq.com/pandas-and-numpy-for-data-analysis-webinar)\n", "3. [Introduction to Plotly](https://colab.research.google.com/drive/1pag2pKQQW5amWgRykAH8uMAPqHA2yUfU?usp=sharing)\n", "4. [Introduction of the enDAQ Library](https://colab.research.google.com/drive/1WAtQ8JJC_ny0fki7eUABACMA-isZzKB6)\n", @@ -505,7 +505,7 @@ "id": "cv0nw1SnLb5x" }, "source": [ - "Logspace is the equivalent of rasing a base by a linspaced array." + "Logspace is the equivalent of raising a base by a linspaced array." ] }, { diff --git a/docs/webinars/Webinar_Introduction_Plotly.ipynb b/docs/webinars/Webinar_Introduction_Plotly.ipynb index 0ca23569..735c3a61 100644 --- a/docs/webinars/Webinar_Introduction_Plotly.ipynb +++ b/docs/webinars/Webinar_Introduction_Plotly.ipynb @@ -345,7 +345,7 @@ }, "source": [ "### [ggplot](https://plotnine.readthedocs.io/en/stable/)\n", - "Introduces a \"grammer of graphics\" logic to plotting data which allows explicit mapping of data to the visual representation. This is something plotly express excels at." 
+ "Introduces a \"grammar of graphics\" logic to plotting data which allows explicit mapping of data to the visual representation. This is something plotly express excels at." ] }, { @@ -1304,7 +1304,7 @@ "id": "u_pZeZ5An8PG" }, "source": [ - "Now let's get crazy and customize all the \"common\" settings. But note that there are a LOT of different parameters that can be explicitely defined. Remember, Plotly has very thorough documentation, so check it out!\n", + "Now let's get crazy and customize all the \"common\" settings. But note that there are a LOT of different parameters that can be explicitly defined. Remember, Plotly has very thorough documentation, so check it out!\n", "* [Figure Layout](https://plotly.com/python/reference/layout/)\n", "* [X Axis](https://plotly.com/python/reference/layout/xaxis/)\n", "* [Y Axis](https://plotly.com/python/reference/layout/yaxis/)\n", diff --git a/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb b/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb index 1e583e47..2ce76142 100644 --- a/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb +++ b/docs/webinars/Webinar_enDAQ_Custom_Analysis.ipynb @@ -17,7 +17,7 @@ "\n", "1. [Get Started with Python](https://colab.research.google.com/drive/1_pcGtgJleapV9tz5WfuRuqfWryjqhPHy#scrollTo=ikUJITDDIp19)\n", " * Blog: [Get Started with Python: Why and How Mechanical Engineers Should Make the Switch](https://blog.endaq.com/get-started-with-python-why-how-mechanical-engineers-should-make-the-switch)\n", - "2. [Introduction to Numpy & Pandas for Data Analysis](https://colab.research.google.com/drive/1O-VwAdRoSlcrineAk0Jkd_fcw7mFGHa4#scrollTo=ce97q1ZcBiwj)\n", + "2. [Introduction to NumPy & Pandas for Data Analysis](https://colab.research.google.com/drive/1O-VwAdRoSlcrineAk0Jkd_fcw7mFGHa4#scrollTo=ce97q1ZcBiwj)\n", "3. [Introduction to Plotly for Plotting Data](https://colab.research.google.com/drive/1pag2pKQQW5amWgRykAH8uMAPqHA2yUfU)\n", "4. 
[Introduction of the enDAQ Library](https://colab.research.google.com/drive/1WAtQ8JJC_ny0fki7eUABACMA-isZzKB6)\n", " - There are lots of examples in this!\n", @@ -1082,7 +1082,7 @@ "id": "env80d2NrsQ8" }, "source": [ - "Now we will use the [FFTW algorithm](http://www.fftw.org/) which is available in the [pyFFTW library](https://hgomersall.github.io/pyFFTW/index.html) under a GPL license (which makes it potentially difficult for us to use because we use the more premissive MIT license).\n", + "Now we will use the [FFTW algorithm](http://www.fftw.org/) which is available in the [pyFFTW library](https://hgomersall.github.io/pyFFTW/index.html) under a GPL license (which makes it potentially difficult for us to use because we use the more permissive MIT license).\n", "\n", "First let's download it." ] @@ -1117,7 +1117,7 @@ "id": "M70BmBjKsRvV" }, "source": [ - "Now let's use it in a function which allows for a drop-in replacement to the Numpy code. This algorithm is generally regarded as the fatest for computing discrete Fourier transforms - so we'll put it to the test!" + "Now let's use it in a function which allows for a drop-in replacement to the Numpy code. This algorithm is generally regarded as the fastest for computing discrete Fourier transforms - so we'll put it to the test!" ] }, { @@ -1585,7 +1585,7 @@ "id": "5bXza1s0Ep-Q" }, "source": [ - "So what does this mean!? FFTW is the fastest as expected, but only if we first structure the data in a more efficient way. But typically you will not have the data structured in this \"optimal\" way for FFTW which means the time it takes to restucture it is real.\n", + "So what does this mean!? FFTW is the fastest as expected, but only if we first structure the data in a more efficient way. But typically you will not have the data structured in this \"optimal\" way for FFTW which means the time it takes to restructure it is real.\n", "\n", "Long story short for *this audience*, using Welch's method is fastest!" 
] @@ -4466,7 +4466,7 @@ "\n", "This is currently available, [see docs](https://docs.endaq.com/en/latest/endaq/batch.html), but we are working on a few bug fixes and improved functionality. This module allows you to batch process many *IDE* (only works for our sensors for now).\n", "\n", - "In a seperate document I first executed the following code to gather all the .IDE files I wanted to analyze (I hide the actual folder name).\n", + "In a separate document I first executed the following code to gather all the .IDE files I wanted to analyze (I hide the actual folder name).\n", "~~~python\n", "import glob\n", "directory = r\"C:\\Users\\shanly\\enDAQ-Notebooks\\...\"+\"\\\\\"\n", @@ -5450,7 +5450,7 @@ "id": "iFv3nr5DPBhj" }, "source": [ - "That's pretty cool! But we'll notice that the animation moves outside the inital bounds pretty quickly. So let's first find an easy way to calculate these metrics of max/min/median per frequency bin." + "That's pretty cool! But we'll notice that the animation moves outside the initial bounds pretty quickly. So let's first find an easy way to calculate these metrics of max/min/median per frequency bin." ] }, { diff --git a/endaq/calc/rotation.py b/endaq/calc/rotation.py index e3117ca9..abb19e8b 100644 --- a/endaq/calc/rotation.py +++ b/endaq/calc/rotation.py @@ -31,7 +31,7 @@ def _validate_euler_mode(mode: str) -> Tuple[str, List[str]]: if not set(_mode).issubset({'x', 'y', 'z'}): raise ValueError(f'Modes other than xyz (such as xyz, xyx, zxz) must ' f'separated with one of " ", "-" or "_". Mode ' - f'{mode} is not a valid euler angle mode.') + f'{mode} is not a valid Euler angle mode.') mode_list = list(_mode) if not ( @@ -63,18 +63,18 @@ def _validate_euler_mode(mode: str) -> Tuple[str, List[str]]: def quaternion_to_euler(df: pd.DataFrame, mode: str = 'x-y-z') -> pd.DataFrame: """ - Convert quaternion data in the dataframe ``df`` to euler angles. This can + Convert quaternion data in the dataframe ``df`` to Euler angles. 
This can be done with either intrinsic or extrinsic rotations, determined automatically based on ``mode``. - :param df: The input quaternions to convert. Must have columns labelled + :param df: The input quaternions to convert. Must have columns labeled 'X', 'Y', 'Z', and 'W'. :param mode: The order of the axes to rotate. The default is intrinsic rotation about x-y-z. - :return: A dataframe with the euler-angles of the quaternion data. + :return: A dataframe with the Euler-angles of the quaternion data. .. seealso:: - - `SciPy's documentation on converting into euler angles `_ + - `SciPy's documentation on converting into Euler angles `_ - `Wikipedia's article on Euler angles `_ """ diff --git a/endaq/plot/plots.py b/endaq/plot/plots.py index ff1324a8..7996570e 100644 --- a/endaq/plot/plots.py +++ b/endaq/plot/plots.py @@ -259,7 +259,7 @@ def gen_map(df_map: pd.DataFrame, (defaults to ground speed). :param df_map: The pandas dataframe containing the recording data. - :param mapbox_access_token: Deprecated, the access token is nolonger needed, plots are now made through MapLibre, + :param mapbox_access_token: Deprecated, the access token is no longer needed, plots are now made through MapLibre, `to learn more, see `_ `see Plotly for more information `_ :param lat: The dataframe column title to use for latitude @@ -651,7 +651,7 @@ def spectrum_over_time( * `Peak`: per timestamp the peak frequency is determined and plotted against time * `Lines`: the value in each frequency bin is plotted against time :param var_column: the column name in the dataframe that defines the different variables, default is `"variable"` - :param var_to_process: the variable value in the `var_column` to filter the input df down to, + :param var_to_process: the variable value in the `var_column` to filter the input `df` down to, if none is provided (the default) this function will filter to the first value :param time_column: the column name in the dataframe that defines the timestamps, 
default is `"timestamp"` :param freq_column: the column name in the dataframe that defines the frequency, default is `"frequency (Hz)"` From 086d8a636d3fdd3727080c54c5caa459d6c2e6c0 Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Sat, 24 May 2025 17:18:53 -0400 Subject: [PATCH 5/9] Missed a spot --- .github/workflows/docs-tests.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs-tests.yml b/.github/workflows/docs-tests.yml index 07991e86..3c4bb28a 100644 --- a/.github/workflows/docs-tests.yml +++ b/.github/workflows/docs-tests.yml @@ -22,7 +22,7 @@ jobs: env: OS: ubuntu-latest - PYTHON-VERSION: "3.9" + PYTHON-VERSION: "3.13" runs-on: ubuntu-latest @@ -31,7 +31,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.13" - name: Install Pandoc run: sudo apt-get install pandoc @@ -60,7 +60,7 @@ jobs: uses: actions/cache@v4 with: path: ${{ steps.pip-cache.outputs.dir }} - key: doctests-${{ runner.os }}-Python3.9-${{ steps.date.outputs.text }}-pip-${{ hashFiles('**/setup.py', './.github/workflows/unit-tests.yml') }} + key: doctests-${{ runner.os }}-Python3.13-${{ steps.date.outputs.text }}-pip-${{ hashFiles('**/setup.py', './.github/workflows/unit-tests.yml') }} - name: Install spellcheck library run: sudo apt-get install libenchant-2-2 From 2fa413f2034514b7d4a30099165924e328565b58 Mon Sep 17 00:00:00 2001 From: Pete Scheidler Date: Sat, 24 May 2025 17:32:10 -0400 Subject: [PATCH 6/9] Removing the last of the debug stuff, setting project to Stable, adding .bak to gitignore --- .github/workflows/docs-tests.yml | 10 ---------- .github/workflows/publish-to-pypi.yml | 2 +- .github/workflows/unit-tests.yml | 4 ++-- .gitignore | 4 ++++ setup.py | 2 +- 5 files changed, 8 insertions(+), 14 deletions(-) diff --git a/.github/workflows/docs-tests.yml b/.github/workflows/docs-tests.yml index 3c4bb28a..67d1372f 100644 --- a/.github/workflows/docs-tests.yml +++ 
b/.github/workflows/docs-tests.yml @@ -76,13 +76,3 @@ jobs: - name: Run docs spell checker run: sphinx-build -b spelling docs docs/_spellcheck - - - name: print debug info - if: ${{ always() }} - run: | - ls /tmp/ - for file in /tmp/*.log; do - echo "---- Contents of $file ----" - cat "$file" - echo "" # extra newline between files - done diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 1cffbc13..c8618c03 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@master - name: Set up Python 3.9 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: 3.9 diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 4f244eda..7f56fa44 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -5,11 +5,11 @@ on: push: branches: - main -# - development + - development pull_request: branches: - main -# - development + - development workflow_dispatch: diff --git a/.gitignore b/.gitignore index 12245807..128b34b1 100644 --- a/.gitignore +++ b/.gitignore @@ -179,6 +179,7 @@ cython_debug/ .idea/**/usage.statistics.xml .idea/**/dictionaries .idea/**/shelf +.vscode/* # Generated files .idea/**/contentModel.xml @@ -244,3 +245,6 @@ fabric.properties # Test files *_scratch.ipynb + +# Backup files +*.bak \ No newline at end of file diff --git a/setup.py b/setup.py index 7227ca3f..48bda63e 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ def get_version(rel_path): long_description_content_type='text/markdown', url='https://github.com/MideTechnology/endaq-python', license='MIT', - classifiers=['Development Status :: 4 - Beta', + classifiers=['Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Programming Language :: Python :: 3.9', From ffd097004d86be9d1baed4ef5b6168c5420146b7 Mon Sep 17 
00:00:00 2001 From: Peter Scheidler Date: Mon, 26 May 2025 20:58:06 -0400 Subject: [PATCH 7/9] Updating documentation --- docs/endaq/ide_usage.rst | 62 +++++++++++++++++++ ...ebinar_Introduction_NumPy_and_Pandas.ipynb | 2 +- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/docs/endaq/ide_usage.rst b/docs/endaq/ide_usage.rst index 324392ed..79ca0a5d 100644 --- a/docs/endaq/ide_usage.rst +++ b/docs/endaq/ide_usage.rst @@ -37,6 +37,68 @@ parameters: doc3 = get_doc("tests/test.ide", start="5s", end="10s") +Accessing measurement data in a doc +----------------------------------- +An enDAQ consists of many different sensors, and the enDAQ devices separate their measurement data into separate +Channels that correspond to the sensor taking the measurement. This is done because each Channel samples at a different +rate, so while Channel 59 (the Control Pad Pressure/Temperature/Humidity sensor) samples at 10 Hz, Channel 8 (the main +accelerometer channel) may sample at 20000 Hz. Channels themselves consist of different subchannels, which may be +different axes (X, Y, Z) or completely different measurements like temperature and pressire. All subchannels in a +channel are sampled at approximately the same time. + +The Channel data are stored in the ``channels`` property of a doc. The easiest way to access this is to convert it to a +Pandas DataFrame using :py:func:`~endaq.ide.to_pandas(doc)`. Visit `our internal documentation `_ +for some quick tips on Pandas, or go `straight to the source `_. + +.. 
code:: python3 + + import endaq.ide + # Read in a doc + doc2 = endaq.ide.get_doc("https://drive.google.com/file/d/1t3JqbZGhuZbIK9agH24YZIdVE26-NOF5/view?usp=sharing") + # List the available Channels + print(f"{doc2.channels=}") + # Convert the Control Pad Pressure/Temperature/Humidity Channel (Channel 59) to a Pandas DataFrame + control_pad_data = endaq.ide.to_pandas(doc2.channels[59]) + # Print the subchannel names + print(f"{control_pad_data.columns=}") + # Print the max and min temperatures seen + print(f"Max Temp={control_pad_data['Control Pad Temperature'].max()}, Min Temp={control_pad_data['Control Pad Temperature'].min()}") + +The output of the above code is: + +.. code-block:: + + doc2.channels={32: , 80: , 36: , 70: , 59: , 76: } + control_pad_data.columns=Index(['Control Pad Pressure', 'Control Pad Temperature'], dtype='object') + Max Temp=24.899999618530273, Min Temp=24.260000228881836 + +Note that by default, :py:func:`~endaq.ide.to_pandas(doc)` uses ``datetime`` for the index format, meaning the +measurements are accessed based on the absolute time they were recorded. Users often prefer to access the data using +``timedelta``, the amount of time since the recording started. Using this, to get the duration of the Control Pad data +and the average of the first 5 seconds, we could use: + +.. code:: python3 + + import endaq.ide + import pandas as pd + # Read in a doc + doc2 = endaq.ide.get_doc("https://drive.google.com/file/d/1t3JqbZGhuZbIK9agH24YZIdVE26-NOF5/view?usp=sharing") + # Convert the Control Pad Pressure/Temperature/Humidity Channel (Channel 59) to a Pandas DataFrame + control_pad_data = endaq.ide.to_pandas(doc2.channels[59], time_mode='timedelta') + # Print the time duration + print(f"Duration={control_pad_data.index[-1]-control_pad_data.index[0]}") + # Print the mean of the first 5 seconds + print(f"{control_pad_data[pd.Timedelta(seconds=0):pd.Timedelta(seconds=5)].mean()}") + +The output of the above code is: + +.. 
code-block:: + + Duration=0 days 00:00:17.931518 + Control Pad Pressure 101728.414991 + Control Pad Temperature 24.607073 + dtype: float64 + Summarizing IDE files: :py:func:`endaq.ide.get_channel_table()` --------------------------------------------------------------- diff --git a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb index 7fc81291..01ee2025 100644 --- a/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb +++ b/docs/webinars/Webinar_Introduction_NumPy_and_Pandas.ipynb @@ -7821,7 +7821,7 @@ "source": [ "### Installation\n", "\n", - "The code is live on [GitHub](https://github.com/MideTechnology/endaq-python), [PyPI](https://pypi.org/project/endaq/), and cleaner documentation is in process that will eventually live on a subdomain of endaq.com.\n", + "The code is live on [GitHub](https://github.com/MideTechnology/endaq-python), [PyPI](https://pypi.org/project/endaq/), and cleaner documentation lives at [docs.endaq.com](https://docs.endaq.com/en/latest/).\n", "\n", "It can easily be installed with pip." ] From a9bd10b3876a8ac296338cee6eccb7cbc89c7897 Mon Sep 17 00:00:00 2001 From: stokesMIDE Date: Tue, 27 May 2025 12:37:01 -0400 Subject: [PATCH 8/9] Sphinx fixes, minor doc edits --- docs/conf.py | 33 +++++++++++++++++++++------------ docs/endaq/ide_usage.rst | 12 ++++++------ 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 8a5f671d..529c8c7d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,28 +13,35 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
# +import codecs +import os.path import sys -print(f"{sys.path=}") -print(f"{__file__=}") -# go up a dir and include that guy = -p = "/".join(__file__.split("/")[:-2]) -sys.path.append(p) -print(f"{sys.path=}") +# go up a dir and include that guy = +p = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) +sys.path.insert(0, p) -from importlib import metadata - -import endaq # -- Project information ----------------------------------------------------- +def get_version(rel_path): + """ Read the version number directly from the source. """ + with codecs.open(rel_path, 'r') as fp: + for line in fp: + if line.startswith('__version__'): + delim = '"' if '"' in line else "'" + return line.split(delim)[1] + else: + raise RuntimeError("Unable to find version string.") + + project = 'enDAQ' copyright = '2021, Mide Technology Corp.' author = '' # The full version, including alpha/beta/rc tags -# release = metadata.version("endaq") -release = '1.5.3' +release = get_version(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'endaq', '__init__.py'))) + # The short X.Y version version = '.'.join(release.split(".")[:2]) @@ -111,7 +118,9 @@ "github_url": "https://github.com/MideTechnology/endaq-python", "twitter_url": "https://twitter.com/enDAQ_sensors", "collapse_navigation": True, - "google_analytics_id": "G-E9QXH4H5LP", + "analytics": { + "google_analytics_id": "G-E9QXH4H5LP", + } } # Add any paths that contain custom static files (such as style sheets) here, diff --git a/docs/endaq/ide_usage.rst b/docs/endaq/ide_usage.rst index 79ca0a5d..6f389278 100644 --- a/docs/endaq/ide_usage.rst +++ b/docs/endaq/ide_usage.rst @@ -37,16 +37,16 @@ parameters: doc3 = get_doc("tests/test.ide", start="5s", end="10s") -Accessing measurement data in a doc ------------------------------------ -An enDAQ consists of many different sensors, and the enDAQ devices separate their measurement data into separate +Accessing measurement data in a Dataset/IDE file 
+------------------------------------------------ +An enDAQ device consists of many different sensors, and enDAQ devices record their measurement data into separate Channels that correspond to the sensor taking the measurement. This is done because each Channel samples at a different rate, so while Channel 59 (the Control Pad Pressure/Temperature/Humidity sensor) samples at 10 Hz, Channel 8 (the main -accelerometer channel) may sample at 20000 Hz. Channels themselves consist of different subchannels, which may be -different axes (X, Y, Z) or completely different measurements like temperature and pressire. All subchannels in a +analog accelerometer channel) may sample at 20000 Hz. Channels themselves consist of different subchannels, which may be +different axes (X, Y, Z) or completely different measurements like temperature and pressure. All subchannels in a channel are sampled at approximately the same time. -The Channel data are stored in the ``channels`` property of a doc. The easiest way to access this is to convert it to a +The Channel data are stored in the ``channels`` property of a Dataset. The easiest way to access this is to convert it to a Pandas DataFrame using :py:func:`~endaq.ide.to_pandas(doc)`. Visit `our internal documentation `_ for some quick tips on Pandas, or go `straight to the source `_. From a0d0260bf21f2d174403b6229726cc744e374af8 Mon Sep 17 00:00:00 2001 From: Peter Scheidler Date: Wed, 28 May 2025 22:48:55 -0400 Subject: [PATCH 9/9] Update docs/endaq/ide_usage.rst --- docs/endaq/ide_usage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/endaq/ide_usage.rst b/docs/endaq/ide_usage.rst index 6f389278..7099c835 100644 --- a/docs/endaq/ide_usage.rst +++ b/docs/endaq/ide_usage.rst @@ -46,7 +46,7 @@ analog accelerometer channel) may sample at 20000 Hz. Channels themselves consis different axes (X, Y, Z) or completely different measurements like temperature and pressure. 
All subchannels in a
channel are sampled at approximately the same time.
 
-The Channel data are stored in the ``channels`` property of a Dataset. The easiest way to access this is to convert it to a
+The Channel data are stored in the ``channels`` property of a Dataset, which is returned from the :py:func:`~endaq.ide.get_doc` function. The easiest way to access this is to convert it to a
 Pandas DataFrame using :py:func:`~endaq.ide.to_pandas(doc)`. Visit `our internal documentation
 `_ for some quick tips on Pandas, or go `straight to the source
 `_.