From abdff84447f7a322a7d8c8dd59f1d0b1774c4c71 Mon Sep 17 00:00:00 2001
From: GitHub Actions
Date: Sun, 7 Jul 2024 01:22:26 +0000
Subject: [PATCH] Sync notebooks

---
 notebooks/41_Decoding_Clusterless.ipynb         | 5 +++--
 notebooks/42_Decoding_SortedSpikes.ipynb        | 2 +-
 notebooks/py_scripts/41_Decoding_Clusterless.py | 3 ++-
 3 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/notebooks/41_Decoding_Clusterless.ipynb b/notebooks/41_Decoding_Clusterless.ipynb
index 69286c6..6f8db89 100644
--- a/notebooks/41_Decoding_Clusterless.ipynb
+++ b/notebooks/41_Decoding_Clusterless.ipynb
@@ -700,7 +700,7 @@
     "\n",
     "We use the the `PositionOutput` table to figure out the `merge_id` associated with `nwb_file_name` to get the position data associated with the NWB file of interest. In this case, we only have one position to insert, but we could insert multiple positions if we wanted to decode from multiple sessions.\n",
     "\n",
-    "Note that the position data sampling frequency is what determines the time step of the decoding. In this case, the position data sampling frequency is 30 Hz, so the time step of the decoding will be 1/30 seconds. In practice, you will want to use a smaller time step such as 500 Hz. This will allow you to decode at a finer time scale. To do this, you will want to interpolate the position data to a higher sampling frequency as shown in the [position trodes notebook](./20_Position_Trodes.ipynb).\n",
+    "Note that we can use the `upsample_rate` parameter to define the rate (in Hz) to which the position data will be upsampled for decoding. This is useful if we want to decode at a finer time scale than the position data sampling frequency. In practice, a value of 500 Hz is used in many analyses. Skipping this parameter or providing a null value will default to using the position sampling rate.\n",
     "\n",
     "You will also want to specify the name of the position variables if they are different from the default names. The default names are `position_x` and `position_y`."
    ]
@@ -981,6 +981,7 @@
     "    nwb_file_name=nwb_copy_file_name,\n",
     "    group_name=\"test_group\",\n",
     "    keys=[{\"pos_merge_id\": merge_id} for merge_id in position_merge_ids],\n",
+    "    upsample_rate=500,\n",
     ")\n",
     "\n",
     "PositionGroup & {\n",
@@ -2956,7 +2957,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.16"
+   "version": "3.9.-1"
   }
  },
  "nbformat": 4,
diff --git a/notebooks/42_Decoding_SortedSpikes.ipynb b/notebooks/42_Decoding_SortedSpikes.ipynb
index 66c3de7..e4448c6 100644
--- a/notebooks/42_Decoding_SortedSpikes.ipynb
+++ b/notebooks/42_Decoding_SortedSpikes.ipynb
@@ -6,7 +6,7 @@
    "source": [
     "# Sorted Spikes Decoding\n",
     "\n",
-    "The mechanics of decoding with sorted spikes are largely similar to those of decoding with unsorted spikes. You should familiarize yourself with the [clusterless decoding tutorial](./42_Decoding_Clusterless.ipynb) before proceeding with this one.\n",
+    "The mechanics of decoding with sorted spikes are largely similar to those of decoding with unsorted spikes. You should familiarize yourself with the [clusterless decoding tutorial](./41_Decoding_Clusterless.ipynb) before proceeding with this one.\n",
     "\n",
     "The elements we will need to decode with sorted spikes are:\n",
     "- `PositionGroup`\n",
diff --git a/notebooks/py_scripts/41_Decoding_Clusterless.py b/notebooks/py_scripts/41_Decoding_Clusterless.py
index 6c286bf..b9eede4 100644
--- a/notebooks/py_scripts/41_Decoding_Clusterless.py
+++ b/notebooks/py_scripts/41_Decoding_Clusterless.py
@@ -125,7 +125,7 @@
 #
 # We use the the `PositionOutput` table to figure out the `merge_id` associated with `nwb_file_name` to get the position data associated with the NWB file of interest. In this case, we only have one position to insert, but we could insert multiple positions if we wanted to decode from multiple sessions.
 #
-# Note that the position data sampling frequency is what determines the time step of the decoding. In this case, the position data sampling frequency is 30 Hz, so the time step of the decoding will be 1/30 seconds. In practice, you will want to use a smaller time step such as 500 Hz. This will allow you to decode at a finer time scale. To do this, you will want to interpolate the position data to a higher sampling frequency as shown in the [position trodes notebook](./20_Position_Trodes.ipynb).
+# Note that we can use the `upsample_rate` parameter to define the rate (in Hz) to which the position data will be upsampled for decoding. This is useful if we want to decode at a finer time scale than the position data sampling frequency. In practice, a value of 500 Hz is used in many analyses. Skipping this parameter or providing a null value will default to using the position sampling rate.
 #
 # You will also want to specify the name of the position variables if they are different from the default names. The default names are `position_x` and `position_y`.
@@ -181,6 +181,7 @@
     nwb_file_name=nwb_copy_file_name,
     group_name="test_group",
     keys=[{"pos_merge_id": merge_id} for merge_id in position_merge_ids],
+    upsample_rate=500,
 )
 
 PositionGroup & {
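For readers applying this patch, below is a minimal sketch of how the new `upsample_rate` argument fits into the position-group step of the clusterless decoding tutorial. It assumes the call shown in the hunks above is `PositionGroup().create_group(...)`, that `PositionGroup` lives in `spyglass.decoding.v1.core` and `PositionOutput` (with a `TrodesPosV1` part table) in `spyglass.position`, and it uses a placeholder NWB file name; treat it as an illustration under those assumptions, not the exact notebook code.

    # Sketch only: group position data for decoding and request 500 Hz upsampling.
    from spyglass.position import PositionOutput          # assumed import path
    from spyglass.decoding.v1.core import PositionGroup   # assumed import path

    nwb_copy_file_name = "example_session_.nwb"  # placeholder NWB copy file name

    # Look up the merge IDs of the position entries for this session
    # (TrodesPosV1 part table assumed, as in the Trodes position tutorial).
    position_merge_ids = (
        PositionOutput.TrodesPosV1 & {"nwb_file_name": nwb_copy_file_name}
    ).fetch("merge_id")

    PositionGroup().create_group(
        nwb_file_name=nwb_copy_file_name,
        group_name="test_group",
        keys=[{"pos_merge_id": merge_id} for merge_id in position_merge_ids],
        upsample_rate=500,  # Hz; omit or pass None to keep the position sampling rate
    )

Per the new notebook text, `upsample_rate` only changes the time grid on which decoding is performed; the position entries referenced by `pos_merge_id` are left untouched.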