diff --git a/.docs/architecture.graphml b/.docs/architecture.graphml
new file mode 100644
index 0000000..c8e16ef
--- /dev/null
+++ b/.docs/architecture.graphml
@@ -0,0 +1,1027 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ DriverControl
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ XBOXCtrl
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ DrivetrainCtrl
+
+
+
+
+
+
+
+
+
+ Folder 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Pose Estimation
+
+
+
+
+
+
+
+
+
+ Folder 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Gyro
+
+
+
+
+
+
+
+
+
+
+ poseEstPhotonCamera (FL)
+
+
+
+
+
+
+
+
+
+
+ Inverse
+Kinematics
+
+
+
+
+
+
+
+
+
+
+ WPILib Pose Estimator
+
+
+
+
+
+
+
+
+
+
+ poseEstPhotonCamera (FR)
+
+
+
+
+
+
+
+
+
+
+ poseEstPhotonCamera (BL)
+
+
+
+
+
+
+
+
+
+
+ poseEstPhotonCamera (BR)
+
+
+
+
+
+
+
+
+
+
+ Telemetry
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Control Strategies
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Autonomous Trajectory Following
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ trajectory.py
+
+
+
+
+
+
+
+
+
+
+ holonomicDriveController.py
+
+
+
+
+
+
+
+
+
+
+ Path Planned
+Auto Command
+
+
+
+
+
+
+
+
+
+ Telemetry
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Teleop AutoDrive Navigation
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ autoDrive.py
+
+
+
+
+
+
+
+
+
+
+ holonomicDriveController.py
+
+
+
+
+
+
+
+
+
+
+ repulsorFieldPlanner.py
+
+
+
+
+
+
+
+
+
+
+ forceGenerators.py
+
+
+
+
+
+
+
+
+
+
+ obstacleDetector.py
+
+
+
+
+
+
+
+
+
+
+ obstaclePhotonCamera
+(Front)
+
+
+
+
+
+
+
+
+
+
+ obstaclePhotonCamera
+(Rear)
+
+
+
+
+
+
+
+
+
+
+ Telemetry
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Base Control
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ SwerveModuleCtrl BR
+
+
+
+
+
+
+
+
+
+
+ Forward
+Kinematics
+
+
+
+
+
+
+
+
+
+
+ SwerveModuleCtrl BL
+
+
+
+
+
+
+
+
+
+
+ SwerveModuleCtrl FR
+
+
+
+
+
+
+
+
+
+
+ SwerveModuleCtrl FL
+
+
+
+
+
+
+
+
+
+
+ Telemetry
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ LED Control
+
+
+
+
+
+
+
+
+
+ Folder 2
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ PWMControl (LED Color/Blink)
+
+
+
+
+
+
+
+
+
+
+
+
+ ActualPose
+
+
+
+
+
+
+
+
+
+
+
+
+ Module State
+Command
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Measured Azmth Angles
+and Wheel Speeds
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Measured Poses
+from Apriltags
+
+
+
+
+
+
+
+
+
+
+ Measured
+Angular
+Velocity
+
+
+
+
+
+
+
+
+
+
+
+
+ Measured
+chassis speeds
+
+
+
+
+
+
+
+
+
+
+ Est Pose
+
+
+
+
+
+
+
+
+
+
+
+
+ IsAutoAligning
+IsStuck
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ DrivetrainCmd
+
+
+
+
+
+
+
+
+
+
+
+ DrivetrainCmd
+
+
+
+
+
+
+
+
+
+
+
+ Repulsive
+and Attractive
+Forces
+
+
+
+
+
+
+
+
+
+
+
+ Estimated Obstacle
+Poses
+
+
+
+
+
+
+
+
+
+
+
+ Estimated Obstacle
+Poses
+
+
+
+
+
+
+
+
+
+
+
+ DrivetrainCmd
+
+
+
+
+
+
+
+
+
+
+
+ All Current
+Obstacle Observations
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Pose/Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+ obstacle observations
+Goal Pose
+
+
+
+
+
+
+
+
+
+
+
+
+
+ des Pose/Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+ cmdVel
+
+
+
+
+
+
+
+
+
+
+
+
+
+ cmdVel
+
+
+
+
+
+
+
+
+
+
+
+
+
+ des Pose/Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+ AutoDrive
+Goal
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Manual
+Gyro Reset
+
+
+
+
+
+
+
+
+
+
+
+ Current Trajectory
+
+
+
+
+
+
+
+
+
+
+
+ Obstacles
+Lookahead Trajectory
+
+
+
+
+
+
+
+
+
+
+
+ DesPose
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/.docs/autoSequencerV2.md b/.docs/autoSequencerV2.md
new file mode 100644
index 0000000..83cad69
--- /dev/null
+++ b/.docs/autoSequencerV2.md
@@ -0,0 +1,48 @@
+# AutoSequencer V2
+
+## Goals
+Better flexibility for expressing complex sequences of autonomous routines
+
+## Key Updates from V1
+Rename `Event` to `Command` for better alignment with wpilib command based
+
+Introduce `CommandGroup` as an ordered list of `Command`s to be run together
+
+Introduce the following flavors of groups:
+* `CommandRaceGroup`
+  * member commands run at the same time, finishing when the FIRST one is done
+  * Extends `Command`, with pass-through on all functions to do the same thing on all commands in the group, except for `isDone()` implemented as an OR
+* `CommandParallelGroup`
+  * member commands run at the same time, finishing when ALL are done
+  * Extends `Command`, with pass-through on all functions to do the same thing on all commands in the group, except for `isDone()` implemented as an AND
+* `CommandSequentialGroup`
+  * member commands run one after another
+
+Requirements for `Command`
+* Abstract (extender-must-implement) methods for:
+ * `initialize` - called once right before the first call to `execute`
+ * `execute` - Called at a rate as long as the command is active
+  * `end(boolean interrupted)` - called once right after the final call to `execute`. `interrupted` indicates whether the end was due to this command finishing "naturally" or because something else stopped it prematurely
+ * `isDone()` - return true if we're done, false otherwise
+* Commands also implement convenience "composition" methods:
+  * `withTimeout` - returns a `raceWith()` a `WaitCommand`
+  * `raceWith()` - returns a race group with this command and the input commands
+  * `alongWith()` - returns a parallel group with this command and the input commands
+  * `andThen()` - returns a sequential group with this command and the input command
+* Commands can `schedule()` or `cancel()` themselves with the `AutoSequencer()`
+
+Pre-supplied implementations of `Command`:
+* `WaitCommand` - waits the given duration
+
+Existing requirements for `Mode`
+* Singular `CommandSequentialGroup` for all `Command`'s in the mode
+* Must provide API to:
+ * Supply the initial pose
+
+Existing requirements for `AutoSequencer`:
+* Top-level state machine for a command group
+* Singleton
+* Registration of multiple auto `Modes`, including publishing the available list to NT
+* NT-based selection of the current auto event
+* Ticks the `Mode`'s `CommandSequentialGroup` forward
+
diff --git a/.docs/closedLoopDrivetrainControl.md b/.docs/closedLoopDrivetrainControl.md
new file mode 100644
index 0000000..3a1489e
--- /dev/null
+++ b/.docs/closedLoopDrivetrainControl.md
@@ -0,0 +1,18 @@
+# Closed Loop Drivetrain Control
+
+We achieve smooth control of our drivetrain through a slightly modified [Holonomic Drivetrain Controller](https://docs.wpilib.org/en/stable/docs/software/advanced-controls/trajectories/holonomic.html)
+
+Swerve Drive has three _independent_ axes of control - Translation (X, Y), and Rotation (Theta). Unlike a tank drive, where side-to-side translation cannot be done independently of forward motion, our control strategy may be easier.
+
+Our control logic feeds the drivetrain with desired X, Y, and Rotational Velocity as the sum of two sources:
+
+1) Open Loop velocity command
+2) Closed loop position-correction command
+
+All control strategies (manual, auto-drive, trajectory, etc.) must supply an open loop velocity command for all three axes at all times. Manual control gets these from the joysticks, auto-drive gets them from looking at how we're moving from the current to the next step, trajectories in autonomous pre-calculate the velocity throughout the trajectory.
+
+Optionally, a `Pose2d` might be available from the controlling source. Manual control generally will not have this, but auto-drive and trajectory control probably will.
+
+Since we have `odometry` to calculate an estimated `Pose2d` of our robot, we can subtract this from our commanded `Pose2d` to get an error in position. Each axis of the error (3 in total) is run through an independent PID controller, which in turn generates a small additional _corrective_ velocity to get the robot's estimated pose closer to where we want to be.
+
+Notably, in the past, wpilib's implementation did not have open-loop control of rotation. This was a strange omission. For this reason, we've had our own minimal `HolonomicDriveController` class for quite some time.
\ No newline at end of file
diff --git a/.docs/docs.md b/.docs/docs.md
new file mode 100644
index 0000000..b0273b0
--- /dev/null
+++ b/.docs/docs.md
@@ -0,0 +1,9 @@
+# About Docs
+
+These documents are designed to help developers of this codebase understand its architecture and purpose.
+
+## Editing Diagrams
+
+`.graphml` files may be edited with the [yEd graph editor](https://www.yworks.com/products/yed/download#download).
+
+In general, exporting to .pdf will make viewing easier once the diagram is deemed "correct"
diff --git a/.docs/drivetrain.md b/.docs/drivetrain.md
new file mode 100644
index 0000000..015fbe4
--- /dev/null
+++ b/.docs/drivetrain.md
@@ -0,0 +1,87 @@
+# Drivetrain Control
+
+## Swerve Drive Overview
+
+A "swerve drive" is the most common type of drivetrain in the FIRST Robotics Competition in the last few years.
+
+It consists of four "modules", each mounted on a corner of the robot.
+
+Each module has two motors - one to point the wheel in a specific direction ("azimuth"), and one to actually spin the wheel ("wheel").
+
+As a coordinate frame reminder: for our rectangular chassis, the robot's origin is defined as lying in the same plane as the floor, and in the exact center of the rectangle of the drivetrain. Positive X points toward the front of the robot, positive Y points toward the robot's left side. Positive rotation swings from +X to +Y axes.
+
+## Overall Drivetrain Control
+
+Our control is "field-oriented" - the commands into the drivetrain are assumed to be velocity commands aligned with the field walls.
+
+A few steps are needed in the overall control algorithm.
+
+1. Rotate the incoming velocity commands to be relative to the robot's heading, not the field's +X axis. WPILib provides these functions.
+2. Figure out the velocity and azimuth angle _at each module's contact patch with the ground_, using kinematics and the drivetrain dimensions. WPILib provides these functions.
+3. Perform per-module state optimization (see below)
+4. Send specific wheel velocity and azimuth angle commands to each module.
+
+### Module State Optimization
+
+At any given time, there are four possible ways a module could go from its present azimuth angle to a new commanded one:
+
+1. Rotate left, keep wheel velocity the same
+2. Rotate right, keep wheel velocity the same
+3. Rotate left, invert wheel velocity
+4. Rotate right, invert wheel velocity
+
+In this way, the maximum number of degrees a module should ever have to be commanded to rotate through is 90. By optimizing the state, we reduce the amount of time the module is in a "skidding" state, where the wheel is not smoothly rolling across the floor.
+
+WPILib provides the functions to do this, we simply have to call them.
+
+## Module Control
+
+Controlling the module requires controlling both motors.
+
+### Wheel
+
+The wheel velocity is achieved through a basic feed-forward, plus a small feedback portion.
+
+The feed-forward model is the standard motor velocity model, consisting of:
+
+`kS` - static friction - maximum voltage that can be applied to the motor without motion occurring.
+`kV` - number of volts to achieve a certain rotational velocity
+
+Future adds include `kA` - number of volts to achieve a certain _change_ in rotational velocity.
+
+Feedforward should be doing the vast majority of the control effort. Feedback is just a small additional factor to help compensate.
+
+### Azimuth
+
+For now, we are just using a simple P/D feedback controller on the azimuth angle. This seems to be sufficient.
+
+Future adds could be to motion profile the commanded position of Azimuth angle, and then using that for a velocity feed-forward.
+
+## Constants & Configuration
+
+As with most good code, striving to minimize "magic constants" throughout the code is preferred.
+
+Most drivetrain related constants are in `drivetrainPhysical.py`. Key ones that users may have to adjust:
+
+* `WHEEL_BASE_HALF_*_M` - Distance from the origin of the robot, out to the center of where the wheel makes contact with the ground. Note this is not to the frame rail or the bumper. Note it's only _half_ the distance between two wheels.
+* `WHEEL_GEAR_RATIO` - Reduction ratio in the modules from the driving motor down to the wheel.
+* `WHEEL_RADIUS_IN` - radius of the wheel from center of rotation, to where it contacts the carpet
+* `MAX_DT_MOTOR_SPEED_RPS` - maximum achievable speed from the drive motor. WPILib has most of these internally in their physical plant model configurations.
+* `*_ENCODER_MOUNT_OFFSET_RAD` - adjusts for the physical mounting offset of the module/magnet and angle sensor on each drivetrain azimuth.
+* `ROBOT_TO_*_CAM` - 3d Transforms from robot origin to the camera described (including both translation, and angle)
+
+Other constants are present, but they are tied to things that don't change much year to year (robot weight, azimuth steer gear ratio, bumper thickness, etc.)
+
+Most other constants in the file are derived from these constants.
+
+### Encoder Mount Offset Cal Procedure
+
+Must be updated whenever the module is reassembled
+
+1. Put the robot up on blocks.
+2. Reset all these values to 0, deploy code
+3. Pull up dashboard with encoder readings (in radians)
+4. Using a square, twist the modules by hand until they are aligned with the robot's chassis
+5. Read out the encoder readings for each module, put them here
+6. Redeploy code, verify that the encoder readings are correct as each module is manually rotated
+
diff --git a/.docs/drivetrainControlStrategy.md b/.docs/drivetrainControlStrategy.md
new file mode 100644
index 0000000..6cc7993
--- /dev/null
+++ b/.docs/drivetrainControlStrategy.md
@@ -0,0 +1,30 @@
+# Picking a Drivetrain Control Strategy
+
+There are multiple parts of robot code which might want to control the drivetrain. The following logic is used to determine what is actually controlling it.
+
+## Arbitration
+
+The drivetrain logic uses a "chained" approach to arbitration. Each feature takes in the command from the "upstream" features, has an *opportunity* to modify any part of the command, and passes the result downstream.
+
+As more automatic assist or control features are added, they should be put into this stack. The further "downstream" in the list they are, the higher priority the feature will have.
+
+### Manual Control
+
+Manual control takes velocity commands from the driver's joysticks. There is no specific pose desired, only velocity is commanded.
+
+Manual control must always be available. It is the default option if no other feature is taking control.
+
+### Autonomous Path Following
+
+During autonomous, pre-planned paths are often executed. At each time step, a desired pose and velocity is read out of a file and passed to the drivetrain. This only happens if the autonomous path planning logic is active.
+
+### Teleop Navigation
+
+The navigation stack can create commands to automatically drive the robot toward a goal, avoiding obstacles. This generates velocity commands, and a desired pose.
+
+### TODO - align to gamepieces
+
+This logic should create rotational commands which point the robot at a gamepiece, but otherwise do not alter the X/Y velocity commands.
+
+This has not yet been written
+
diff --git a/.docs/localization.md b/.docs/localization.md
new file mode 100644
index 0000000..ecacc20
--- /dev/null
+++ b/.docs/localization.md
@@ -0,0 +1,112 @@
+# Localization
+
+Localization is the process of estimating where, on the field, the robot is at.
+
+This "where" is answered by three numbers:
+
+X position - in meters - from the origin
+Y position - in meters - from the origin
+Rotation - in radians - in angular deflection from the positive X axis, toward the positive Y axis.
+
+Collectively, these are known as a "Pose" - specifically, a Pose2d
+
+The sensors on our robot provide clues as to these numbers, but always some inaccuracy. These clues provide overlapping information, which we must *fuse* into a single estimate.
+
+We use WPILib's *Kalman Filter* to fuse these pieces of information together, accounting for the fact each has some inaccuracy.
+
+## About Inaccuracy
+
+All sensors measure a particular quantity, and add some amount of random *noise* to that signal. The noise is in the same units as the quantity being measured.
+
+When looking at sensor data, you can often see the signal "jittering" around the real value. This noise can be at least roughly measured.
+
+The most common way to understand this noise is to assume it is random, with a *Gaussian Distribution*. The amount of noise will be proportional to the *standard deviation* of the noise's distribution.
+
+The Kalman Filter takes the standard deviation of each measurement as an input, as a way to know how much to "trust" the measurement. Measurements with high standard deviations have a lot of noise, and are not particularly trustworthy.
+
+Trustworthy measurements will change the estimate of the robot's Pose rapidly. Untrustworthy measurements will take a lot of time to have an impact on the Pose estimate.
+
+## Data Sources
+
+### Gyroscope
+
+FRC robots almost always include a gyroscope, which measures rotational velocity. By adding up the rotational velocity measurements over time, the sensor measures changes in the robot's angular position.
+
+The gyroscope is one of the most accurate, least-noisy measurements of robot position. It should only drift by a degree or two every minute. However, it only measures one component of pose. Additionally, it is at best relative to a given starting pose, which must be accurate.
+
+### Swerve Module Encoders
+
+The encoders inside the swerve modules (wheel motors, and absolute encoders measuring azimuth position) provide a good estimate of movement. As long as wheels are rolling along the ground (and not slipping), linear displacement can be derived through determining distance rolled from number of rotations, multiplied by the circumference of the wheel.
+
+The wheel encoders are also generally very accurate, with most noise being driven by slippage during high-acceleration maneuvers. Additionally, it is at best relative to a given starting pose, which must be accurate.
+
+
+### AprilTags
+
+Using a camera and a coprocessor, we can estimate our pose relative to the *fiducial markers* that FIRST provides.
+
+These estimates provide all three components of Pose, and are absolute - they do not care whether the initial pose estimate was accurate or not. However, the signal often has a lot of latency (as it takes 100ms or more to get the image, process it, and send the result to the roboRIO). Additionally, their accuracy varies, depending on how far away the observed tag is, and whether or not [the observed pose is impacted by a common optical illusion.](https://docs.wpilib.org/en/stable/docs/software/vision-processing/apriltag/apriltag-intro.html#d-to-3d-ambiguity).
+
+## Initial Position
+
+Software must make an accurate initial assumption of pose. This is done through one of two ways:
+
+### Autonomous Init
+
+Each autonomous routine must provide an assumed starting pose, where the drive team places the robot at the start of the routine.
+
+This pose is returned from the sequencer, and used by the pose estimation logic to reset pose at the start of the match.
+
+### Teleop Gyro Reset
+
+The code supports resetting the rotational component of pose to be aligned toward "downfield" - this helps during development or driver practice where autonomous is not run.
+
+This only fixes the rotational component of pose. While rotational position is most important, X/Y translation can only be corrected by looking at an apriltag.
+
+## Tuning Localization
+
+The core tuning to be done involves picking how much to trust each source of data.
+
+Ultimately, this is done by picking the "standard deviation" for each sensor.
+
+Currently, default values are used from WPILib's pose estimator for gyro and wheels (as implemented in `drivetrainPoseEstimator.py`).
+
+When `WrapperedPoseEstPhotonCamera` is updated, part of the `CameraPoseObservation` includes the standard deviation of the rotational component (`rotStdDev`), and the translational component (`xyStdDev`) of the pose estimate.
+
+Both of these `StdDev`'s can be thought of as, roughly, how much is the "plus or minus" of the estimated pose.
+
+For example, currently, we assume the pose is accurate in X/Y, plus-or-minus one meter. This is pretty mid - not horrible, but not great either.
+
+We also say the rotation is accurate, plus-or-minus 50 degrees.... which is a roundabout way of saying "we don't trust it much at all"
+
+In the future - different pieces of info about the apriltag seen (ambiguity ratio, distance from the tag, overall tag size, etc.) could be used to change those fixed assumptions.
+
+At the end of the day, all these numbers change is how rapidly the robot's pose estimate will "respond to" a particular vision observation.
+
+## Debugging Localization
+
+A set of objects is put into NetworkTables to help debug how the robot is estimating its position on the field.
+
+[AdvantageScope](https://docs.wpilib.org/en/stable/docs/software/dashboards/advantagescope.html) is the recommended tool for viewing a 3d representation of this. The simulation GUI and [Glass](https://docs.wpilib.org/en/stable/docs/software/dashboards/glass/index.html) also provide 2d views of this.
+
+
+### Field 2d Poses
+
+In `drivetrain/poseEstimation/drivetrainPoseTelemetry.py`, the following 2d and 3d poses are published:
+
+In the `DT Pose 2d` `Field2d` object:
+
+`Robot` - the current estimate of the robot pose
+`ModulePoses` - the current estimated position and state of each swerve module.
+`visionObservations` - any pose the vision system has detected the robot at. 0 or more, depending on how many apriltags are in view.
+`desPose` - the desired pose of the current thing commanding the drivetrain. Might be `null` or just the origin if only velocities are commanded at the moment
+`desTraj` - current autonomous path-planned trajectory (empty if not running)
+`desTrajWaypoints` - Current lookahead waypoints from the Potential Field Auto-drive (empty if not running)
+`curObstaclesFixed` - Poses associated with the obstacles assumed to be always present on the field
+`curObstaclesFull`, `curObstaclesThird`, `curObstaclesAlmostGone` - current transient obstacle poses. As these poses decay in strength, they'll move from one list to the other (this allows the visualization tool to show different colors based on how strong the obstacle is)
+
+### 3d poses
+
+Additionally, 3d poses are published for the assumed camera locations. This is useful for debugging the camera orientation on the robot with an `axis` object in AdvantageScope's 3d viewer.
+
+`LeftCamPose`, `RightCamPose`, and `FrontCamPose` are all provided. Other poses should be added as we add more cameras.
diff --git a/.docs/navigation.md b/.docs/navigation.md
new file mode 100644
index 0000000..94c8e78
--- /dev/null
+++ b/.docs/navigation.md
@@ -0,0 +1,134 @@
+# Navigation
+
+The navigation stack is a new-for-2025 development the team made to attempt to add flexible, automatic, on-demand driving to teleop.
+
+The driving is aware of obstacles, both those assumed always-present on the field, and one dynamically detected with cameras.
+
+## General Principles
+
+The underlying algorithm is called "Repulsor Fields" or "Potential Fields". See [this Columbia University Lecture](https://www.cs.columbia.edu/~allen/F17/NOTES/potentialfield.pdf) for a brief introduction.
+
+The planner maintains a list of all obstacles on the field, as well as a goal position.
+
+Every loop, the robot's last desired pose is used to calculate the force each obstacle or goal has on the robot.
+
+The sum of these forces produces a vector in a specific direction on the field. The robot path plans one step into that direction. This process is repeated at every loop.
+
+### Advantages of this Method
+
+Compared to other solutions, this method...
+
+1. Is relatively computationally light. It spreads the planning effort over many control loops, only calculating next steps as needed.
+2. Does not require complex logic to replan in the background when obstacles change.
+
+### Disadvantages of this Method
+
+1. It gets stuck at local minima - Other methods can be more aggressive about looking at the whole map, and providing a reasonable path when "boxed-in" by obstacles
+2. Manual Tuning - obstacle/goal force strengths are arbitrary values that were tuned "to taste" in simulation to make paths look nice, and not generate local minima.
+
+## Force Generators
+
+Our codebase defines a number of objects which exert force on a given position on the field.
+
+### Force Shape
+
+All force generating objects use the [*logistic function*](https://en.wikipedia.org/wiki/Logistic_function).
+
+Large distances (far away from the obstacle) correspond to large negative values input to the function. Small distances (very close to the obstacle) should produce large positive values.
+
+While this is not accurate to how forces like gravity work in the real world, it has useful properties for tuning the behavior of the field:
+
+1. Far away from the obstacle (large negative input values to the function), the strength goes to zero.
+2. The X=0 point is always "half strength"
+3. By shifting the X axis left/right, the "radius" of effect of the obstacle can be impacted.
+4. By stretching or shrinking the Y axis, the strength of the force can be changed
+5. By stretching or shrinking the X axis, the "steepness" of the transition from no force to lots of force can be tuned.
+6. The function does not go to infinity at any point.
+
+However, this large number of tuning parameters adds to the complexity of getting a good overall force field, with no local minima.
+
+### PointObstacle
+
+Generates a force that radiates outward from the center point, and whose strength is determined by the logistic function of the distance from that point.
+
+They work well for modeling roughly-circular obstacles (like robots, or posts).
+
+### HorizontalObstacle
+
+Generates a force that radiates toward the positive or negative Y axis. The strength of the force is determined by the logistic function of the distance from a given Y coordinate.
+
+It is useful for keeping the robot away from the longer walls on the field (left or right walls, as viewed from the driver station).
+
+### VerticalObstacle
+
+Generates a force that radiates toward the positive or negative X axis. The strength of the force is determined by the logistic function of the distance from a given X coordinate.
+
+It is useful for keeping the robot away from the shorter walls on the field (red or blue alliance walls).
+
+### Wall
+
+Generates a force that radiates outward, perpendicular to a line segment (defined between two points).
+
+It is useful for keeping the robot away from a wall or ramp in the middle of the field.
+
+Note that there is redundancy in behavior between HorizontalObstacle, VerticalObstacle, Wall types. In the future, this should be removed.
+
+### Lane
+
+Generates a force that pushes the robot toward and along a line segment (defined between two points).
+
+It is useful for marking certain areas of the field as "desirable" to travel through, especially if the default strategy of "go toward the goal" is insufficient.
+
+
+### Goal (TODO)
+
+This class has not yet been added. The existing functionality is to apply a constant force in the direction of the goal itself. This should be moved into a more well-defined class.
+
+
+## Repulsor Field Planner
+
+### Transient Obstacles
+
+In general, transient obstacles decay in strength whenever they are not observed, eventually being removed once they have zero strength.
+
+Whenever an obstacle is observed by a camera, the existing list of obstacles is searched to find if any obstacles are close to the new observation. If so, the existing obstacle is reset to max strength, and moved into the observed position. If no obstacle is close enough, a new obstacle is added to the list.
+
+There is a maximum number of observed obstacles - if more than this max are observed, the oldest obstacle is removed from the list.
+
+Fixed obstacles must be tracked in a separate list, as they do not decay and are constant.
+
+### Path Traversal Speed
+
+Two "slowdown factors" are set up:
+
+1) Start Slow Factor - ramps from 0.0 up to 1.0 after the goal changes. This makes sure the commanded position slowly accelerates from a stop, providing a more realistic change in velocity.
+2) End Slow Factor - as the commanded position gets closer to the goal, the commanded speed reduces to slow the robot down as it approaches the goal.
+
+Other than these two factors, the step size will be set to the maximum desired speed of the path planner. This is usually some fraction (~85%?) of the maximum speed of the drivetrain. The bigger this fraction, the less margin the drivetrain has to correct for disturbances.
+
+### Next-Pose Solver
+
+In general, the basic algorithm for going from the current commanded pose to the next one is:
+
+1) Calculate the direction the force on the current commanded pose points in
+2) Take a step of size (max_vel) * (loop time) * (slow_factors) in that direction
+
+However, when the force changes rapidly from one pose to another, instability in the path can result, as the pose bounces back-and-forth across the sharp change in force.
+
+As Larry indicates: the main way to solve this is to take smaller steps.
+
+Right now, we are configured to take smaller "substeps" (half the size of a normal step) in a short loop, until the next pose is at least one full step away from the current commanded pose. Care is taken to rescale this step to the correct length, so the correct velocity is commanded.
+
+The number of substeps must be limited, to prevent an infinite loop in the case of a local minima.
+
+### Stuck Detection
+
+TODO.
+
+Should be looking at a short history of commanded positions, and determining whether "forward progress" is being made. If the last couple points are at similar spots, declare "stuck".
+
+### Lookahead
+
+This functionality is only active in sim, as it is computationally intensive and (so far) only useful for telemetry and testing.
+
+The lookahead operation plans a number of steps ahead of the position, to help visualize what the pathplanner's behavior will be. These poses are sent to the telemetry logic to display, but are otherwise discarded.
\ No newline at end of file
diff --git a/.docs/profiledController.graphml b/.docs/profiledController.graphml
new file mode 100644
index 0000000..1f65dd4
--- /dev/null
+++ b/.docs/profiledController.graphml
@@ -0,0 +1,975 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Target
+ Position
+
+
+
+
+
+
+
+
+
+
+ Profile
+
+
+
+
+
+
+
+
+
+
+ sign(vel) * kS
+
+
+
+
+
+
+
+
+
+
+ vel * kV
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Pos Sensor
+
+
+
+
+
+
+
+
+
+
+ +
+-
+
+
+
+
+
+
+
+
+
+
+ Pos Err * kP
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Windings
+
+
+
+
+
+
+
+
+
+
+ On RoboRIO
+
+
+
+
+
+
+
+
+
+
+ On SparkMax
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Target
+ Position
+
+
+
+
+
+
+
+
+
+
+ Profile
+
+
+
+
+
+
+
+
+
+
+ sign(vel) * kS
+
+
+
+
+
+
+
+
+
+
+ cos(angle) * kG
+
+
+
+
+
+
+
+
+
+
+ vel * kV
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Pos Sensor
+
+
+
+
+
+
+
+
+
+
+ +
+-
+
+
+
+
+
+
+
+
+
+
+ Pos Err * kP
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Windings
+
+
+
+
+
+
+
+
+
+
+ On RoboRIO
+
+
+
+
+
+
+
+
+
+
+ On SparkMax
+
+
+
+
+
+
+
+
+
+
+
+ Closed-Loop (FeedForward + FeedBack) Architecture for Position Control
+
+
+
+
+
+
+
+
+
+
+
+ Closed-Loop (FeedForward + FeedBack) Architecture for Position Control With Gravity Compensation Term
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Target
+ Position
+
+
+
+
+
+
+
+
+
+
+ Profile
+
+
+
+
+
+
+
+
+
+
+ sign(vel) * kS
+
+
+
+
+
+
+
+
+
+
+ vel * kV
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Pos Sensor
+
+
+
+
+
+
+
+
+
+
+ +
+-
+
+
+
+
+
+
+
+
+
+
+ Pos Err * kP
+
+
+
+
+
+
+
+
+
+
+ +
+
+
+
+
+
+
+
+
+
+
+ Motor Windings
+
+
+
+
+
+
+
+
+
+
+ On RoboRIO
+
+
+
+
+
+
+
+
+
+
+ On SparkMax
+
+
+
+
+
+
+
+
+
+
+
+ Closed-Loop (FeedForward + FeedBack) Architecture for Position Control With Acceleration Term
+
+
+
+
+
+
+
+
+
+
+ accel * kA
+
+
+
+
+
+
+
+
+
+
+ Unprofiled
+Desired Position
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Pos
+
+
+
+
+
+
+
+
+
+
+ Act Position
+
+
+
+
+
+
+
+
+
+
+ Position Error
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedForward Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedBack Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Unprofiled
+Desired Position
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Pos
+
+
+
+
+
+
+
+
+
+
+ Act Position
+
+
+
+
+
+
+
+
+
+
+ Position Error
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedForward Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedBack Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Unprofiled
+Desired Position
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Vel
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Pos
+
+
+
+
+
+
+
+
+
+
+ Act Position
+
+
+
+
+
+
+
+
+
+
+ Position Error
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedForward Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+ FeedBack Voltage
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Des Accel
+
+
+
+
+
+
+
+
+
diff --git a/.docs/utilites.md b/.docs/utilites.md
new file mode 100644
index 0000000..3db0f01
--- /dev/null
+++ b/.docs/utilites.md
@@ -0,0 +1,154 @@
+# Utilities
+
+A number of small utility functions are provided.
+
+It's possible these get dissolved at some point and show up within WPILib, at which point we can remove our versions.
+
+## Alliance Transform
+
+For the past few seasons, the field has not been _diagonally symmetric_. This means that if you compare how the field looks while standing at the red or blue alliance walls, the main field elements will be "mirrored" left/right. A diagonally symmetric field would keep the field appearing the same, no matter which side you are on.
+
+As long as this is the case, we have to use a _truly global_ origin for the field. By convention, we match with WPILib, and pick the corner of the blue alliance wall and the long field wall that would be to your _right_ if you were standing as a driver on the blue alliance. Positive X goes "downfield" toward the red alliance, and positive Y goes to your left.
+
+To keep our code simple, we design all goals, autonomous routines, and other logic assuming we are on the blue alliance. However, for any specific measurement or value which needs to be _different_ for being on the red alliance, we use the `transform()` function to switch the value from blue to red accordingly.
+
+Logic which does this needs to not cache any values, as the alliance can and will change at runtime (either due to the FMS connecting and providing the correct alliance, or from the software team fiddling around and switching alliance while the robot is disabled).
+
+## Calibration
+
+When writing robot code, a frequent issue is having a value which is _usually_ constant during runtime, but might need to be tuned or tweaked by the software team.
+
+PID constants, debounce times, discrete setpoints, and rate limits are common examples of this.
+
+Critically, the deploy process is not _super_ fast - if a value needs to be iterated quickly, it makes sense to write a bit of code to allow it to be changed on the fly, without redeploying.
+
+The `Calibration` object helps do this. Users should declare `Calibration` objects with the name and default value. Optionally, units, min, and max values can be provided too.
+
+At runtime, in code, use the `.get()` method to get the current value of the Calibration.
+
+Additionally, an `.isChanged()` API is present to check if the calibration changed since the last time `.get()` was called - this allows the user to trigger one-time operations when the cal changes (for example, sending CAN bus messages to a motor controller to change its current limit).
+
+Once declared, a calibration object should show up on the website as available for calibration.
+
+### Behind the Scenes
+
+When a `Calibration` is declared, it registers itself with the `CalibrationWrangler`. The `CalibrationWrangler` is a singleton that the web server uses to get the full list of declared calibrations, present them to the user via network tables, and update their values when they change.
+
+## Crash Logging
+
+Python code has the ability to crash at runtime on the robot. This causes the code on the robot to stop running, restart from scratch, and re-enable (if in match). This process takes a few minutes and, in general, is not desirable.
+
+When this happens on the field, it's useful to generate records of what happened to help the software team debug after the match.
+
+The `CrashLogger`'s job is to facilitate that. Additionally, it provides a way for users to inject their own messages at runtime.
+
+When initialized, the `CrashLogger` sets up a python `logging.FileHandler()` to receive log messages.
+
+Periodically, the `update()` function checks that the FMS is connected and has provided certain match-specific pieces of information. Once these are available, they are also (once) written to log.
+
+Because our file logger is attached to the main logger, python itself facilitates making sure any crash information is piped into `stderr`, which in turn ends up in the log.
+
+## External Drive Management
+
+Casserole has semi-standardized on using an external USB drive to store our runtime time-series logs. This helps prevent the main hard drive (flash rom) of the RIO from filling up from log files.
+
+However, it's not guaranteed we remembered to plug in a drive, and we'd like our code not to crash in this case.
+
+The `ExtDriveManager` takes care of finding the right path to log to (which is slightly different in simulation and real robot), checking the path exists and is writable, making directories as needed, and indicating drive health with a Fault. If the log is not available at startup, crash logging and data logging are disabled.
+
+## Faults
+
+A "Fault" is an abnormal condition on the robot which could prevent the robot from fully functioning. These are often things we can detect in software, which we can alert the pit crew to.
+
+The `Fault` object can be declared, with a message to be shown to the user when it is active. Faults start inactive by default.
+
+The `.set()`, `.setFaulted()`, and `.setNoFault()` methods should be called to alter whether the fault is annunciating "active" to the user or not.
+
+Caution should be applied in triggering a fault to be active without something to fix. We want to drive the behavior that when the fault indicator starts blinking, pit crew treats the situation as a "stop to fix" scenario. Having faults that are "supposed to be active" will hide new, true faults that need fixing.
+
+### Behind the Scenes - Fault Wrangler
+
+The `FaultWrangler` is a singleton that all `Fault`'s register themselves with on init.
+
+On `update`, the fault wrangler checks for any active faults. If 1 or more faults are active:
+
+1. The NT entry `faultDescription` cycles through all the strings of the active faults
+2. A red LED begins blinking on the robot.
+
+Faults are displayed in a few different ways:
+
+### Driver Dashboard
+
+An Icon/String widget can be used to display the current `faultDescription` string, and blink if the fault Count is greater than 1
+
+### Fault LED
+
+A roboRIO output is used to pulse an LED on and off when one or more faults are active. This is a signal to the pit crew to stop and fix the robot.
+
+### Heartbeat LED
+
+If code is completely frozen, faults will not annunciate. To help make sure code is running, in all conditions, a white LED is pulsed on and off slowly. Pit crew should know not to expect normal robot function until this LED is pulsing.
+
+## Map Lookup 2D
+
+A common way to define functions in embedded software is to _sample_ the function's output at various input values. Then, these are used to "map" future inputs to outputs by linearly interpolating between the known points, at the given input.
+
+We use the `MapLookup2D` object to do this operation.
+
+It is declared with a set of `(input, output)` tuples that represent the points in the map.
+
+The first and last outputs are "sampled and held" for all input values above or below the first and last input values.
+
+For example, y = 2x might be defined as:
+
+```py
+doublerMap = MapLookup2d([
+ (-100,-200),
+ (0,0),
+ (100,200),
+])
+```
+
+The `.lookup(xval)` method is used to look up an output value for the given `xval` input.
+
+## Power & RIO Monitors
+
+The `PowerMonitor` has largely been stripped for now.
+
+In the future, it's important to make sure that at least battery voltage and current are logged. Per-motor current draw might also be useful to log, if resources exist to do it.
+
+The `RIOMonitor` class has also been fairly stripped out. Its job was to monitor roboRIO-specific things that could be useful for debugging issues in the pit afterward. For example:
+
+1. RAM usage
+2. CPU Load
+3. CAN bus load, CAN bus error count
+4. Disk usage
+5. input voltage
+6. voltage rail faults.
+
+In the future we'll want to expand these back out. However, for now, their functionality has been stripped down to prevent high CPU loads.
+
+## Segment Time Tracker
+
+To keep our code running well, our code needs to run fast (at least fast enough to let it execute all periodic actions once every 20ms).
+
+Tracking how fast our code executes is an important component of keeping it running well.
+
+The `SegmentTimeTracker` class can help with this. After init'ing it, call the following:
+
+`start()` at the start of a periodic loop
+`end()` at the end of a periodic loop
+
+These will help produce metrics in the dashboard about how long our code spent running, and what period it was _actually_ called at.
+
+Additionally, `mark(thing)` can be called after certain actions to indicate that `thing` just finished. If the code starts running slow, a report will occasionally be dumped out to indicate a list of how long each `thing` that was marked took to run.
+
+## Singleton Infrastructure
+
+Singletons are a programming concept for creating a class which has only one instance, and can be accessed anywhere.
+
+This is not super common out in non-robotics programming, but ends up being a pretty useful concept when we create classes designed around a thing on our robot that we can be certain there is only ever one of (drivetrain? arm?).
+
+Declaring a class as inheriting from `Singleton` causes invocation of `MySingletonClass()` constructor to return the same instance of the class.
+
+Note that for _unit testing purposes specifically_, we need to make sure each class's destructor is called. We do this by calling `destroyAllSingletonInstances` at the end of each testcase to reset the simulation back to "no code running" state.
diff --git a/.docs/vsCodeDeployment.md b/.docs/vsCodeDeployment.md
new file mode 100644
index 0000000..de215f2
--- /dev/null
+++ b/.docs/vsCodeDeployment.md
@@ -0,0 +1,41 @@
+# vsCode Project Setup
+
+A few things were added to our vsCode project to try to make vsCode feel closer to Java's workflow.
+
+## Setup
+
+Configuration is saved in `.json` files saved in the `.vscode` folder.
+
+## Tasks - `tasks.json`
+
+`tasks.json` creates menu items for the common commands we run from robotpy. These include:
+
+- Running the Test Suite in Simulation
+- Deploying code to the RIO (normal)
+- Viewing netconsole (the stdout forwarder that you see in Driver Station)
+- Deploying the code to the RIO (debug mode)
+
+## Debug Configurations - `launch.json`
+
+### Simulation
+
+This invokes the robotpy command to do simulation, but invokes it within a python debugger context.
+
+### On-RIO
+
+This first re-deploys the code with special flags to enable debug, waits for the code to start up, then connects to the RIO remotely (which _should_ be running a debug server at that point)
+
+There is a small bit required inside of robot code to support debugging. In `robot.py` - the function `remoteRIODebugSupport()` should be invoked early on in execution. This function shall:
+
+1. Detect that we're running robot code on a RIO, with debug mode active.
+2. Import the `debugpy` module JIT (it's not needed otherwise and was causing some sim problems).
+3. Enable remote debug connections from any client IP on port 5678
+4. Wait indefinitely for a client to connect
+
+Step 4 is critical to be sure that a breakpoint set early in initialization is not missed. This was functionality specifically not present in robotpy in 2024.
+
+This code is fairly intrusive to robot behavior, and overlaps debug support that robotpy was attempting to add in 2024. It needs review in 2025 to see if it's still needed.
+
+### Test
+
+This invokes the robotpy command to run the test suite, but invokes it within a python debugger context
diff --git a/drivetrain/drivetrainPhysical.py b/drivetrain/drivetrainPhysical.py
index e1167eb..16ca25a 100644
--- a/drivetrain/drivetrainPhysical.py
+++ b/drivetrain/drivetrainPhysical.py
@@ -20,9 +20,6 @@
WHEEL_BASE_HALF_WIDTH_M = inchesToMeters(23.75 / 2.0)
WHEEL_BASE_HALF_LENGTH_M = inchesToMeters(23.75 / 2.0)
-# Additional distance from the wheel contact patch out to the edge of the bumper
-BUMPER_THICKNESS_M = inchesToMeters(2.5)
-
# Total mass includes robot, battery, and bumpers
# more than the "weigh-in" weight
ROBOT_MASS_KG = lbsToKg(140)
diff --git a/drivetrain/poseEstimation/drivetrainPoseEstimator.py b/drivetrain/poseEstimation/drivetrainPoseEstimator.py
index ab04dd4..0d02e8d 100644
--- a/drivetrain/poseEstimation/drivetrainPoseEstimator.py
+++ b/drivetrain/poseEstimation/drivetrainPoseEstimator.py
@@ -96,7 +96,7 @@ def update(self, curModulePositions:posTuple_t, curModuleSpeeds:stateTuple_t):
if(self._useAprilTags):
for cam in self.cams:
cam.update(self._curEstPose)
- observations = cam.getPoseEstimates()
+ observations = cam.getCamPoseObservations()
for observation in observations:
self._poseEst.addVisionMeasurement(
observation.estFieldPose, observation.time, (observation.xyStdDev, observation.xyStdDev, observation.rotStdDev)
diff --git a/navigation/forceGenerators.py b/navigation/forceGenerators.py
index bd0ec40..745cba5 100644
--- a/navigation/forceGenerators.py
+++ b/navigation/forceGenerators.py
@@ -129,7 +129,7 @@ def getTrans(self) -> list[Translation2d]:
Translation2d(self.x,FIELD_Y_M),
]
-class LinearForceGenerator(ForceGenerator):
+class _LinearForceGenerator(ForceGenerator):
"""
A linear force generator creates forces based on the relative position of the robot to a specific line segment
"""
@@ -172,7 +172,7 @@ def getTrans(self) -> list[Translation2d]:
self.end
]
-class Lane(LinearForceGenerator):
+class Lane(_LinearForceGenerator):
"""
A lane is an attractor - it creates a field force that pulls robots toward and along it, "ejecting" them out the far end.
It helps as a "hint" when the robot needs to navigate in a specific way around obstacles, which is not necessarily straight toward the goal.
@@ -195,7 +195,7 @@ def getForceAtPosition(self, position: Translation2d) -> Force:
return Force(unitX*forceMag, unitY*forceMag)
-class Wall(LinearForceGenerator):
+class Wall(_LinearForceGenerator):
"""
Walls obstacles are finite lines between specific coordinates
They push the robot away along a perpendicular direction.
diff --git a/robot.py b/robot.py
index 0ea4818..96e4150 100644
--- a/robot.py
+++ b/robot.py
@@ -174,6 +174,7 @@ def endCompetition(self):
super().endCompetition()
def remoteRIODebugSupport():
+ # TODO - is this still needed in 2025+?
if __debug__ and "run" in sys.argv:
print("Starting Remote Debug Support....")
try:
diff --git a/wrappers/wrapperedPoseEstPhotonCamera.py b/wrappers/wrapperedPoseEstPhotonCamera.py
index a60f982..3400c73 100644
--- a/wrappers/wrapperedPoseEstPhotonCamera.py
+++ b/wrappers/wrapperedPoseEstPhotonCamera.py
@@ -1,7 +1,7 @@
from dataclasses import dataclass
import wpilib
from wpimath.units import feetToMeters, degreesToRadians
-from wpimath.geometry import Pose2d
+from wpimath.geometry import Pose2d, Transform3d
from photonlibpy.photonCamera import PhotonCamera
from photonlibpy.photonCamera import setVersionCheckEnabled
from utils.fieldTagLayout import FieldTagLayout
@@ -20,28 +20,35 @@ class CameraPoseObservation:
# 2 - Convert pose estimates to the field
# 3 - Handle recording latency of when the image was actually seen
class WrapperedPoseEstPhotonCamera:
- def __init__(self, camName, robotToCam):
+ def __init__(self, camName:str, robotToCam:Transform3d):
+ """
+ """
setVersionCheckEnabled(False)
- self.cam = PhotonCamera(camName)
+ self._cam = PhotonCamera(camName)
- self.disconFault = Fault(f"Camera {camName} not sending data")
- self.timeoutSec = 1.0
- self.poseEstimates:list[CameraPoseObservation] = []
- self.robotToCam = robotToCam
+ self._disconFault = Fault(f"Camera {camName} not sending data")
+ self._timeoutSec = 1.0
+ self._poseEstimates:list[CameraPoseObservation] = []
+ self._robotToCam = robotToCam
def update(self, prevEstPose:Pose2d):
+ """
+ Process the all queued results from the camera
+ prevEstPose - the most recent pose estimate. Used to help guess which pose is correct.
+ """
- self.poseEstimates = []
+ self._poseEstimates = []
- if(not self.cam.isConnected()):
+ if(not self._cam.isConnected()):
# Faulted - no estimates, just return.
- self.disconFault.setFaulted()
+ self._disconFault.setFaulted()
return
# Grab whatever the camera last reported for observations in a camera frame
# Note: Results simply report "I processed a frame". There may be 0 or more targets seen in a frame
- res = self.cam.getLatestResult()
+ # TODO: in 2025, there will be a "get all unread results" which read from a queue. Use this, it's better.
+ res = self._cam.getLatestResult()
# hack - results also have a more accurate "getTimestamp()", but this is
# broken in photonvision 2.4.2. Hack with the non-broken latency calcualtion
@@ -51,7 +58,7 @@ def update(self, prevEstPose:Pose2d):
# Update our disconnected fault since we have something from the camera
- self.disconFault.setNoFault()
+ self._disconFault.setNoFault()
# Process each target.
# Each target has multiple solutions for where you could have been at on the field
@@ -101,16 +108,19 @@ def update(self, prevEstPose:Pose2d):
# TODO: we can probably get better standard deviations than just
# assuming the default. Check out what 254 did in 2024:
# https://github.com/Team254/FRC-2024-Public/blob/040f653744c9b18182be5f6bc51a7e505e346e59/src/main/java/com/team254/frc2024/subsystems/vision/VisionSubsystem.java#L381
- self.poseEstimates.append(
+ self._poseEstimates.append(
CameraPoseObservation(obsTime, bestCandidate)
)
- def getPoseEstimates(self):
- return self.poseEstimates
+ def getCamPoseObservations(self) -> list[CameraPoseObservation]:
+ """
+ Returns the list of all camera pose observations deduced from the last call to update.
+ """
+ return self._poseEstimates
def _toFieldPose(self, tgtPose, camToTarget):
camPose = tgtPose.transformBy(camToTarget.inverse())
- return camPose.transformBy(self.robotToCam.inverse()).toPose2d()
+ return camPose.transformBy(self._robotToCam.inverse()).toPose2d()
# Returns true of a pose is on the field, false if it's outside of the field perimieter
def _poseIsOnField(self, pose: Pose2d):