Skip to content

Commit

Permalink
Bugfixes, we're moving! Need to get object detection working still
Browse files Browse the repository at this point in the history
  • Loading branch information
gerth2 committed Oct 25, 2024
1 parent a066854 commit bfe6be0
Show file tree
Hide file tree
Showing 7 changed files with 25 additions and 19 deletions.
2 changes: 1 addition & 1 deletion drivetrain/controlStrategies/autoDrive.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
# Maximum speed that we'll attempt to path plan at. Needs to be at least
# slightly less than the maximum physical speed, so the robot can "catch up"
# if it gets off the planned path
MAX_PATHPLAN_SPEED_MPS = 0.85 * MAX_DT_LINEAR_SPEED_MPS
MAX_PATHPLAN_SPEED_MPS = 0.75 * MAX_DT_LINEAR_SPEED_MPS

class AutoDrive(metaclass=Singleton):
def __init__(self):
Expand Down
4 changes: 2 additions & 2 deletions drivetrain/drivetrainPhysical.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,9 +140,9 @@ def dtMotorRotToLinear(rot):

ROBOT_TO_FRONT_CAM = Transform3d(
Translation3d(
inchesToMeters(12.0), inchesToMeters(0.0), inchesToMeters(20.0) # X # Y # Z
inchesToMeters(14.5), inchesToMeters(2.75), inchesToMeters(8) # X # Y # Z
),
Rotation3d(0.0, -20.0, 0.0), # Roll # Pitch # Yaw
Rotation3d(0.0, 0.0, 0.0), # Roll # Pitch # Yaw
)


Expand Down
4 changes: 2 additions & 2 deletions humanInterface/driverInterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,8 @@ def update(self):
vRotJoyWithDeadband = applyDeadband(vRotJoyRaw, 0.2)

# TODO - if the driver wants a slow or sprint button, add it here.
#slowMult = 1.0 if (self.ctrl.getRightBumper()) else 0.75
slowMult = 1.0
slowMult = 1.0 if (self.ctrl.getRightBumper()) else 0.75
#slowMult = 1.0

# Shape velocity command
velCmdXRaw = vXJoyWithDeadband * MAX_STRAFE_SPEED_MPS * slowMult
Expand Down
6 changes: 3 additions & 3 deletions robot.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,9 @@ def teleopPeriodic(self):
# For test purposes, inject a series of obstacles around the current pose
ct = self.driveTrain.poseEst.getCurEstPose().translation()
tfs = [
Translation2d(1.7, -0.5),
Translation2d(0.75, -0.75),
Translation2d(1.7, 0.5),
#Translation2d(1.7, -0.5),
#Translation2d(0.75, -0.75),
#Translation2d(1.7, 0.5),
Translation2d(0.75, 0.75),
Translation2d(2.0, 0.0),
Translation2d(0.0, 1.0),
Expand Down
4 changes: 3 additions & 1 deletion vectorPlotter.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@
from scipy.ndimage import gaussian_filter
from scipy.signal import argrelextrema

from utils.constants import FIELD_X_M, FIELD_Y_M

class VectorPlotter:
def __init__(self, width_meters, height_meters, fig_size=(10, 8), dpi=100):
"""
Expand Down Expand Up @@ -97,7 +99,7 @@ def plot(self):

# Normalize magnitudes for color mapping
print("Normalizing magnitudes for color mapping...")
norm = plt.Normalize(vmin=min_magnitude, vmax=max_magnitude)
norm = plt.Normalize(vmin=min_magnitude, vmax=max_magnitude)
cmap = cm.get_cmap('jet') # Blue -> Green -> Red
colors = cmap(norm(magnitudes))
print("Color normalization complete.")
Expand Down
9 changes: 7 additions & 2 deletions wrappers/wrapperedObstaclePhotonCamera.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def __init__(self, time:float, estFieldPose:Pose2d, trustworthiness=1.0):
self.estFieldPose = estFieldPose
self.trustworthiness = trustworthiness # TODO - not used yet

MIN_AREA=10.0 #idk tune this if we are reacting to small targets
MIN_AREA=1.0 #idk tune this if we are reacting to small targets

def _calculateDistanceToTargetMeters(
cameraHeightMeters:float,
Expand Down Expand Up @@ -68,11 +68,13 @@ def update(self):
# MiniHack - results also have a more accurate "getTimestamp()", but this is
# broken in photonvision 2.4.2. Hack with the non-broken latency calcualtion
latency = res.getLatencyMillis()
obsTime = wpilib.Timer.getFPGATimestamp() - latency
obsTime = wpilib.Timer.getFPGATimestamp()

# Update our disconnected fault since we have something from the camera
self.disconFault.setNoFault()

print("got camera data")

# Process each target.
# Each target has multiple solutions for where you could have been at on the field
# when you observed it
Expand All @@ -83,7 +85,9 @@ def update(self):
# don't make sense.
for target in res.getTargets():
# Transform both poses to on-field poses
print("saw target")
if (target.getArea()>MIN_AREA):
print("big target")
# Use algorithm described at
# https://docs.limelightvision.io/docs/docs-limelight/tutorials/tutorial-estimating-distance
# to estimate distance from the camera to the target.
Expand All @@ -97,6 +101,7 @@ def update(self):
dist,
Rotation2d.fromDegrees(target.getYaw())
)
print(f"{camToObstacle}")

self.obstacleEstimates.append(camToObstacle)

Expand Down
15 changes: 7 additions & 8 deletions wrappers/wrapperedPoseEstPhotonCamera.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def update(self, prevEstPose:Pose2d):
# MiniHack - results also have a more accurate "getTimestamp()", but this is
# broken in photonvision 2.4.2. Hack with the non-broken latency calcualtion
latency = res.getLatencyMillis()
obsTime = wpilib.Timer.getFPGATimestamp() - latency
obsTime = wpilib.Timer.getFPGATimestamp()


# Update our disconnected fault since we have something from the camera
Expand All @@ -67,21 +67,20 @@ def update(self, prevEstPose:Pose2d):
if tagFieldPose is not None:
# Only handle known tags
poseCandidates:list[Pose2d] = []
poseCandidates.append(
self._toFieldPose(tagFieldPose, target.getBestCameraToTarget())
)
poseCandidates.append(
self._toFieldPose(
tagFieldPose, target.getAlternateCameraToTarget()
if target.getPoseAmbiguity() <= .5:
poseCandidates.append(
self._toFieldPose(tagFieldPose, target.getBestCameraToTarget())
)
poseCandidates.append(
self._toFieldPose(tagFieldPose, target.getAlternateCameraToTarget())
)

# Filter candidates in this frame to only the valid ones
filteredCandidates:list[Pose2d] = []
for candidate in poseCandidates:
onField = self._poseIsOnField(candidate)
# Add other filter conditions here
if onField:
if onField and target.getBestCameraToTarget().translation().norm() <= 4:
filteredCandidates.append(candidate)

# Pick the candidate closest to the last estimate
Expand Down

0 comments on commit bfe6be0

Please sign in to comment.