Commit 7fb6036

renamed use of goal to node and stopped using translation3d in immutable format
Lucien Morey committed Jan 22, 2023
1 parent 58cda73 commit 7fb6036
Showing 5 changed files with 83 additions and 78 deletions.
51 changes: 28 additions & 23 deletions goal_map.py
@@ -1,17 +1,17 @@
 from helper_types import (
-    GoalRegionState,
-    GoalRegionObservation,
-    GoalRegion,
+    NodeRegionState,
+    NodeRegionObservation,
+    NodeRegion,
     ExpectedGamePiece,
 )
 from wpimath.geometry import Translation3d


-class GoalRegionMap:
+class NodeRegionMap:
     X_OFFSET_TO_GRID: float = 0.0
     Y_OFFSET_TO_GRID: float = 516.763
     INTAKE_ZONE_OFFSET: float = 0.0
-    Y_DISTANCE_BETWEEN_GOALS: float = 558.0
+    Y_DISTANCE_BETWEEN_NODES: float = 558.0

     Z_DISTANCE_CUBE_LOOK_UP: list[float] = [1169.988, 865.188, 0.0]
     Z_DISTANCE_CONE_LOOK_UP: list[float] = [
@@ -24,40 +24,45 @@ class GoalRegionMap:
     X_DISTANCE_CONE_LOOK_UP: list[float] = [364.231, 795.231, 1167.655]

     def __init__(self, on_blue_alliance: bool):
-        self.map: list[GoalRegionState] = []
+        self.map: list[NodeRegionState] = []

-        # This will start from the top row of goals
+        # This will start from the top row of nodes
         for row in range(3):
             cone_height = self.Z_DISTANCE_CONE_LOOK_UP[row]
             cube_height = self.Z_DISTANCE_CUBE_LOOK_UP[row]
             # this will start from the grid closest to the field origin
             for grid in range(3):
-                # this will start from the goal closest to the field origin in each grid
-                for goal in range(3):
-                    id = row * 9 + grid * 3 + goal
+                # this will start from the node closest to the field origin in each grid
+                for node in range(3):
+                    id = row * 9 + grid * 3 + node
                     expected_game_piece = ExpectedGamePiece.CONE
-                    position = Translation3d()
-                    position.y = self.Y_DISTANCE_BETWEEN_GOALS * (grid + goal)
+                    pos_x = 0.0
+                    pos_y = 0.0
+                    pos_z = 0.0
+
+                    pos_y = self.Y_DISTANCE_BETWEEN_NODES * (grid + node)
                     if not on_blue_alliance:
-                        position.y += self.INTAKE_ZONE_OFFSET
-                    position.z = cone_height
-                    position.x = self.X_DISTANCE_CONE_LOOK_UP[grid]
+                        pos_y += self.INTAKE_ZONE_OFFSET
+                    pos_z = cone_height
+                    pos_x = self.X_DISTANCE_CONE_LOOK_UP[grid]

                     if row == 2:
                         expected_game_piece = ExpectedGamePiece.BOTH
-                        position.z = cube_height
-                        position.x = self.X_DISTANCE_CUBE_LOOK_UP[grid]
-                    elif goal == 1:
+                        pos_z = cube_height
+                        pos_x = self.X_DISTANCE_CUBE_LOOK_UP[grid]
+                    elif node == 1:
                         expected_game_piece = ExpectedGamePiece.CUBE
-                        position.z = cube_height
-                        position.x = self.X_DISTANCE_CUBE_LOOK_UP[grid]
+                        pos_z = cube_height
+                        pos_x = self.X_DISTANCE_CUBE_LOOK_UP[grid]
+
+                    position = Translation3d(pos_x, pos_y, pos_z)

                     self.map.append[
-                        (GoalRegionState(GoalRegion(id, expected_game_piece, position)))
+                        (NodeRegionState(NodeRegion(id, expected_game_piece, position)))
                     ]

-    def update(self, goal_observations: list[GoalRegionObservation]):
+    def update(self, node_observations: list[NodeRegionObservation]):
         pass

-    def get_state(self) -> list[GoalRegionState]:
+    def get_state(self) -> list[NodeRegionState]:
         return self.map
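
The second half of the commit message is the substantive fix here: in robotpy's wpimath, Translation3d is immutable, with x, y, and z exposed read-only, so the old position.y = ... assignments could never take effect. A minimal sketch of the construct-once pattern the new code adopts, with illustrative coordinate values taken from the lookup tables above. Note also that the unchanged context line self.map.append[...] uses square brackets where a call, self.map.append(...), appears to be intended.

    from wpimath.geometry import Translation3d

    # compute the plain-float components first
    pos_x, pos_y, pos_z = 364.231, 558.0, 865.188  # illustrative node coordinates

    # then build the immutable value in a single constructor call;
    # attribute assignment such as position.y = 0.0 fails on the frozen type
    position = Translation3d(pos_x, pos_y, pos_z)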
10 changes: 5 additions & 5 deletions helper_types.py
@@ -21,19 +21,19 @@ def area(self) -> int:


 @dataclass
-class GoalRegion:
+class NodeRegion:
     id: int
     expected_game_piece: ExpectedGamePiece
     position: Translation3d


 @dataclass
-class GoalRegionObservation:
+class NodeRegionObservation:
     bounding_box: BoundingBox
-    goal_region: GoalRegion
+    node_region: NodeRegion


 @dataclass
-class GoalRegionState:
-    goal_region: GoalRegion
+class NodeRegionState:
+    node_region: NodeRegion
     occupied: bool = False
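
For orientation, a short sketch of how the renamed dataclasses nest; the field values are illustrative, and ExpectedGamePiece is assumed to be the CONE/CUBE/BOTH enum used by the lookup logic in goal_map.py.

    from wpimath.geometry import Translation3d
    from helper_types import ExpectedGamePiece, NodeRegion, NodeRegionState

    region = NodeRegion(
        id=4,
        expected_game_piece=ExpectedGamePiece.CUBE,
        position=Translation3d(795.231, 558.0, 865.188),
    )
    state = NodeRegionState(region)  # occupied defaults to False
    state.occupied = True  # flipped once a game piece is detected in the region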
2 changes: 1 addition & 1 deletion magic_numbers.py
@@ -15,7 +15,7 @@

 CONTOUR_TO_BOUNDING_BOX_AREA_RATIO_THRESHOLD = 0.1

-# Gate angle to determine if robot can even see the goal or not to avoid projecting backwards
+# Gate angle to determine if robot can even see the node or not to avoid projecting backwards
 DIRECTION_GATE_ANGLE = pi

 ROBOT_BASE_TO_CAMERA_TRANSLATION = Translation3d(0.0, 0.0, 0.0)
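
DIRECTION_GATE_ANGLE exists to reject nodes that sit behind the camera before any projection happens. A hedged sketch of one way such a gate could be applied; the function below is an assumption for illustration, since the repository's real check (point3d_in_field_of_view in vision.py) is not shown in this diff.

    import math
    from wpimath.geometry import Translation3d

    DIRECTION_GATE_ANGLE = math.pi

    def within_direction_gate(point_camera_frame: Translation3d) -> bool:
        # bearing of the point relative to the camera's forward (+x) axis
        bearing = math.atan2(point_camera_frame.y, point_camera_frame.x)
        return abs(bearing) < DIRECTION_GATE_ANGLE / 2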
34 changes: 17 additions & 17 deletions test_images.py
@@ -3,7 +3,7 @@
 import cv2
 import vision
 from helper_types import BoundingBox, ExpectedGamePiece
-from goal_map import GoalRegionMap
+from node_map import NodeRegionMap
 from wpimath.geometry import Pose2d
 import numpy as np
 from camera_config import CameraParams
@@ -28,33 +28,33 @@ def read_test_data_csv(fname: str):
     return result


-def read_goal_region_in_frame_csv(fname: str):
+def read_node_region_in_frame_csv(fname: str):
     with open(fname) as f:
         result = []
         for (
             image,
-            goal_region_visible,
+            node_region_visible,
             robot_x,
             robot_y,
             heading,
-            goal_region_id,
+            node_region_id,
         ) in csv.reader(f):
             result.append(
                 (
                     image,
-                    goal_region_visible == "True",
+                    node_region_visible == "True",
                     float(robot_x),
                     float(robot_y),
                     float(heading),
-                    int(goal_region_id),
+                    int(node_region_id),
                 )
             )
     return result


 images = read_test_data_csv("test/expected.csv")
-goal_region_in_frame_images = read_goal_region_in_frame_csv(
-    "test/goal_region_in_frame.csv"
+node_region_in_frame_images = read_node_region_in_frame_csv(
+    "test/node_region_in_frame.csv"
 )
@@ -116,30 +116,30 @@ def test_sample_images(


 @pytest.mark.parametrize(
-    "filename,goal_region_visible,robot_x,robot_y,heading, goal_region_id",
-    goal_region_in_frame_images,
+    "filename,node_region_visible,robot_x,robot_y,heading, node_region_id",
+    node_region_in_frame_images,
 )
-def test_goal_region_in_frame(
+def test_node_region_in_frame(
     filename: str,
-    goal_region_visible: bool,
+    node_region_visible: bool,
     robot_x: float,
     robot_y: float,
     heading: float,
-    goal_region_id: int,
+    node_region_id: int,
 ):
     image = cv2.imread(f"./test/{filename}")
     assert image is not None
-    goal_region_map = GoalRegionMap()
+    node_region_map = NodeRegionMap()

-    goal_region = goal_region_map.get_state()[goal_region_id].goal_region
+    node_region = node_region_map.get_state()[node_region_id].node_region

     robot_pose = Pose2d(robot_x, robot_y, heading)

     camera_matrix = np.array([1, 0, 0], [0, 1, 0], [0, 0, 1])

     assert (
-        vision.is_goal_region_in_image(image, robot_pose, camera_matrix, goal_region)
-        == goal_region_visible
+        vision.is_node_region_in_image(image, robot_pose, camera_matrix, node_region)
+        == node_region_visible
     )
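
Two context lines in this test look fragile as written: np.array([1, 0, 0], [0, 1, 0], [0, 0, 1]) passes the second list where NumPy expects a dtype and raises a TypeError, and NodeRegionMap() omits the on_blue_alliance argument its __init__ requires. A sketch of what was presumably intended, with the alliance flag as a placeholder:

    import numpy as np
    from node_map import NodeRegionMap

    # 3x3 identity stand-in for the camera matrix; np.array takes one nested list
    camera_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    node_region_map = NodeRegionMap(on_blue_alliance=True)  # placeholder alliance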


64 changes: 32 additions & 32 deletions vision.py
@@ -14,22 +14,22 @@
 import cv2
 import numpy as np
 from helper_types import (
-    GoalRegionObservation,
-    GoalRegion,
-    GoalRegionState,
+    NodeRegionObservation,
+    NodeRegion,
+    NodeRegionState,
     ExpectedGamePiece,
     BoundingBox,
 )
 from camera_config import CameraParams
-from goal_map import GoalRegionMap
+from node_map import NodeRegionMap
 from wpimath.geometry import Pose2d, Pose3d, Translation3d, Transform3d, Rotation3d


 class Vision:
     def __init__(self, camera_manager: CameraManager, connection: NTConnection) -> None:
         self.camera_manager = camera_manager
         self.connection = connection
-        self.map = GoalRegionMap()
+        self.map = NodeRegionMap()

     def run(self) -> None:
         """Main process function.
@@ -124,84 +124,84 @@ def is_game_piece_present(


 def process_image(frame: np.ndarray, pose: Pose2d):

-    # visible_goals = self.find_visible_goals(frame, pose)
+    # visible_nodes = self.find_visible_nodes(frame, pose)

-    # goal_states = self.detect_goal_state(frame, visible_goals)
+    # node_states = self.detect_node_state(frame, visible_nodes)

     # whatever the update step is
-    # self.map.update(goal_states)
+    # self.map.update(node_states)

     # map_state = self.map.get_state()

     # annotate frame
-    # annotated_frame = annotate_image(frame, map_state, goal_states)
+    # annotated_frame = annotate_image(frame, map_state, node_states)

-    # return state of map (state of all goal regions) and annotated camera stream
+    # return state of map (state of all node regions) and annotated camera stream
     return


-def find_visible_goals(frame: np.ndarray, pose: Pose2d) -> list[GoalRegionObservation]:
-    """Segment image to find visible goals in a frame
+def find_visible_nodes(frame: np.ndarray, pose: Pose2d) -> list[NodeRegionObservation]:
+    """Segment image to find visible nodes in a frame

     Args:
-        frame (np.ndarray): New camera frame containing goals
+        frame (np.ndarray): New camera frame containing nodes
         pose (Pose2d): Current robot pose in the world frame

     Returns:
-        List[GoalRegionObservation]: List of goal region observations with no information about occupancy
+        List[NodeRegionObservation]: List of node region observations with no information about occupancy
     """
     pass


-def detect_goal_state(
-    frame: np.ndarray, regions_of_interest: list[GoalRegionObservation]
-) -> list[GoalRegionObservation]:
-    """Detect goal occupancy in a set of observed goal regions
+def detect_node_state(
+    frame: np.ndarray, regions_of_interest: list[NodeRegionObservation]
+) -> list[NodeRegionObservation]:
+    """Detect node occupancy in a set of observed node regions

     Args:
-        frame (np.ndarray): New camera frame containing goals
-        regions_of_interest (List[GoalRegionObservation]): List of goal region observations with no information about occupancy
+        frame (np.ndarray): New camera frame containing nodes
+        regions_of_interest (List[NodeRegionObservation]): List of node region observations with no information about occupancy

     Returns:
-        List[GoalRegionObservation]: List of goal region observations
+        List[NodeRegionObservation]: List of node region observations
     """
     pass


 def annotate_image(
     frame: np.ndarray,
-    map: list[GoalRegionState],
-    goal_observations: list[GoalRegionObservation],
+    map: list[NodeRegionState],
+    node_observations: list[NodeRegionObservation],
 ) -> np.ndarray:
-    """annotate a frame with projected goal points
+    """annotate a frame with projected node points

     Args:
         frame (np.ndarray): raw image frame without annotation
-        map (List[GoalState]): current map state with information on occupancy state_
-        goal_observations (List[GoalRegionObservation]): goal observations in the current time step
+        map (List[NodeState]): current map state with information on occupancy state_
+        node_observations (List[NodeRegionObservation]): node observations in the current time step

     Returns:
-        np.ndarray: frame annotated with observed goal regions
+        np.ndarray: frame annotated with observed node regions
     """
     pass


-def is_goal_region_in_image(
+def is_node_region_in_image(
     robot_pose: Pose2d,
     camera_params: CameraParams,
-    goal_region: GoalRegion,
+    node_region: NodeRegion,
 ) -> bool:

     # create transform to make camera origin
     world_to_robot = Transform3d(Pose3d(), Pose3d(robot_pose))
     world_to_camera = world_to_robot + camera_params.transform
-    goal_region_camera_frame = world_to_camera.inverse() + Transform3d(
-        goal_region.position, Rotation3d()
+    node_region_camera_frame = world_to_camera.inverse() + Transform3d(
+        node_region.position, Rotation3d()
     )

     # Check the robot is facing the right direction for the point by checking it is inside the FOV
     return point3d_in_field_of_view(
-        goal_region_camera_frame.translation(), camera_params
+        node_region_camera_frame.translation(), camera_params
     )
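
To follow the geometry in is_node_region_in_image: the node's field-frame position is re-expressed in the camera frame by composing rigid transforms, and only the resulting translation is needed for the field-of-view test. A standalone sketch of the same chain, where the poses and the camera mounting transform (standing in for camera_params.transform) are placeholders:

    from wpimath.geometry import Pose2d, Pose3d, Rotation3d, Transform3d, Translation3d

    robot_pose = Pose2d(2.0, 3.0, 0.0)  # placeholder field pose (x, y, heading)
    robot_to_camera = Transform3d(Translation3d(0.3, 0.0, 0.5), Rotation3d())  # placeholder mount

    # field frame -> robot frame -> camera frame
    world_to_robot = Transform3d(Pose3d(), Pose3d(robot_pose))
    world_to_camera = world_to_robot + robot_to_camera

    # compose camera->world with world->node to express the node in the camera frame
    node_in_camera = world_to_camera.inverse() + Transform3d(
        Translation3d(5.0, 3.0, 0.9), Rotation3d()  # placeholder node position
    )
    # node_in_camera.translation() is what a check like point3d_in_field_of_view consumes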

