PhotonVision C++ v2026.2.2
VideoSimUtil.h
/*
 * MIT License
 *
 * Copyright (c) PhotonVision
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include <algorithm>
#include <numeric>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include <cscore_cv.h>
#include <frc/apriltag/AprilTag.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/objdetect.hpp>
#include <units/length.h>

#include "SimCameraProperties.h"
// Assumed include path; the OpenCVHelp:: utilities (GetMinAreaRect,
// ProjectPoints) are used below.
#include "photon/estimation/OpenCVHelp.h"

namespace mathutil {
// Returns the sign of val as -1, 0, or +1.
template <typename T>
int sgn(T val) {
  return (T(0) < val) - (val < T(0));
}
} // namespace mathutil

namespace photon {
namespace VideoSimUtil {
// Tag IDs start at 0, so this should be set to 1 greater than the maximum tag
// ID required.
static constexpr int kNumTags36h11 = 40;

static constexpr units::meter_t fieldLength{16.54175_m};
static constexpr units::meter_t fieldWidth{8.0137_m};

/**
 * Gets the image of a specific 36h11 AprilTag, generated by WPILib. The raw
 * frame data is cloned so the returned cv::Mat owns its memory.
 */
static cv::Mat Get36h11TagImage(int id) {
  wpi::RawFrame frame;
  frc::AprilTag::Generate36h11AprilTagImage(&frame, id);
  cv::Mat markerImage{frame.height, frame.width, CV_8UC1, frame.data,
                      static_cast<size_t>(frame.stride)};
  return markerImage.clone();
}

/** Generates and caches the images of all 36h11 tags with IDs 0 through
 * kNumTags36h11 - 1. */
static std::unordered_map<int, cv::Mat> LoadAprilTagImages() {
  std::unordered_map<int, cv::Mat> retVal{};
  for (int i = 0; i < kNumTags36h11; i++) {
    cv::Mat tagImage = Get36h11TagImage(i);
    retVal[i] = tagImage;
  }
  return retVal;
}

/**
 * Gets the points representing the corners of this image. Because image pixels
 * are accessed through a cv::Mat, the point (0,0) actually represents the
 * center of the top-left pixel and not the actual top-left corner.
 *
 * <p>Order of corners returned is: [BL, BR, TR, TL]
 *
 * @param size Size of image
 * @return The corners
 */
static std::vector<cv::Point2f> GetImageCorners(const cv::Size& size) {
  std::vector<cv::Point2f> retVal{};
  retVal.emplace_back(cv::Point2f{-0.5f, size.height - 0.5f});
  retVal.emplace_back(cv::Point2f{size.width - 0.5f, size.height - 0.5f});
  retVal.emplace_back(cv::Point2f{size.width - 0.5f, -0.5f});
  retVal.emplace_back(cv::Point2f{-0.5f, -0.5f});
  return retVal;
}
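
// Worked example (illustrative): for a 4x4 image, GetImageCorners returns
//   BL = (-0.5, 3.5), BR = (3.5, 3.5), TR = (3.5, -0.5), TL = (-0.5, -0.5),
// i.e. the outer edges of the pixel grid rather than the pixel centers.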

/**
 * Gets the points representing the corners of the marker (the black square)
 * in the tag image.
 *
 * @param scale The scale of the tag image (10*scale x 10*scale image)
 * @return The points
 */
static std::vector<cv::Point2f> Get36h11MarkerPts(int scale) {
  cv::Rect2f roi36h11{cv::Point2f{1, 1}, cv::Point2f{8, 8}};
  roi36h11.x *= scale;
  roi36h11.y *= scale;
  roi36h11.width *= scale;
  roi36h11.height *= scale;
  std::vector<cv::Point2f> pts = GetImageCorners(roi36h11.size());
  for (size_t i = 0; i < pts.size(); i++) {
    cv::Point2f pt = pts[i];
    pts[i] = cv::Point2f{roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y};
  }
  return pts;
}
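
// Worked example (illustrative): with scale = 1 the marker ROI spans
// (1,1)-(8,8) in the 10x10 tag image, so the returned corners are
//   BL = (0.5, 7.5), BR = (7.5, 7.5), TR = (7.5, 0.5), TL = (0.5, 0.5).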

/**
 * Gets the points representing the corners of the marker (the black square)
 * at the default scale of 1.
 *
 * @return The points
 */
static std::vector<cv::Point2f> Get36h11MarkerPts() {
  return Get36h11MarkerPts(1);
}

static const std::unordered_map<int, cv::Mat> kTag36h11Images =
    LoadAprilTagImages();
static const std::vector<cv::Point2f> kTag36h11MarkPts = Get36h11MarkerPts();

/** Updates the properties of this cs::CvSource video stream with the given
 * camera properties. */
[[maybe_unused]] static void UpdateVideoProp(cs::CvSource& video,
                                             const SimCameraProperties& prop) {
  video.SetResolution(prop.GetResWidth(), prop.GetResHeight());
  video.SetFPS(prop.GetFPS().to<int>());
}
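
// Usage sketch (the stream name and camera model here are assumptions): keep
// a simulated CvSource in sync with the camera properties before pushing
// frames to it.
//
//   cs::CvSource video{"sim_cam", cs::VideoMode::PixelFormat::kGray,
//                      640, 480, 30};
//   SimCameraProperties prop{};  // default camera model
//   UpdateVideoProp(video, prop);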

/**
 * Warps the image of a specific 36h11 AprilTag onto the destination image at
 * the given points.
 *
 * @param tagId The id of the specific tag to warp onto the destination image
 * @param dstPoints Points(4) in destination image where the tag marker (black
 * square) corners should be warped onto.
 * @param antialiasing If antialiasing should be performed by automatically
 * supersampling/interpolating the warped image. This should be used if
 * better stream quality is desired or target detection is being done on the
 * stream, but can hurt performance.
 * @param destination The destination image to place the warped tag image onto.
 */
[[maybe_unused]] static void Warp165h5TagImage(
    int tagId, const std::vector<cv::Point2f>& dstPoints, bool antialiasing,
    cv::Mat& destination) {
  if (!kTag36h11Images.contains(tagId)) {
    return;
  }
  cv::Mat tagImage = kTag36h11Images.at(tagId);
  std::vector<cv::Point2f> tagPoints{kTag36h11MarkPts};
  std::vector<cv::Point2f> tagImageCorners{GetImageCorners(tagImage.size())};
  std::vector<cv::Point2f> dstPointMat = dstPoints;
  cv::Rect boundingRect = cv::boundingRect(dstPointMat);
  // Find the perspective transform from the marker corners to the destination
  // points, and use it to find where the full tag image lands.
  cv::Mat perspecTrf = cv::getPerspectiveTransform(tagPoints, dstPointMat);
  std::vector<cv::Point2f> extremeCorners{};
  cv::perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
  boundingRect = cv::boundingRect(extremeCorners);

  // Estimate how much the warp upscales the tag, to choose an interpolation
  // strategy and supersampling factor.
  double warpedContourArea = cv::contourArea(extremeCorners);
  double warpedTagUpscale =
      std::sqrt(warpedContourArea) / std::sqrt(tagImage.size().area());
  int warpStrat = cv::INTER_NEAREST;

  int supersampling = 6;
  supersampling = static_cast<int>(std::ceil(supersampling / warpedTagUpscale));
  supersampling = std::max(std::min(supersampling, 10), 1);

  cv::Mat scaledTagImage{};
  if (warpedTagUpscale > 2.0) {
    warpStrat = cv::INTER_LINEAR;
    int scaleFactor = static_cast<int>(warpedTagUpscale / 3.0) + 2;
    scaleFactor = std::max(std::min(scaleFactor, 40), 1);
    scaleFactor *= supersampling;
    cv::resize(tagImage, scaledTagImage, cv::Size{}, scaleFactor, scaleFactor,
               cv::INTER_NEAREST);
    tagPoints = Get36h11MarkerPts(scaleFactor);
  } else {
    scaledTagImage = tagImage;
  }

  // Pad the bounding rect by one pixel and clamp it to the destination image.
  boundingRect.x -= 1;
  boundingRect.y -= 1;
  boundingRect.width += 2;
  boundingRect.height += 2;
  if (boundingRect.x < 0) {
    boundingRect.width += boundingRect.x;
    boundingRect.x = 0;
  }
  if (boundingRect.y < 0) {
    boundingRect.height += boundingRect.y;
    boundingRect.y = 0;
  }
  boundingRect.width =
      std::min(destination.size().width - boundingRect.x, boundingRect.width);
  boundingRect.height =
      std::min(destination.size().height - boundingRect.y, boundingRect.height);
  if (boundingRect.width <= 0 || boundingRect.height <= 0) {
    return;
  }

  // Warp into a supersampled ROI rather than the full destination image.
  std::vector<cv::Point2f> scaledDstPts{};
  if (supersampling > 1) {
    cv::multiply(dstPointMat,
                 cv::Scalar{static_cast<double>(supersampling),
                            static_cast<double>(supersampling)},
                 scaledDstPts);
    boundingRect.x *= supersampling;
    boundingRect.y *= supersampling;
    boundingRect.width *= supersampling;
    boundingRect.height *= supersampling;
  } else {
    scaledDstPts = dstPointMat;
  }

  cv::subtract(scaledDstPts,
               cv::Scalar{static_cast<double>(boundingRect.tl().x),
                          static_cast<double>(boundingRect.tl().y)},
               scaledDstPts);
  perspecTrf = cv::getPerspectiveTransform(tagPoints, scaledDstPts);

  cv::Mat tempRoi{};
  cv::warpPerspective(scaledTagImage, tempRoi, perspecTrf, boundingRect.size(),
                      warpStrat);

  // Downsample the supersampled ROI back to the destination resolution.
  if (supersampling > 1) {
    boundingRect.x /= supersampling;
    boundingRect.y /= supersampling;
    boundingRect.width /= supersampling;
    boundingRect.height /= supersampling;
    cv::resize(tempRoi, tempRoi, boundingRect.size(), 0, 0, cv::INTER_AREA);
  }

  // Build a mask of the warped tag's convex hull, slightly dilated, so only
  // tag pixels are copied into the destination.
  cv::Mat tempMask{cv::Mat::zeros(tempRoi.size(), CV_8UC1)};
  cv::subtract(extremeCorners,
               cv::Scalar{static_cast<float>(boundingRect.tl().x),
                          static_cast<float>(boundingRect.tl().y)},
               extremeCorners);
  cv::Point2f tempCenter{};
  tempCenter.x =
      std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
                      [&extremeCorners](double acc, const cv::Point2f& p2) {
                        return acc + p2.x / extremeCorners.size();
                      });
  tempCenter.y =
      std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
                      [&extremeCorners](double acc, const cv::Point2f& p2) {
                        return acc + p2.y / extremeCorners.size();
                      });

  // Push each corner one pixel outward from the center so the mask fully
  // covers the warped tag's edges.
  for (auto& corner : extremeCorners) {
    float xDiff = corner.x - tempCenter.x;
    float yDiff = corner.y - tempCenter.y;
    xDiff += 1 * mathutil::sgn(xDiff);
    yDiff += 1 * mathutil::sgn(yDiff);
    corner = cv::Point2f{tempCenter.x + xDiff, tempCenter.y + yDiff};
  }

  std::vector<cv::Point> extremeCornerInt{extremeCorners.begin(),
                                          extremeCorners.end()};
  cv::fillConvexPoly(tempMask, extremeCornerInt, cv::Scalar{255});

  cv::copyTo(tempRoi, destination(boundingRect), tempMask);
}
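
// Usage sketch (corner values here are made up): warp tag 3 onto a blank
// grayscale camera frame, with antialiasing requested for stream quality.
//
//   cv::Mat frame{cv::Mat::zeros(480, 640, CV_8UC1)};
//   std::vector<cv::Point2f> corners{  // [BL, BR, TR, TL] marker corners
//       {300, 280}, {340, 280}, {340, 240}, {300, 240}};
//   Warp165h5TagImage(3, corners, true, frame);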

/**
 * Given a line thickness in a 640x480 image, try to scale to the given
 * destination image resolution.
 *
 * @param thickness480p A hypothetical line thickness in a 640x480 image
 * @param destination The destination image to scale to
 * @return Scaled thickness which cannot be less than 1
 */
static double GetScaledThickness(double thickness480p,
                                 const cv::Mat& destination) {
  double scaleX = destination.size().width / 640.0;
  double scaleY = destination.size().height / 480.0;
  double minScale = std::min(scaleX, scaleY);
  return std::max(thickness480p * minScale, 1.0);
}
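
// Worked example (illustrative): for a 1280x720 destination, scaleX = 2.0 and
// scaleY = 1.5, so a 2 px line at 480p becomes max(2 * 1.5, 1.0) = 3 px.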

/**
 * Draw a filled ellipse in the destination image.
 *
 * @param dstPoints The points in the destination image representing the
 * rectangle in which the ellipse is inscribed.
 * @param color The color of the ellipse. This is a scalar with BGR values
 * (0-255)
 * @param destination The destination image to draw onto. The image should be in
 * the BGR color space.
 */
[[maybe_unused]] static void DrawInscribedEllipse(
    const std::vector<cv::Point2f>& dstPoints, const cv::Scalar& color,
    cv::Mat& destination) {
  cv::RotatedRect rect = OpenCVHelp::GetMinAreaRect(dstPoints);
  cv::ellipse(destination, rect, color, -1, cv::LINE_AA);
}
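
// Usage sketch (illustrative): fill the min-area ellipse inscribed in a
// target's corner points, e.g. to render a round target on a BGR frame.
//
//   DrawInscribedEllipse(corners, cv::Scalar{0, 255, 0}, colorFrame);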

/**
 * Draws a polygon through the given points on the destination image, either as
 * an outline or, when thickness is non-positive, as a filled polygon.
 *
 * @param dstPoints The polygon vertices in the destination image
 * @param thickness Line thickness in pixels; a non-positive value fills the
 * polygon instead
 * @param color The color of the polygon (BGR)
 * @param isClosed If a line should also be drawn from the last point back to
 * the first (ignored when filling)
 * @param destination The destination image to draw onto
 */
static void DrawPoly(const std::vector<cv::Point2f>& dstPoints, int thickness,
                     const cv::Scalar& color, bool isClosed,
                     cv::Mat& destination) {
  std::vector<cv::Point> intDstPoints{dstPoints.begin(), dstPoints.end()};
  std::vector<std::vector<cv::Point>> listOfListOfPoints;
  listOfListOfPoints.emplace_back(intDstPoints);
  if (thickness > 0) {
    cv::polylines(destination, listOfListOfPoints, isClosed, color, thickness,
                  cv::LINE_AA);
  } else {
    cv::fillPoly(destination, listOfListOfPoints, color, cv::LINE_AA);
  }
}

/**
 * Draws a contour around the given points and text of the id onto the
 * destination image.
 *
 * @param id Fiducial ID number to draw
 * @param dstPoints Points representing the four corners of the tag marker
 * (black square) in the destination image.
 * @param destination The destination image to draw onto. The image should be in
 * the BGR color space.
 */
[[maybe_unused]] static void DrawTagDetection(
    int id, const std::vector<cv::Point2f>& dstPoints, cv::Mat& destination) {
  double thickness = GetScaledThickness(1, destination);
  DrawPoly(dstPoints, static_cast<int>(thickness), cv::Scalar{0, 0, 255}, true,
           destination);
  cv::Rect2d rect{cv::boundingRect(dstPoints)};
  cv::Point2d textPt{rect.x + rect.width, rect.y};
  textPt.x += thickness;
  textPt.y += thickness;
  cv::putText(destination, std::to_string(id), textPt, cv::FONT_HERSHEY_PLAIN,
              1.5 * thickness, cv::Scalar{0, 200, 0},
              static_cast<int>(thickness), cv::LINE_AA);
}
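
// Usage sketch (illustrative): overlay a red contour and green ID label on the
// same corners a tag was warped to (see the Warp165h5TagImage example above).
//
//   DrawTagDetection(3, corners, colorFrame);  // colorFrame is a BGR cv::Mat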

/**
 * The translations used to draw the field side walls and driver station walls.
 * It is a vector of vectors because the translations are not all connected.
 */
static std::vector<std::vector<frc::Translation3d>> GetFieldWallLines() {
  std::vector<std::vector<frc::Translation3d>> list;

  const units::meter_t sideHt = 19.5_in;
  const units::meter_t driveHt = 35_in;
  const units::meter_t topHt = 78_in;

  // field floor
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{0_m, 0_m, 0_m},
      frc::Translation3d{fieldLength, 0_m, 0_m},
      frc::Translation3d{fieldLength, fieldWidth, 0_m},
      frc::Translation3d{0_m, fieldWidth, 0_m},
      frc::Translation3d{0_m, 0_m, 0_m}});

  // right side wall
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{0_m, 0_m, 0_m}, frc::Translation3d{0_m, 0_m, sideHt},
      frc::Translation3d{fieldLength, 0_m, sideHt},
      frc::Translation3d{fieldLength, 0_m, 0_m}});

  // red driverstation
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{fieldLength, 0_m, sideHt},
      frc::Translation3d{fieldLength, 0_m, topHt},
      frc::Translation3d{fieldLength, fieldWidth, topHt},
      frc::Translation3d{fieldLength, fieldWidth, sideHt},
  });
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{fieldLength, 0_m, driveHt},
      frc::Translation3d{fieldLength, fieldWidth, driveHt}});

  // left side wall
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{0_m, fieldWidth, 0_m},
      frc::Translation3d{0_m, fieldWidth, sideHt},
      frc::Translation3d{fieldLength, fieldWidth, sideHt},
      frc::Translation3d{fieldLength, fieldWidth, 0_m}});

  // blue driverstation
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{0_m, 0_m, sideHt},
      frc::Translation3d{0_m, 0_m, topHt},
      frc::Translation3d{0_m, fieldWidth, topHt},
      frc::Translation3d{0_m, fieldWidth, sideHt},
  });
  list.emplace_back(std::vector<frc::Translation3d>{
      frc::Translation3d{0_m, 0_m, driveHt},
      frc::Translation3d{0_m, fieldWidth, driveHt}});

  return list;
}

/**
 * The translations used to draw the field floor subdivisions (not the floor
 * outline). It is a vector of vectors because the translations are not all
 * connected.
 *
 * @param subdivisions How many "subdivisions" along the width/length of the
 * floor. E.g. 3 subdivisions would mean 2 lines along the length and 2 lines
 * along the width creating a 3x3 "grid".
 */
static std::vector<std::vector<frc::Translation3d>> GetFieldFloorLines(
    int subdivisions) {
  std::vector<std::vector<frc::Translation3d>> list;
  const units::meter_t subLength = fieldLength / subdivisions;
  const units::meter_t subWidth = fieldWidth / subdivisions;

  for (int i = 0; i < subdivisions; i++) {
    list.emplace_back(std::vector<frc::Translation3d>{
        frc::Translation3d{0_m, subWidth * (i + 1), 0_m},
        frc::Translation3d{fieldLength, subWidth * (i + 1), 0_m}});
    list.emplace_back(std::vector<frc::Translation3d>{
        frc::Translation3d{subLength * (i + 1), 0_m, 0_m},
        frc::Translation3d{subLength * (i + 1), fieldWidth, 0_m}});
  }
  return list;
}
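
// Worked example (illustrative): GetFieldFloorLines(3) returns six segments,
// at y = fieldWidth * {1/3, 2/3, 3/3} and x = fieldLength * {1/3, 2/3, 3/3}
// (the final pair coincides with the far field edges).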

/**
 * Converts 3D lines, represented by the given series of translations, into
 * polygon(s) in the camera's image.
 *
 * @param camRt The change in basis from world coordinates to camera
 * coordinates. See RotTrlTransform3d#makeRelativeTo(Pose3d).
 * @param prop The simulated camera's properties.
 * @param trls A sequential series of translations defining the polygon to be
 * drawn.
 * @param resolution Resolution as a fraction (0 - 1) of the video frame's
 * diagonal length in pixels. Line segments will be subdivided if they exceed
 * this resolution.
 * @param isClosed If the final translation should also draw a line to the first
 * translation.
 * @param destination The destination image that is being drawn to.
 * @return A list of polygons (each an array of points)
 */
static std::vector<std::vector<cv::Point2f>> PolyFrom3dLines(
    const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
    const std::vector<frc::Translation3d>& trls, double resolution,
    bool isClosed, cv::Mat& destination) {
  // Convert the fractional resolution into a pixel length based on the frame
  // diagonal.
  resolution = std::hypot(destination.size().height, destination.size().width) *
               resolution;
  std::vector<frc::Translation3d> pts{trls};
  if (isClosed) {
    pts.emplace_back(pts[0]);
  }
  std::vector<std::vector<cv::Point2f>> polyPointList{};

  for (size_t i = 0; i < pts.size() - 1; i++) {
    frc::Translation3d pta = pts[i];
    frc::Translation3d ptb = pts[i + 1];

    // Clip the segment to the portion visible in the camera's frustum.
    std::pair<std::optional<double>, std::optional<double>> inter =
        prop.GetVisibleLine(camRt, pta, ptb);
    if (!inter.second) {
      continue;
    }

    double inter1 = inter.first.value();
    double inter2 = inter.second.value();
    frc::Translation3d baseDelta = ptb - pta;
    frc::Translation3d old_pta = pta;
    if (inter1 > 0) {
      pta = old_pta + baseDelta * inter1;
    }
    if (inter2 < 1) {
      ptb = old_pta + baseDelta * inter2;
    }
    baseDelta = ptb - pta;

    std::vector<cv::Point2f> poly = OpenCVHelp::ProjectPoints(
        prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, {pta, ptb});
    cv::Point2d pxa = poly[0];
    cv::Point2d pxb = poly[1];

    // Subdivide the segment if its projected length exceeds the resolution,
    // so lens distortion bends the drawn line correctly.
    double pxDist = std::hypot(pxb.x - pxa.x, pxb.y - pxa.y);
    int subdivisions = static_cast<int>(pxDist / resolution);
    frc::Translation3d subDelta = baseDelta / (subdivisions + 1);
    std::vector<frc::Translation3d> subPts{};
    for (int j = 0; j < subdivisions; j++) {
      subPts.emplace_back(pta + (subDelta * (j + 1)));
    }
    if (subPts.size() > 0) {
      std::vector<cv::Point2f> toAdd = OpenCVHelp::ProjectPoints(
          prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, subPts);
      poly.insert(poly.begin() + 1, toAdd.begin(), toAdd.end());
    }

    polyPointList.emplace_back(poly);
  }

  return polyPointList;
}
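
// Usage sketch (illustrative): project one floor line into a camera frame.
// camRt would typically come from RotTrlTransform3d::MakeRelativeTo (name
// assumed from the Java API referenced above) using the camera's pose.
//
//   auto segments = GetFieldFloorLines(3);
//   auto polys = PolyFrom3dLines(camRt, prop, segments[0], 0.01, false, frame);
//   for (const auto& poly : polys) {
//     DrawPoly(poly, 1, cv::Scalar{255, 255, 255}, false, frame);
//   }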

/**
 * Draw a wireframe of the field to the given image.
 *
 * @param camRt The change in basis from world coordinates to camera
 * coordinates. See RotTrlTransform3d#makeRelativeTo(frc::Pose3d).
 * @param prop The simulated camera's properties.
 * @param resolution Resolution as a fraction (0 - 1) of the video frame's
 * diagonal length in pixels. Line segments will be subdivided if they exceed
 * this resolution.
 * @param wallThickness Thickness of the lines used for drawing the field walls
 * in pixels. This is scaled by GetScaledThickness(double, cv::Mat).
 * @param wallColor Color of the lines used for drawing the field walls.
 * @param floorSubdivisions An NxN "grid" is created from the floor where this
 * parameter is N, which defines the floor lines.
 * @param floorThickness Thickness of the lines used for drawing the field floor
 * grid in pixels. This is scaled by GetScaledThickness(double, cv::Mat).
 * @param floorColor Color of the lines used for drawing the field floor grid.
 * @param destination The destination image to draw to.
 */
[[maybe_unused]] static void DrawFieldWireFrame(
    const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
    double resolution, double wallThickness, const cv::Scalar& wallColor,
    int floorSubdivisions, double floorThickness, const cv::Scalar& floorColor,
    cv::Mat& destination) {
  for (const auto& trls : GetFieldFloorLines(floorSubdivisions)) {
    auto polys =
        PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
    for (const auto& poly : polys) {
      DrawPoly(poly,
               static_cast<int>(
                   std::round(GetScaledThickness(floorThickness, destination))),
               floorColor, false, destination);
    }
  }
  for (const auto& trls : GetFieldWallLines()) {
    auto polys =
        PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
    for (const auto& poly : polys) {
      DrawPoly(poly,
               static_cast<int>(
                   std::round(GetScaledThickness(wallThickness, destination))),
               wallColor, false, destination);
    }
  }
}
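
// Usage sketch (poses and method names here are assumptions): draw the field
// wireframe into a 640x480 BGR frame from a camera 1 m above the floor,
// looking down-field.
//
//   frc::Pose3d camPose{1_m, 1_m, 1_m, frc::Rotation3d{}};
//   RotTrlTransform3d camRt = RotTrlTransform3d::MakeRelativeTo(camPose);
//   SimCameraProperties prop{};
//   cv::Mat frame{cv::Mat::zeros(480, 640, CV_8UC3)};
//   DrawFieldWireFrame(camRt, prop, 0.01, 2.0, cv::Scalar{255, 255, 255}, 3,
//                      1.0, cv::Scalar{120, 120, 120}, frame);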
} // namespace VideoSimUtil
} // namespace photon