PhotonVision C++ v2026.0.1-beta
VideoSimUtil.h
Go to the documentation of this file.
1/*
2 * MIT License
3 *
4 * Copyright (c) PhotonVision
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#pragma once
26
27#include <algorithm>
28#include <numeric>
29#include <string>
30#include <unordered_map>
31#include <utility>
32#include <vector>
33
34#include <cscore_cv.h>
35#include <frc/apriltag/AprilTag.h>
36#include <opencv2/core.hpp>
37#include <opencv2/imgcodecs.hpp>
38#include <opencv2/objdetect.hpp>
39#include <units/length.h>
40
41#include "SimCameraProperties.h"
42#include "TargetModel.h"
43
44namespace mathutil {
45template <typename T>
46int sgn(T val) {
47 return (T(0) < val) - (val < T(0));
48}
49} // namespace mathutil
50
51namespace photon {
52namespace VideoSimUtil {
53static constexpr int kNumTags36h11 = 30;
54
55static constexpr units::meter_t fieldLength{16.54175_m};
56static constexpr units::meter_t fieldWidth{8.0137_m};
57
58static cv::Mat Get36h11TagImage(int id) {
59 wpi::RawFrame frame;
60 frc::AprilTag::Generate36h11AprilTagImage(&frame, id);
61 cv::Mat markerImage{frame.height, frame.width, CV_8UC1, frame.data,
62 static_cast<size_t>(frame.stride)};
63 cv::Mat markerClone = markerImage.clone();
64 return markerClone;
65}
66
67static std::unordered_map<int, cv::Mat> LoadAprilTagImages() {
68 std::unordered_map<int, cv::Mat> retVal{};
69 for (int i = 0; i < kNumTags36h11; i++) {
70 cv::Mat tagImage = Get36h11TagImage(i);
71 retVal[i] = tagImage;
72 }
73 return retVal;
74}
75
76static std::vector<cv::Point2f> GetImageCorners(const cv::Size& size) {
77 std::vector<cv::Point2f> retVal{};
78 retVal.emplace_back(cv::Point2f{-0.5f, size.height - 0.5f});
79 retVal.emplace_back(cv::Point2f{size.width - 0.5f, size.height - 0.5f});
80 retVal.emplace_back(cv::Point2f{size.width - 0.5f, -0.5f});
81 retVal.emplace_back(cv::Point2f{-0.5f, -0.5f});
82 return retVal;
83}
84
85static std::vector<cv::Point2f> Get36h11MarkerPts(int scale) {
86 cv::Rect2f roi36h11{cv::Point2f{1, 1}, cv::Point2f{8, 8}};
87 roi36h11.x *= scale;
88 roi36h11.y *= scale;
89 roi36h11.width *= scale;
90 roi36h11.height *= scale;
91 std::vector<cv::Point2f> pts = GetImageCorners(roi36h11.size());
92 for (size_t i = 0; i < pts.size(); i++) {
93 cv::Point2f pt = pts[i];
94 pts[i] = cv::Point2f{roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y};
95 }
96 return pts;
97}
98
99static std::vector<cv::Point2f> Get36h11MarkerPts() {
100 return Get36h11MarkerPts(1);
101}
102
103static const std::unordered_map<int, cv::Mat> kTag36h11Images =
104 LoadAprilTagImages();
105static const std::vector<cv::Point2f> kTag36h11MarkPts = Get36h11MarkerPts();
106
107[[maybe_unused]] static void UpdateVideoProp(cs::CvSource& video,
108 const SimCameraProperties& prop) {
109 video.SetResolution(prop.GetResWidth(), prop.GetResHeight());
110 video.SetFPS(prop.GetFPS().to<int>());
111}
112
113[[maybe_unused]] static void Warp165h5TagImage(
114 int tagId, const std::vector<cv::Point2f>& dstPoints, bool antialiasing,
115 cv::Mat& destination) {
116 if (!kTag36h11Images.contains(tagId)) {
117 return;
118 }
119 cv::Mat tagImage = kTag36h11Images.at(tagId);
120 std::vector<cv::Point2f> tagPoints{kTag36h11MarkPts};
121 std::vector<cv::Point2f> tagImageCorners{GetImageCorners(tagImage.size())};
122 std::vector<cv::Point2f> dstPointMat = dstPoints;
123 cv::Rect boundingRect = cv::boundingRect(dstPointMat);
124 cv::Mat perspecTrf = cv::getPerspectiveTransform(tagPoints, dstPointMat);
125 std::vector<cv::Point2f> extremeCorners{};
126 cv::perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
127 boundingRect = cv::boundingRect(extremeCorners);
128
129 double warpedContourArea = cv::contourArea(extremeCorners);
130 double warpedTagUpscale =
131 std::sqrt(warpedContourArea) / std::sqrt(tagImage.size().area());
132 int warpStrat = cv::INTER_NEAREST;
133
134 int supersampling = 6;
135 supersampling = static_cast<int>(std::ceil(supersampling / warpedTagUpscale));
136 supersampling = std::max(std::min(supersampling, 10), 1);
137
138 cv::Mat scaledTagImage{};
139 if (warpedTagUpscale > 2.0) {
140 warpStrat = cv::INTER_LINEAR;
141 int scaleFactor = static_cast<int>(warpedTagUpscale / 3.0) + 2;
142 scaleFactor = std::max(std::min(scaleFactor, 40), 1);
143 scaleFactor *= supersampling;
144 cv::resize(tagImage, scaledTagImage, cv::Size{}, scaleFactor, scaleFactor,
145 cv::INTER_NEAREST);
146 tagPoints = Get36h11MarkerPts(scaleFactor);
147 } else {
148 scaledTagImage = tagImage;
149 }
150
151 boundingRect.x -= 1;
152 boundingRect.y -= 1;
153 boundingRect.width += 2;
154 boundingRect.height += 2;
155 if (boundingRect.x < 0) {
156 boundingRect.width += boundingRect.x;
157 boundingRect.x = 0;
158 }
159 if (boundingRect.y < 0) {
160 boundingRect.height += boundingRect.y;
161 boundingRect.y = 0;
162 }
163 boundingRect.width =
164 std::min(destination.size().width - boundingRect.x, boundingRect.width);
165 boundingRect.height =
166 std::min(destination.size().height - boundingRect.y, boundingRect.height);
167 if (boundingRect.width <= 0 || boundingRect.height <= 0) {
168 return;
169 }
170
171 std::vector<cv::Point2f> scaledDstPts{};
172 if (supersampling > 1) {
173 cv::multiply(dstPointMat,
174 cv::Scalar{static_cast<double>(supersampling),
175 static_cast<double>(supersampling)},
176 scaledDstPts);
177 boundingRect.x *= supersampling;
178 boundingRect.y *= supersampling;
179 boundingRect.width *= supersampling;
180 boundingRect.height *= supersampling;
181 } else {
182 scaledDstPts = dstPointMat;
183 }
184
185 cv::subtract(scaledDstPts,
186 cv::Scalar{static_cast<double>(boundingRect.tl().x),
187 static_cast<double>(boundingRect.tl().y)},
188 scaledDstPts);
189 perspecTrf = cv::getPerspectiveTransform(tagPoints, scaledDstPts);
190
191 cv::Mat tempRoi{};
192 cv::warpPerspective(scaledTagImage, tempRoi, perspecTrf, boundingRect.size(),
193 warpStrat);
194
195 if (supersampling > 1) {
196 boundingRect.x /= supersampling;
197 boundingRect.y /= supersampling;
198 boundingRect.width /= supersampling;
199 boundingRect.height /= supersampling;
200 cv::resize(tempRoi, tempRoi, boundingRect.size(), 0, 0, cv::INTER_AREA);
201 }
202
203 cv::Mat tempMask{cv::Mat::zeros(tempRoi.size(), CV_8UC1)};
204 cv::subtract(extremeCorners,
205 cv::Scalar{static_cast<float>(boundingRect.tl().x),
206 static_cast<float>(boundingRect.tl().y)},
207 extremeCorners);
208 cv::Point2f tempCenter{};
209 tempCenter.x =
210 std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
211 [extremeCorners](float acc, const cv::Point2f& p2) {
212 return acc + p2.x / extremeCorners.size();
213 });
214 tempCenter.y =
215 std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
216 [extremeCorners](float acc, const cv::Point2f& p2) {
217 return acc + p2.y / extremeCorners.size();
218 });
219
220 for (auto& corner : extremeCorners) {
221 float xDiff = corner.x - tempCenter.x;
222 float yDiff = corner.y - tempCenter.y;
223 xDiff += 1 * mathutil::sgn(xDiff);
224 yDiff += 1 * mathutil::sgn(yDiff);
225 corner = cv::Point2f{tempCenter.x + xDiff, tempCenter.y + yDiff};
226 }
227
228 std::vector<cv::Point> extremeCornerInt{extremeCorners.begin(),
229 extremeCorners.end()};
230 cv::fillConvexPoly(tempMask, extremeCornerInt, cv::Scalar{255});
231
232 cv::copyTo(tempRoi, destination(boundingRect), tempMask);
233}
234
235static double GetScaledThickness(double thickness480p,
236 const cv::Mat& destinationMat) {
237 double scaleX = destinationMat.size().width / 640.0;
238 double scaleY = destinationMat.size().height / 480.0;
239 double minScale = std::min(scaleX, scaleY);
240 return std::max(thickness480p * minScale, 1.0);
241}
242
243[[maybe_unused]] static void DrawInscribedEllipse(
244 const std::vector<cv::Point2f>& dstPoints, const cv::Scalar& color,
245 cv::Mat& destination) {
246 cv::RotatedRect rect = OpenCVHelp::GetMinAreaRect(dstPoints);
247 cv::ellipse(destination, rect, color, -1, cv::LINE_AA);
248}
249
250static void DrawPoly(const std::vector<cv::Point2f>& dstPoints, int thickness,
251 const cv::Scalar& color, bool isClosed,
252 cv::Mat& destination) {
253 std::vector<cv::Point> intDstPoints{dstPoints.begin(), dstPoints.end()};
254 std::vector<std::vector<cv::Point>> listOfListOfPoints;
255 listOfListOfPoints.emplace_back(intDstPoints);
256 if (thickness > 0) {
257 cv::polylines(destination, listOfListOfPoints, isClosed, color, thickness,
258 cv::LINE_AA);
259 } else {
260 cv::fillPoly(destination, listOfListOfPoints, color, cv::LINE_AA);
261 }
262}
263
264[[maybe_unused]] static void DrawTagDetection(
265 int id, const std::vector<cv::Point2f>& dstPoints, cv::Mat& destination) {
266 double thickness = GetScaledThickness(1, destination);
267 DrawPoly(dstPoints, static_cast<int>(thickness), cv::Scalar{0, 0, 255}, true,
268 destination);
269 cv::Rect2d rect{cv::boundingRect(dstPoints)};
270 cv::Point2d textPt{rect.x + rect.width, rect.y};
271 textPt.x += thickness;
272 textPt.y += thickness;
273 cv::putText(destination, std::to_string(id), textPt, cv::FONT_HERSHEY_PLAIN,
274 1.5 * thickness, cv::Scalar{0, 200, 0},
275 static_cast<int>(thickness), cv::LINE_AA);
276}
277
278static std::vector<std::vector<frc::Translation3d>> GetFieldWallLines() {
279 std::vector<std::vector<frc::Translation3d>> list;
280
281 const units::meter_t sideHt = 19.5_in;
282 const units::meter_t driveHt = 35_in;
283 const units::meter_t topHt = 78_in;
284
285 // field floor
286 list.emplace_back(std::vector<frc::Translation3d>{
287 frc::Translation3d{0_m, 0_m, 0_m},
288 frc::Translation3d{fieldLength, 0_m, 0_m},
289 frc::Translation3d{fieldLength, fieldWidth, 0_m},
290 frc::Translation3d{0_m, fieldWidth, 0_m},
291 frc::Translation3d{0_m, 0_m, 0_m}});
292
293 // right side wall
294 list.emplace_back(std::vector<frc::Translation3d>{
295 frc::Translation3d{0_m, 0_m, 0_m}, frc::Translation3d{0_m, 0_m, sideHt},
296 frc::Translation3d{fieldLength, 0_m, sideHt},
297 frc::Translation3d{fieldLength, 0_m, 0_m}});
298
299 // red driverstation
300 list.emplace_back(std::vector<frc::Translation3d>{
301 frc::Translation3d{fieldLength, 0_m, sideHt},
302 frc::Translation3d{fieldLength, 0_m, topHt},
303 frc::Translation3d{fieldLength, fieldWidth, topHt},
304 frc::Translation3d{fieldLength, fieldWidth, sideHt},
305 });
306 list.emplace_back(std::vector<frc::Translation3d>{
307 frc::Translation3d{fieldLength, 0_m, driveHt},
308 frc::Translation3d{fieldLength, fieldWidth, driveHt}});
309
310 // left side wall
311 list.emplace_back(std::vector<frc::Translation3d>{
312 frc::Translation3d{0_m, fieldWidth, 0_m},
313 frc::Translation3d{0_m, fieldWidth, sideHt},
314 frc::Translation3d{fieldLength, fieldWidth, sideHt},
315 frc::Translation3d{fieldLength, fieldWidth, 0_m}});
316
317 // blue driverstation
318 list.emplace_back(std::vector<frc::Translation3d>{
319 frc::Translation3d{0_m, 0_m, sideHt},
320 frc::Translation3d{0_m, 0_m, topHt},
321 frc::Translation3d{0_m, fieldWidth, topHt},
322 frc::Translation3d{0_m, fieldWidth, sideHt},
323 });
324 list.emplace_back(std::vector<frc::Translation3d>{
325 frc::Translation3d{0_m, 0_m, driveHt},
326 frc::Translation3d{0_m, fieldWidth, driveHt}});
327
328 return list;
329}
330
331static std::vector<std::vector<frc::Translation3d>> GetFieldFloorLines(
332 int subdivisions) {
333 std::vector<std::vector<frc::Translation3d>> list;
334 const units::meter_t subLength = fieldLength / subdivisions;
335 const units::meter_t subWidth = fieldWidth / subdivisions;
336
337 for (int i = 0; i < subdivisions; i++) {
338 list.emplace_back(std::vector<frc::Translation3d>{
339 frc::Translation3d{0_m, subWidth * (i + 1), 0_m},
340 frc::Translation3d{fieldLength, subWidth * (i + 1), 0_m}});
341 list.emplace_back(std::vector<frc::Translation3d>{
342 frc::Translation3d{subLength * (i + 1), 0_m, 0_m},
343 frc::Translation3d{subLength * (i + 1), fieldWidth, 0_m}});
344 }
345 return list;
346}
347
348static std::vector<std::vector<cv::Point2f>> PolyFrom3dLines(
349 const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
350 const std::vector<frc::Translation3d>& trls, double resolution,
351 bool isClosed, cv::Mat& destination) {
352 resolution = std::hypot(destination.size().height, destination.size().width) *
353 resolution;
354 std::vector<frc::Translation3d> pts{trls};
355 if (isClosed) {
356 pts.emplace_back(pts[0]);
357 }
358 std::vector<std::vector<cv::Point2f>> polyPointList{};
359
360 for (size_t i = 0; i < pts.size() - 1; i++) {
361 frc::Translation3d pta = pts[i];
362 frc::Translation3d ptb = pts[i + 1];
363
364 std::pair<std::optional<double>, std::optional<double>> inter =
365 prop.GetVisibleLine(camRt, pta, ptb);
366 if (!inter.second) {
367 continue;
368 }
369
370 double inter1 = inter.first.value();
371 double inter2 = inter.second.value();
372 frc::Translation3d baseDelta = ptb - pta;
373 frc::Translation3d old_pta = pta;
374 if (inter1 > 0) {
375 pta = old_pta + baseDelta * inter1;
376 }
377 if (inter2 < 1) {
378 ptb = old_pta + baseDelta * inter2;
379 }
380 baseDelta = ptb - pta;
381
382 std::vector<cv::Point2f> poly = OpenCVHelp::ProjectPoints(
383 prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, {pta, ptb});
384 cv::Point2d pxa = poly[0];
385 cv::Point2d pxb = poly[1];
386
387 double pxDist = std::hypot(pxb.x - pxa.x, pxb.y - pxa.y);
388 int subdivisions = static_cast<int>(pxDist / resolution);
389 frc::Translation3d subDelta = baseDelta / (subdivisions + 1);
390 std::vector<frc::Translation3d> subPts{};
391 for (int j = 0; j < subdivisions; j++) {
392 subPts.emplace_back(pta + (subDelta * (j + 1)));
393 }
394 if (subPts.size() > 0) {
395 std::vector<cv::Point2f> toAdd = OpenCVHelp::ProjectPoints(
396 prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, subPts);
397 poly.insert(poly.begin() + 1, toAdd.begin(), toAdd.end());
398 }
399
400 polyPointList.emplace_back(poly);
401 }
402
403 return polyPointList;
404}
405
406[[maybe_unused]] static void DrawFieldWireFrame(
407 const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
408 double resolution, double wallThickness, const cv::Scalar& wallColor,
409 int floorSubdivisions, double floorThickness, const cv::Scalar& floorColor,
410 cv::Mat& destination) {
411 for (const auto& trls : GetFieldFloorLines(floorSubdivisions)) {
412 auto polys =
413 PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
414 for (const auto& poly : polys) {
415 DrawPoly(poly,
416 static_cast<int>(
417 std::round(GetScaledThickness(floorThickness, destination))),
418 floorColor, false, destination);
419 }
420 }
421 for (const auto& trls : GetFieldWallLines()) {
422 auto polys =
423 PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
424 for (const auto& poly : polys) {
425 DrawPoly(poly,
426 static_cast<int>(
427 std::round(GetScaledThickness(wallThickness, destination))),
428 wallColor, false, destination);
429 }
430 }
431}
432} // namespace VideoSimUtil
433} // namespace photon
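The helpers in this header are normally driven internally by PhotonVision's camera simulation (PhotonCameraSim), but the sketch below shows how they can compose into a single rendered frame. It is an illustration only: the include path, the function name RenderSimFrame, and the assumption that a SimCameraProperties instance (prop), a camera transform (camRt), projected tag corners (tagCorners), and a tag ID are supplied by existing simulation code are all hypothetical, not library API.

// Sketch only: combine the VideoSimUtil helpers into one simulated frame.
// `prop`, `camRt`, `tagCorners`, and `tagId` are assumed to come from an
// existing camera simulation; this function is not part of the library.
#include <vector>

#include <cscore_cv.h>
#include <opencv2/core.hpp>

#include "photon/simulation/VideoSimUtil.h"  // include path is an assumption

void RenderSimFrame(cs::CvSource& video,
                    const photon::SimCameraProperties& prop,
                    const photon::RotTrlTransform3d& camRt,
                    const std::vector<cv::Point2f>& tagCorners, int tagId) {
  // Keep the CvSource resolution/FPS in sync with the simulated camera.
  photon::VideoSimUtil::UpdateVideoProp(video, prop);

  // Start from a black 8-bit grayscale frame at the simulated resolution.
  cv::Mat frame{
      cv::Mat::zeros(prop.GetResHeight(), prop.GetResWidth(), CV_8UC1)};

  // Field wireframe, then the warped tag texture, then the detection overlay.
  photon::VideoSimUtil::DrawFieldWireFrame(camRt, prop, 0.1, 1.5,
                                           cv::Scalar{80}, 6, 1.0,
                                           cv::Scalar{50}, frame);
  photon::VideoSimUtil::Warp165h5TagImage(tagId, tagCorners, true, frame);
  photon::VideoSimUtil::DrawTagDetection(tagId, tagCorners, frame);

  video.PutFrame(frame);
}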
photon::RotTrlTransform3d
Definition RotTrlTransform3d.h:27
photon::SimCameraProperties
Definition SimCameraProperties.h:43
Eigen::Matrix< double, 8, 1 > GetDistCoeffs() const
Definition SimCameraProperties.h:87
int GetResHeight() const
Definition SimCameraProperties.h:77
int GetResWidth() const
Definition SimCameraProperties.h:75
std::pair< std::optional< double >, std::optional< double > > GetVisibleLine(const RotTrlTransform3d &camRt, const frc::Translation3d &a, const frc::Translation3d &b) const
Eigen::Matrix< double, 3, 3 > GetIntrinsics() const
Definition SimCameraProperties.h:85
units::hertz_t GetFPS() const
Definition SimCameraProperties.h:89
mathutil
Definition VideoSimUtil.h:44
int sgn(T val)
Definition VideoSimUtil.h:46
static cv::RotatedRect GetMinAreaRect(const std::vector< cv::Point2f > &points)
Definition OpenCVHelp.h:56
static std::vector< cv::Point2f > ProjectPoints(const Eigen::Matrix< double, 3, 3 > &cameraMatrix, const Eigen::Matrix< double, 8, 1 > &distCoeffs, const RotTrlTransform3d &camRt, const std::vector< frc::Translation3d > &objectTranslations)
Definition OpenCVHelp.h:147
static std::vector< std::vector< frc::Translation3d > > GetFieldWallLines()
Definition VideoSimUtil.h:278
static std::vector< cv::Point2f > Get36h11MarkerPts()
Definition VideoSimUtil.h:99
static void UpdateVideoProp(cs::CvSource &video, const SimCameraProperties &prop)
Definition VideoSimUtil.h:107
static void DrawFieldWireFrame(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, double resolution, double wallThickness, const cv::Scalar &wallColor, int floorSubdivisions, double floorThickness, const cv::Scalar &floorColor, cv::Mat &destination)
Definition VideoSimUtil.h:406
static constexpr units::meter_t fieldLength
Definition VideoSimUtil.h:55
static cv::Mat Get36h11TagImage(int id)
Definition VideoSimUtil.h:58
static void DrawTagDetection(int id, const std::vector< cv::Point2f > &dstPoints, cv::Mat &destination)
Definition VideoSimUtil.h:264
static std::vector< std::vector< cv::Point2f > > PolyFrom3dLines(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, const std::vector< frc::Translation3d > &trls, double resolution, bool isClosed, cv::Mat &destination)
Definition VideoSimUtil.h:348
static constexpr units::meter_t fieldWidth
Definition VideoSimUtil.h:56
static std::unordered_map< int, cv::Mat > LoadAprilTagImages()
Definition VideoSimUtil.h:67
static void Warp165h5TagImage(int tagId, const std::vector< cv::Point2f > &dstPoints, bool antialiasing, cv::Mat &destination)
Definition VideoSimUtil.h:113
static std::vector< cv::Point2f > GetImageCorners(const cv::Size &size)
Definition VideoSimUtil.h:76
static void DrawPoly(const std::vector< cv::Point2f > &dstPoints, int thickness, const cv::Scalar &color, bool isClosed, cv::Mat &destination)
Definition VideoSimUtil.h:250
static double GetScaledThickness(double thickness480p, const cv::Mat &destinationMat)
Definition VideoSimUtil.h:235
static const std::vector< cv::Point2f > kTag36h11MarkPts
Definition VideoSimUtil.h:105
static void DrawInscribedEllipse(const std::vector< cv::Point2f > &dstPoints, const cv::Scalar &color, cv::Mat &destination)
Definition VideoSimUtil.h:243
static constexpr int kNumTags36h11
Definition VideoSimUtil.h:53
static const std::unordered_map< int, cv::Mat > kTag36h11Images
Definition VideoSimUtil.h:103
static std::vector< std::vector< frc::Translation3d > > GetFieldFloorLines(int subdivisions)
Definition VideoSimUtil.h:331
Definition TargetModel.h:27
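As a second illustration, the projection pipeline used by DrawFieldWireFrame (PolyFrom3dLines, GetScaledThickness, DrawPoly) can render arbitrary 3D line strips, not just the built-in field outline. The sketch below strokes a hypothetical 1 m square on the field floor; DrawFloorSquare, its parameters, and the include path are assumptions for illustration, not library API.

// Sketch only: project a closed 1 m square on the floor into the simulated
// image and stroke it, reusing the same pipeline as DrawFieldWireFrame.
#include <cmath>
#include <vector>

#include <frc/geometry/Translation3d.h>
#include <opencv2/core.hpp>
#include <units/length.h>

#include "photon/simulation/VideoSimUtil.h"  // include path is an assumption

void DrawFloorSquare(const photon::RotTrlTransform3d& camRt,
                     const photon::SimCameraProperties& prop, cv::Mat& frame) {
  std::vector<frc::Translation3d> square{
      frc::Translation3d{2_m, 2_m, 0_m}, frc::Translation3d{3_m, 2_m, 0_m},
      frc::Translation3d{3_m, 3_m, 0_m}, frc::Translation3d{2_m, 3_m, 0_m}};

  // resolution = 0.01: subdivide each visible edge roughly every 1% of the
  // image diagonal so lens distortion bends the projected segments smoothly.
  auto polys = photon::VideoSimUtil::PolyFrom3dLines(camRt, prop, square, 0.01,
                                                     true, frame);
  for (const auto& poly : polys) {
    int thickness = static_cast<int>(std::round(
        photon::VideoSimUtil::GetScaledThickness(2.0, frame)));
    photon::VideoSimUtil::DrawPoly(poly, thickness, cv::Scalar{255}, true,
                                   frame);
  }
}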