PhotonVision C++ dev-v2025.0.0-beta-8-2-gbd1c5c03
Loading...
Searching...
No Matches
VideoSimUtil.h
Go to the documentation of this file.
1/*
2 * MIT License
3 *
4 * Copyright (c) PhotonVision
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#pragma once
26
27#include <cscore_cv.h>
28
29#include <algorithm>
30#include <numeric>
31#include <string>
32#include <unordered_map>
33#include <utility>
34#include <vector>
35
36#include <frc/apriltag/AprilTag.h>
37#include <opencv2/core.hpp>
38#include <opencv2/imgcodecs.hpp>
39#include <opencv2/objdetect.hpp>
40#include <units/length.h>
41
42#include "SimCameraProperties.h"
44
namespace mathutil {
// Sign function: returns +1 for positive values, -1 for negative, 0 for zero.
template <typename T>
int sgn(T val) {
  if (val > T(0)) {
    return 1;
  }
  return (val < T(0)) ? -1 : 0;
}
}  // namespace mathutil
51
52namespace photon {
53namespace VideoSimUtil {
// Number of 36h11 AprilTag images pre-generated and cached for simulation
// (ids 0 through kNumTags36h11 - 1).
static constexpr int kNumTags36h11 = 30;

// Field dimensions used by the wireframe drawing helpers below.
// NOTE(review): these look like FRC field dimensions — confirm they match the
// current season's field before relying on them.
static constexpr units::meter_t fieldLength{16.54175_m};
static constexpr units::meter_t fieldWidth{8.0137_m};
58
59static cv::Mat Get36h11TagImage(int id) {
60 wpi::RawFrame frame;
61 frc::AprilTag::Generate36h11AprilTagImage(&frame, id);
62 cv::Mat markerImage{frame.height, frame.width, CV_8UC1, frame.data,
63 static_cast<size_t>(frame.stride)};
64 cv::Mat markerClone = markerImage.clone();
65 return markerClone;
66}
67
68static std::unordered_map<int, cv::Mat> LoadAprilTagImages() {
69 std::unordered_map<int, cv::Mat> retVal{};
70 for (int i = 0; i < kNumTags36h11; i++) {
71 cv::Mat tagImage = Get36h11TagImage(i);
72 retVal[i] = tagImage;
73 }
74 return retVal;
75}
76
77static std::vector<cv::Point2f> GetImageCorners(const cv::Size& size) {
78 std::vector<cv::Point2f> retVal{};
79 retVal.emplace_back(cv::Point2f{-0.5f, -0.5f});
80 retVal.emplace_back(cv::Point2f{size.width - 0.5f, -0.5f});
81 retVal.emplace_back(cv::Point2f{size.width - 0.5f, size.height - 0.5f});
82 retVal.emplace_back(cv::Point2f{-0.5f, size.height - 0.5f});
83 return retVal;
84}
85
86static std::vector<cv::Point2f> Get36h11MarkerPts(int scale) {
87 cv::Rect2f roi36h11{cv::Point2f{1, 1}, cv::Point2f{8, 8}};
88 roi36h11.x *= scale;
89 roi36h11.y *= scale;
90 roi36h11.width *= scale;
91 roi36h11.height *= scale;
92 std::vector<cv::Point2f> pts = GetImageCorners(roi36h11.size());
93 for (size_t i = 0; i < pts.size(); i++) {
94 cv::Point2f pt = pts[i];
95 pts[i] = cv::Point2f{roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y};
96 }
97 return pts;
98}
99
100static std::vector<cv::Point2f> Get36h11MarkerPts() {
101 return Get36h11MarkerPts(1);
102}
103
104static const std::unordered_map<int, cv::Mat> kTag36h11Images =
106static const std::vector<cv::Point2f> kTag36h11MarkPts = Get36h11MarkerPts();
107
108[[maybe_unused]] static void UpdateVideoProp(cs::CvSource& video,
109 const SimCameraProperties& prop) {
110 video.SetResolution(prop.GetResWidth(), prop.GetResHeight());
111 video.SetFPS(prop.GetFPS().to<int>());
112}
113
// Warps the pre-rendered 36h11 tag image for `tagId` into `destination` so
// that the tag's marker corner points land on `dstPoints`. No-op if the tag
// id has no cached image or the warped tag falls outside the destination.
// The source may be pre-upscaled and/or supersampled before warping so the
// result stays sharp at any magnification.
//
// NOTE(review): the `antialiasing` parameter is accepted but never read in
// this body — supersampling is always applied; confirm whether it should
// gate that path. ("165h5" in the name also looks like a typo for "36h11",
// but renaming would break callers.)
[[maybe_unused]] static void Warp165h5TagImage(
    int tagId, const std::vector<cv::Point2f>& dstPoints, bool antialiasing,
    cv::Mat& destination) {
  if (!kTag36h11Images.contains(tagId)) {
    return;
  }
  cv::Mat tagImage = kTag36h11Images.at(tagId);
  std::vector<cv::Point2f> tagPoints{kTag36h11MarkPts};
  // Outermost corners of the full tag image (outside the marker region).
  std::vector<cv::Point2f> tagImageCorners{GetImageCorners(tagImage.size())};
  std::vector<cv::Point2f> dstPointMat = dstPoints;
  cv::Rect boundingRect = cv::boundingRect(dstPointMat);
  // Perspective transform mapping marker corners -> requested image points.
  cv::Mat perspecTrf = cv::getPerspectiveTransform(tagPoints, dstPointMat);
  // Where the full tag image's corners land in the destination image.
  std::vector<cv::Point2f> extremeCorners{};
  cv::perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
  boundingRect = cv::boundingRect(extremeCorners);

  // Linear scale change from the original tag image to its warped footprint
  // (ratio of side lengths, estimated from the contour areas).
  double warpedContourArea = cv::contourArea(extremeCorners);
  double warpedTagUpscale =
      std::sqrt(warpedContourArea) / std::sqrt(tagImage.size().area());
  int warpStrat = cv::INTER_NEAREST;

  // Supersample more when the tag shrinks on screen; clamp to [1, 10].
  int supersampling = 6;
  supersampling = static_cast<int>(std::ceil(supersampling / warpedTagUpscale));
  supersampling = std::max(std::min(supersampling, 10), 1);

  cv::Mat scaledTagImage{};
  if (warpedTagUpscale > 2.0) {
    // Tag is heavily magnified: pre-upscale the source with nearest-neighbor
    // (keeps crisp cell edges) and warp bilinearly; clamp the scale factor.
    warpStrat = cv::INTER_LINEAR;
    int scaleFactor = static_cast<int>(warpedTagUpscale / 3.0) + 2;
    scaleFactor = std::max(std::min(scaleFactor, 40), 1);
    scaleFactor *= supersampling;
    cv::resize(tagImage, scaledTagImage, cv::Size{}, scaleFactor, scaleFactor,
               cv::INTER_NEAREST);
    // Marker points must match the upscaled image.
    tagPoints = Get36h11MarkerPts(scaleFactor);
  } else {
    scaledTagImage = tagImage;
  }

  // Pad the warped bounding box by one pixel, then clip it to the
  // destination image; bail out if nothing visible remains.
  boundingRect.x -= 1;
  boundingRect.y -= 1;
  boundingRect.width += 2;
  boundingRect.height += 2;
  if (boundingRect.x < 0) {
    boundingRect.width += boundingRect.x;
    boundingRect.x = 0;
  }
  if (boundingRect.y < 0) {
    boundingRect.height += boundingRect.y;
    boundingRect.y = 0;
  }
  boundingRect.width =
      std::min(destination.size().width - boundingRect.x, boundingRect.width);
  boundingRect.height =
      std::min(destination.size().height - boundingRect.y, boundingRect.height);
  if (boundingRect.width <= 0 || boundingRect.height <= 0) {
    return;
  }

  // Scale the destination points and ROI up into supersampled space.
  std::vector<cv::Point2f> scaledDstPts{};
  if (supersampling > 1) {
    cv::multiply(dstPointMat,
                 cv::Scalar{static_cast<double>(supersampling),
                            static_cast<double>(supersampling)},
                 scaledDstPts);
    boundingRect.x *= supersampling;
    boundingRect.y *= supersampling;
    boundingRect.width *= supersampling;
    boundingRect.height *= supersampling;
  } else {
    scaledDstPts = dstPointMat;
  }

  // Re-derive the warp relative to the ROI origin so only the ROI is warped.
  cv::subtract(scaledDstPts,
               cv::Scalar{static_cast<double>(boundingRect.tl().x),
                          static_cast<double>(boundingRect.tl().y)},
               scaledDstPts);
  perspecTrf = cv::getPerspectiveTransform(tagPoints, scaledDstPts);

  cv::Mat tempRoi{};
  cv::warpPerspective(scaledTagImage, tempRoi, perspecTrf, boundingRect.size(),
                      warpStrat);

  // Downsample back to destination resolution; INTER_AREA averages the
  // supersampled pixels, which is what produces the antialiasing.
  if (supersampling > 1) {
    boundingRect.x /= supersampling;
    boundingRect.y /= supersampling;
    boundingRect.width /= supersampling;
    boundingRect.height /= supersampling;
    cv::resize(tempRoi, tempRoi, boundingRect.size(), 0, 0, cv::INTER_AREA);
  }

  // Build a mask of the warped tag quad so only tag pixels are composited.
  cv::Mat tempMask{cv::Mat::zeros(tempRoi.size(), CV_8UC1)};
  // Shift corners into ROI-local coordinates.
  cv::subtract(extremeCorners,
               cv::Scalar{static_cast<float>(boundingRect.tl().x),
                          static_cast<float>(boundingRect.tl().y)},
               extremeCorners);
  // Centroid of the warped quad (average of its corners).
  cv::Point2f tempCenter{};
  tempCenter.x =
      std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
                      [extremeCorners](float acc, const cv::Point2f& p2) {
                        return acc + p2.x / extremeCorners.size();
                      });
  tempCenter.y =
      std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
                      [extremeCorners](float acc, const cv::Point2f& p2) {
                        return acc + p2.y / extremeCorners.size();
                      });

  // Push each corner one pixel outward from the centroid so the mask fully
  // covers the warped tag's edge pixels.
  for (auto& corner : extremeCorners) {
    float xDiff = corner.x - tempCenter.x;
    float yDiff = corner.y - tempCenter.y;
    xDiff += 1 * mathutil::sgn(xDiff);
    yDiff += 1 * mathutil::sgn(yDiff);
    corner = cv::Point2f{tempCenter.x + xDiff, tempCenter.y + yDiff};
  }

  std::vector<cv::Point> extremeCornerInt{extremeCorners.begin(),
                                          extremeCorners.end()};
  cv::fillConvexPoly(tempMask, extremeCornerInt, cv::Scalar{255});

  // Composite the warped tag into the destination through the mask.
  cv::copyTo(tempRoi, destination(boundingRect), tempMask);
}
235
236static double GetScaledThickness(double thickness480p,
237 const cv::Mat& destinationMat) {
238 double scaleX = destinationMat.size().width / 640.0;
239 double scaleY = destinationMat.size().height / 480.0;
240 double minScale = std::min(scaleX, scaleY);
241 return std::max(thickness480p * minScale, 1.0);
242}
243
244[[maybe_unused]] static void DrawInscribedEllipse(
245 const std::vector<cv::Point2f>& dstPoints, const cv::Scalar& color,
246 cv::Mat& destination) {
247 cv::RotatedRect rect = OpenCVHelp::GetMinAreaRect(dstPoints);
248 cv::ellipse(destination, rect, color, -1, cv::LINE_AA);
249}
250
251static void DrawPoly(const std::vector<cv::Point2f>& dstPoints, int thickness,
252 const cv::Scalar& color, bool isClosed,
253 cv::Mat& destination) {
254 std::vector<cv::Point> intDstPoints{dstPoints.begin(), dstPoints.end()};
255 std::vector<std::vector<cv::Point>> listOfListOfPoints;
256 listOfListOfPoints.emplace_back(intDstPoints);
257 if (thickness > 0) {
258 cv::polylines(destination, listOfListOfPoints, isClosed, color, thickness,
259 cv::LINE_AA);
260 } else {
261 cv::fillPoly(destination, listOfListOfPoints, color, cv::LINE_AA);
262 }
263}
264
265[[maybe_unused]] static void DrawTagDetection(
266 int id, const std::vector<cv::Point2f>& dstPoints, cv::Mat& destination) {
267 double thickness = GetScaledThickness(1, destination);
268 DrawPoly(dstPoints, static_cast<int>(thickness), cv::Scalar{0, 0, 255}, true,
269 destination);
270 cv::Rect2d rect{cv::boundingRect(dstPoints)};
271 cv::Point2d textPt{rect.x + rect.width, rect.y};
272 textPt.x += thickness;
273 textPt.y += thickness;
274 cv::putText(destination, std::to_string(id), textPt, cv::FONT_HERSHEY_PLAIN,
275 1.5 * thickness, cv::Scalar{0, 200, 0},
276 static_cast<int>(thickness), cv::LINE_AA);
277}
278
279static std::vector<std::vector<frc::Translation3d>> GetFieldWallLines() {
280 std::vector<std::vector<frc::Translation3d>> list;
281
282 const units::meter_t sideHt = 19.5_in;
283 const units::meter_t driveHt = 35_in;
284 const units::meter_t topHt = 78_in;
285
286 // field floor
287 list.emplace_back(std::vector<frc::Translation3d>{
288 frc::Translation3d{0_m, 0_m, 0_m},
289 frc::Translation3d{fieldLength, 0_m, 0_m},
290 frc::Translation3d{fieldLength, fieldWidth, 0_m},
291 frc::Translation3d{0_m, fieldWidth, 0_m},
292 frc::Translation3d{0_m, 0_m, 0_m}});
293
294 // right side wall
295 list.emplace_back(std::vector<frc::Translation3d>{
296 frc::Translation3d{0_m, 0_m, 0_m}, frc::Translation3d{0_m, 0_m, sideHt},
297 frc::Translation3d{fieldLength, 0_m, sideHt},
298 frc::Translation3d{fieldLength, 0_m, 0_m}});
299
300 // red driverstation
301 list.emplace_back(std::vector<frc::Translation3d>{
302 frc::Translation3d{fieldLength, 0_m, sideHt},
303 frc::Translation3d{fieldLength, 0_m, topHt},
304 frc::Translation3d{fieldLength, fieldWidth, topHt},
305 frc::Translation3d{fieldLength, fieldWidth, sideHt},
306 });
307 list.emplace_back(std::vector<frc::Translation3d>{
308 frc::Translation3d{fieldLength, 0_m, driveHt},
309 frc::Translation3d{fieldLength, fieldWidth, driveHt}});
310
311 // left side wall
312 list.emplace_back(std::vector<frc::Translation3d>{
313 frc::Translation3d{0_m, fieldWidth, 0_m},
314 frc::Translation3d{0_m, fieldWidth, sideHt},
315 frc::Translation3d{fieldLength, fieldWidth, sideHt},
316 frc::Translation3d{fieldLength, fieldWidth, 0_m}});
317
318 // blue driverstation
319 list.emplace_back(std::vector<frc::Translation3d>{
320 frc::Translation3d{0_m, 0_m, sideHt},
321 frc::Translation3d{0_m, 0_m, topHt},
322 frc::Translation3d{0_m, fieldWidth, topHt},
323 frc::Translation3d{0_m, fieldWidth, sideHt},
324 });
325 list.emplace_back(std::vector<frc::Translation3d>{
326 frc::Translation3d{0_m, 0_m, driveHt},
327 frc::Translation3d{0_m, fieldWidth, driveHt}});
328
329 return list;
330}
331
332static std::vector<std::vector<frc::Translation3d>> GetFieldFloorLines(
333 int subdivisions) {
334 std::vector<std::vector<frc::Translation3d>> list;
335 const units::meter_t subLength = fieldLength / subdivisions;
336 const units::meter_t subWidth = fieldWidth / subdivisions;
337
338 for (int i = 0; i < subdivisions; i++) {
339 list.emplace_back(std::vector<frc::Translation3d>{
340 frc::Translation3d{0_m, subWidth * (i + 1), 0_m},
341 frc::Translation3d{fieldLength, subWidth * (i + 1), 0_m}});
342 list.emplace_back(std::vector<frc::Translation3d>{
343 frc::Translation3d{subLength * (i + 1), 0_m, 0_m},
344 frc::Translation3d{subLength * (i + 1), fieldWidth, 0_m}});
345 }
346 return list;
347}
348
// Projects a 3d polyline into 2d pixel-space polylines. Each segment is
// clipped to the portion visible to the camera and subdivided so lens
// distortion bends the projected line smoothly instead of cutting corners.
//
// @param camRt Camera's rotation-translation used for projection
// @param prop Simulated camera (intrinsics, distortion, visibility)
// @param trls Waypoints of the 3d polyline
// @param resolution Fraction of the image diagonal used as the maximum pixel
//   length of one projected subdivision
// @param isClosed If true, the first point is appended to close the loop
// @param destination Image whose size sets the pixel resolution
// @return One projected 2d point list per (at least partially) visible segment
static std::vector<std::vector<cv::Point2f>> PolyFrom3dLines(
    const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
    const std::vector<frc::Translation3d>& trls, double resolution,
    bool isClosed, cv::Mat& destination) {
  // Convert the resolution fraction into pixels along the image diagonal.
  resolution = std::hypot(destination.size().height, destination.size().width) *
               resolution;
  std::vector<frc::Translation3d> pts{trls};
  if (isClosed) {
    pts.emplace_back(pts[0]);
  }
  std::vector<std::vector<cv::Point2f>> polyPointList{};

  for (size_t i = 0; i < pts.size() - 1; i++) {
    frc::Translation3d pta = pts[i];
    frc::Translation3d ptb = pts[i + 1];

    // Fractions along (pta, ptb) bounding the camera-visible portion; an
    // empty second value means the segment is entirely out of view.
    std::pair<std::optional<double>, std::optional<double>> inter =
        prop.GetVisibleLine(camRt, pta, ptb);
    if (!inter.second) {
      continue;
    }

    // NOTE(review): assumes inter.first is engaged whenever inter.second is —
    // confirm against GetVisibleLine's contract.
    double inter1 = inter.first.value();
    double inter2 = inter.second.value();
    // Clip the segment endpoints to the visible portion.
    frc::Translation3d baseDelta = ptb - pta;
    frc::Translation3d old_pta = pta;
    if (inter1 > 0) {
      pta = old_pta + baseDelta * inter1;
    }
    if (inter2 < 1) {
      ptb = old_pta + baseDelta * inter2;
    }
    baseDelta = ptb - pta;

    // Project the clipped endpoints into pixel space.
    std::vector<cv::Point2f> poly = OpenCVHelp::ProjectPoints(
        prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, {pta, ptb});
    cv::Point2d pxa = poly[0];
    cv::Point2d pxb = poly[1];

    // Subdivide so no projected piece exceeds `resolution` pixels, then
    // splice the projected interior points between the two endpoints.
    double pxDist = std::hypot(pxb.x - pxa.x, pxb.y - pxa.y);
    int subdivisions = static_cast<int>(pxDist / resolution);
    frc::Translation3d subDelta = baseDelta / (subdivisions + 1);
    std::vector<frc::Translation3d> subPts{};
    for (int j = 0; j < subdivisions; j++) {
      subPts.emplace_back(pta + (subDelta * (j + 1)));
    }
    if (subPts.size() > 0) {
      std::vector<cv::Point2f> toAdd = OpenCVHelp::ProjectPoints(
          prop.GetIntrinsics(), prop.GetDistCoeffs(), camRt, subPts);
      poly.insert(poly.begin() + 1, toAdd.begin(), toAdd.end());
    }

    polyPointList.emplace_back(poly);
  }

  return polyPointList;
}
406
407[[maybe_unused]] static void DrawFieldWireFrame(
408 const RotTrlTransform3d& camRt, const SimCameraProperties& prop,
409 double resolution, double wallThickness, const cv::Scalar& wallColor,
410 int floorSubdivisions, double floorThickness, const cv::Scalar& floorColor,
411 cv::Mat& destination) {
412 for (const auto& trls : GetFieldFloorLines(floorSubdivisions)) {
413 auto polys =
414 PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
415 for (const auto& poly : polys) {
416 DrawPoly(poly,
417 static_cast<int>(
418 std::round(GetScaledThickness(floorThickness, destination))),
419 floorColor, false, destination);
420 }
421 }
422 for (const auto& trls : GetFieldWallLines()) {
423 auto polys =
424 PolyFrom3dLines(camRt, prop, trls, resolution, false, destination);
425 for (const auto& poly : polys) {
426 DrawPoly(poly,
427 static_cast<int>(
428 std::round(GetScaledThickness(wallThickness, destination))),
429 wallColor, false, destination);
430 }
431 }
432}
433} // namespace VideoSimUtil
434} // namespace photon
Definition RotTrlTransform3d.h:27
Definition SimCameraProperties.h:44
Eigen::Matrix< double, 8, 1 > GetDistCoeffs() const
Definition SimCameraProperties.h:88
int GetResHeight() const
Definition SimCameraProperties.h:78
int GetResWidth() const
Definition SimCameraProperties.h:76
std::pair< std::optional< double >, std::optional< double > > GetVisibleLine(const RotTrlTransform3d &camRt, const frc::Translation3d &a, const frc::Translation3d &b) const
Eigen::Matrix< double, 3, 3 > GetIntrinsics() const
Definition SimCameraProperties.h:86
units::hertz_t GetFPS() const
Definition SimCameraProperties.h:90
Definition VideoSimUtil.h:45
int sgn(T val)
Definition VideoSimUtil.h:47
static cv::RotatedRect GetMinAreaRect(const std::vector< cv::Point2f > &points)
Definition OpenCVHelp.h:56
static std::vector< cv::Point2f > ProjectPoints(const Eigen::Matrix< double, 3, 3 > &cameraMatrix, const Eigen::Matrix< double, 8, 1 > &distCoeffs, const RotTrlTransform3d &camRt, const std::vector< frc::Translation3d > &objectTranslations)
Definition OpenCVHelp.h:147
static std::vector< std::vector< frc::Translation3d > > GetFieldWallLines()
Definition VideoSimUtil.h:279
static std::vector< cv::Point2f > Get36h11MarkerPts()
Definition VideoSimUtil.h:100
static void UpdateVideoProp(cs::CvSource &video, const SimCameraProperties &prop)
Definition VideoSimUtil.h:108
static void DrawFieldWireFrame(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, double resolution, double wallThickness, const cv::Scalar &wallColor, int floorSubdivisions, double floorThickness, const cv::Scalar &floorColor, cv::Mat &destination)
Definition VideoSimUtil.h:407
static constexpr units::meter_t fieldLength
Definition VideoSimUtil.h:56
static cv::Mat Get36h11TagImage(int id)
Definition VideoSimUtil.h:59
static void DrawTagDetection(int id, const std::vector< cv::Point2f > &dstPoints, cv::Mat &destination)
Definition VideoSimUtil.h:265
static std::vector< std::vector< cv::Point2f > > PolyFrom3dLines(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, const std::vector< frc::Translation3d > &trls, double resolution, bool isClosed, cv::Mat &destination)
Definition VideoSimUtil.h:349
static constexpr units::meter_t fieldWidth
Definition VideoSimUtil.h:57
static std::unordered_map< int, cv::Mat > LoadAprilTagImages()
Definition VideoSimUtil.h:68
static void Warp165h5TagImage(int tagId, const std::vector< cv::Point2f > &dstPoints, bool antialiasing, cv::Mat &destination)
Definition VideoSimUtil.h:114
static std::vector< cv::Point2f > GetImageCorners(const cv::Size &size)
Definition VideoSimUtil.h:77
static void DrawPoly(const std::vector< cv::Point2f > &dstPoints, int thickness, const cv::Scalar &color, bool isClosed, cv::Mat &destination)
Definition VideoSimUtil.h:251
static double GetScaledThickness(double thickness480p, const cv::Mat &destinationMat)
Definition VideoSimUtil.h:236
static const std::vector< cv::Point2f > kTag36h11MarkPts
Definition VideoSimUtil.h:106
static void DrawInscribedEllipse(const std::vector< cv::Point2f > &dstPoints, const cv::Scalar &color, cv::Mat &destination)
Definition VideoSimUtil.h:244
static constexpr int kNumTags36h11
Definition VideoSimUtil.h:54
static const std::unordered_map< int, cv::Mat > kTag36h11Images
Definition VideoSimUtil.h:104
static std::vector< std::vector< frc::Translation3d > > GetFieldFloorLines(int subdivisions)
Definition VideoSimUtil.h:332
Definition VisionEstimation.h:32