30#include <unordered_map>
35#include <frc/apriltag/AprilTag.h>
36#include <opencv2/core.hpp>
37#include <opencv2/imgcodecs.hpp>
38#include <opencv2/objdetect.hpp>
39#include <units/length.h>
47 return (T(0) < val) - (val < T(0));
52namespace VideoSimUtil {
62 frc::AprilTag::Generate36h11AprilTagImage(&frame,
id);
63 cv::Mat markerImage{frame.height, frame.width, CV_8UC1, frame.data,
64 static_cast<size_t>(frame.stride)};
65 cv::Mat markerClone = markerImage.clone();
70 std::unordered_map<int, cv::Mat> retVal{};
89 std::vector<cv::Point2f> retVal{};
90 retVal.emplace_back(cv::Point2f{-0.5f, size.height - 0.5f});
91 retVal.emplace_back(cv::Point2f{size.width - 0.5f, size.height - 0.5f});
92 retVal.emplace_back(cv::Point2f{size.width - 0.5f, -0.5f});
93 retVal.emplace_back(cv::Point2f{-0.5f, -0.5f});
104 cv::Rect2f roi36h11{cv::Point2f{1, 1}, cv::Point2f{8, 8}};
107 roi36h11.width *= scale;
108 roi36h11.height *= scale;
110 for (
size_t i = 0; i < pts.size(); i++) {
111 cv::Point2f pt = pts[i];
112 pts[i] = cv::Point2f{roi36h11.tl().x + pt.x, roi36h11.tl().y + pt.y};
135 video.SetFPS(prop.
GetFPS().to<
int>());
151 int tagId,
const std::vector<cv::Point2f>& dstPoints,
bool antialiasing,
152 cv::Mat& destination) {
158 std::vector<cv::Point2f> tagImageCorners{
GetImageCorners(tagImage.size())};
159 std::vector<cv::Point2f> dstPointMat = dstPoints;
160 cv::Rect boundingRect = cv::boundingRect(dstPointMat);
161 cv::Mat perspecTrf = cv::getPerspectiveTransform(tagPoints, dstPointMat);
162 std::vector<cv::Point2f> extremeCorners{};
163 cv::perspectiveTransform(tagImageCorners, extremeCorners, perspecTrf);
164 boundingRect = cv::boundingRect(extremeCorners);
166 double warpedContourArea = cv::contourArea(extremeCorners);
167 double warpedTagUpscale =
168 std::sqrt(warpedContourArea) / std::sqrt(tagImage.size().area());
169 int warpStrat = cv::INTER_NEAREST;
171 int supersampling = 6;
172 supersampling =
static_cast<int>(std::ceil(supersampling / warpedTagUpscale));
173 supersampling = std::max(std::min(supersampling, 10), 1);
175 cv::Mat scaledTagImage{};
176 if (warpedTagUpscale > 2.0) {
177 warpStrat = cv::INTER_LINEAR;
178 int scaleFactor =
static_cast<int>(warpedTagUpscale / 3.0) + 2;
179 scaleFactor = std::max(std::min(scaleFactor, 40), 1);
180 scaleFactor *= supersampling;
181 cv::resize(tagImage, scaledTagImage, cv::Size{}, scaleFactor, scaleFactor,
185 scaledTagImage = tagImage;
190 boundingRect.width += 2;
191 boundingRect.height += 2;
192 if (boundingRect.x < 0) {
193 boundingRect.width += boundingRect.x;
196 if (boundingRect.y < 0) {
197 boundingRect.height += boundingRect.y;
201 std::min(destination.size().width - boundingRect.x, boundingRect.width);
202 boundingRect.height =
203 std::min(destination.size().height - boundingRect.y, boundingRect.height);
204 if (boundingRect.width <= 0 || boundingRect.height <= 0) {
208 std::vector<cv::Point2f> scaledDstPts{};
209 if (supersampling > 1) {
210 cv::multiply(dstPointMat,
211 cv::Scalar{
static_cast<double>(supersampling),
212 static_cast<double>(supersampling)},
214 boundingRect.x *= supersampling;
215 boundingRect.y *= supersampling;
216 boundingRect.width *= supersampling;
217 boundingRect.height *= supersampling;
219 scaledDstPts = dstPointMat;
222 cv::subtract(scaledDstPts,
223 cv::Scalar{
static_cast<double>(boundingRect.tl().x),
224 static_cast<double>(boundingRect.tl().y)},
226 perspecTrf = cv::getPerspectiveTransform(tagPoints, scaledDstPts);
229 cv::warpPerspective(scaledTagImage, tempRoi, perspecTrf, boundingRect.size(),
232 if (supersampling > 1) {
233 boundingRect.x /= supersampling;
234 boundingRect.y /= supersampling;
235 boundingRect.width /= supersampling;
236 boundingRect.height /= supersampling;
237 cv::resize(tempRoi, tempRoi, boundingRect.size(), 0, 0, cv::INTER_AREA);
240 cv::Mat tempMask{cv::Mat::zeros(tempRoi.size(), CV_8UC1)};
241 cv::subtract(extremeCorners,
242 cv::Scalar{
static_cast<float>(boundingRect.tl().x),
243 static_cast<float>(boundingRect.tl().y)},
245 cv::Point2f tempCenter{};
247 std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
248 [extremeCorners](
float acc,
const cv::Point2f& p2) {
249 return acc + p2.x / extremeCorners.size();
252 std::accumulate(extremeCorners.begin(), extremeCorners.end(), 0.0,
253 [extremeCorners](
float acc,
const cv::Point2f& p2) {
254 return acc + p2.y / extremeCorners.size();
257 for (
auto& corner : extremeCorners) {
258 float xDiff = corner.x - tempCenter.x;
259 float yDiff = corner.y - tempCenter.y;
262 corner = cv::Point2f{tempCenter.x + xDiff, tempCenter.y + yDiff};
265 std::vector<cv::Point> extremeCornerInt{extremeCorners.begin(),
266 extremeCorners.end()};
267 cv::fillConvexPoly(tempMask, extremeCornerInt, cv::Scalar{255});
269 cv::copyTo(tempRoi, destination(boundingRect), tempMask);
281 const cv::Mat& destination) {
282 double scaleX = destination.size().width / 640.0;
283 double scaleY = destination.size().height / 480.0;
284 double minScale = std::min(scaleX, scaleY);
285 return std::max(thickness480p * minScale, 1.0);
299 const std::vector<cv::Point2f>& dstPoints,
const cv::Scalar& color,
300 cv::Mat& destination) {
302 cv::ellipse(destination, rect, color, -1, cv::LINE_AA);
305static void DrawPoly(
const std::vector<cv::Point2f>& dstPoints,
int thickness,
306 const cv::Scalar& color,
bool isClosed,
307 cv::Mat& destination) {
308 std::vector<cv::Point> intDstPoints{dstPoints.begin(), dstPoints.end()};
309 std::vector<std::vector<cv::Point>> listOfListOfPoints;
310 listOfListOfPoints.emplace_back(intDstPoints);
312 cv::polylines(destination, listOfListOfPoints, isClosed, color, thickness,
315 cv::fillPoly(destination, listOfListOfPoints, color, cv::LINE_AA);
330 int id,
const std::vector<cv::Point2f>& dstPoints, cv::Mat& destination) {
332 DrawPoly(dstPoints,
static_cast<int>(thickness), cv::Scalar{0, 0, 255},
true,
334 cv::Rect2d rect{cv::boundingRect(dstPoints)};
335 cv::Point2d textPt{rect.x + rect.width, rect.y};
336 textPt.x += thickness;
337 textPt.y += thickness;
338 cv::putText(destination, std::to_string(
id), textPt, cv::FONT_HERSHEY_PLAIN,
339 1.5 * thickness, cv::Scalar{0, 200, 0},
340 static_cast<int>(thickness), cv::LINE_AA);
348 std::vector<std::vector<frc::Translation3d>> list;
350 const units::meter_t sideHt = 19.5_in;
351 const units::meter_t driveHt = 35_in;
352 const units::meter_t topHt = 78_in;
355 list.emplace_back(std::vector<frc::Translation3d>{
356 frc::Translation3d{0_m, 0_m, 0_m},
360 frc::Translation3d{0_m, 0_m, 0_m}});
363 list.emplace_back(std::vector<frc::Translation3d>{
364 frc::Translation3d{0_m, 0_m, 0_m}, frc::Translation3d{0_m, 0_m, sideHt},
369 list.emplace_back(std::vector<frc::Translation3d>{
375 list.emplace_back(std::vector<frc::Translation3d>{
380 list.emplace_back(std::vector<frc::Translation3d>{
387 list.emplace_back(std::vector<frc::Translation3d>{
388 frc::Translation3d{0_m, 0_m, sideHt},
389 frc::Translation3d{0_m, 0_m, topHt},
393 list.emplace_back(std::vector<frc::Translation3d>{
394 frc::Translation3d{0_m, 0_m, driveHt},
395 frc::Translation3d{0_m,
fieldWidth, driveHt}});
411 std::vector<std::vector<frc::Translation3d>> list;
412 const units::meter_t subLength =
fieldLength / subdivisions;
413 const units::meter_t subWidth =
fieldWidth / subdivisions;
415 for (
int i = 0; i < subdivisions; i++) {
416 list.emplace_back(std::vector<frc::Translation3d>{
417 frc::Translation3d{0_m, subWidth * (i + 1), 0_m},
418 frc::Translation3d{
fieldLength, subWidth * (i + 1), 0_m}});
419 list.emplace_back(std::vector<frc::Translation3d>{
420 frc::Translation3d{subLength * (i + 1), 0_m, 0_m},
421 frc::Translation3d{subLength * (i + 1),
fieldWidth, 0_m}});
445 const std::vector<frc::Translation3d>& trls,
double resolution,
446 bool isClosed, cv::Mat& destination) {
447 resolution = std::hypot(destination.size().height, destination.size().width) *
449 std::vector<frc::Translation3d> pts{trls};
451 pts.emplace_back(pts[0]);
453 std::vector<std::vector<cv::Point2f>> polyPointList{};
455 for (
size_t i = 0; i < pts.size() - 1; i++) {
456 frc::Translation3d pta = pts[i];
457 frc::Translation3d ptb = pts[i + 1];
459 std::pair<std::optional<double>, std::optional<double>> inter =
465 double inter1 = inter.first.value();
466 double inter2 = inter.second.value();
467 frc::Translation3d baseDelta = ptb - pta;
468 frc::Translation3d old_pta = pta;
470 pta = old_pta + baseDelta * inter1;
473 ptb = old_pta + baseDelta * inter2;
475 baseDelta = ptb - pta;
479 cv::Point2d pxa = poly[0];
480 cv::Point2d pxb = poly[1];
482 double pxDist = std::hypot(pxb.x - pxa.x, pxb.y - pxa.y);
483 int subdivisions =
static_cast<int>(pxDist / resolution);
484 frc::Translation3d subDelta = baseDelta / (subdivisions + 1);
485 std::vector<frc::Translation3d> subPts{};
486 for (
int j = 0; j < subdivisions; j++) {
487 subPts.emplace_back(pta + (subDelta * (j + 1)));
489 if (subPts.size() > 0) {
492 poly.insert(poly.begin() + 1, toAdd.begin(), toAdd.end());
495 polyPointList.emplace_back(poly);
498 return polyPointList;
522 double resolution,
double wallThickness,
const cv::Scalar& wallColor,
523 int floorSubdivisions,
double floorThickness,
const cv::Scalar& floorColor,
524 cv::Mat& destination) {
528 for (
const auto& poly : polys) {
532 floorColor,
false, destination);
538 for (
const auto& poly : polys) {
542 wallColor,
false, destination);
Calibration and performance values for this camera.
Definition SimCameraProperties.h:59
Eigen::Matrix< double, 8, 1 > GetDistCoeffs() const
Returns the camera calibration's distortion coefficients, in OPENCV8 form.
Definition SimCameraProperties.h:158
int GetResHeight() const
Gets the height of the simulated camera image.
Definition SimCameraProperties.h:137
int GetResWidth() const
Gets the width of the simulated camera image.
Definition SimCameraProperties.h:130
std::pair< std::optional< double >, std::optional< double > > GetVisibleLine(const RotTrlTransform3d &camRt, const frc::Translation3d &a, const frc::Translation3d &b) const
Determines where the line segment defined by the two given translations intersects the camera's frustum.
Eigen::Matrix< double, 3, 3 > GetIntrinsics() const
Definition SimCameraProperties.h:150
units::hertz_t GetFPS() const
Gets the FPS of the simulated camera.
Definition SimCameraProperties.h:165
Definition VideoSimUtil.h:44
int sgn(T val)
Definition VideoSimUtil.h:46
static cv::RotatedRect GetMinAreaRect(const std::vector< cv::Point2f > &points)
Definition OpenCVHelp.h:56
static std::vector< cv::Point2f > ProjectPoints(const Eigen::Matrix< double, 3, 3 > &cameraMatrix, const Eigen::Matrix< double, 8, 1 > &distCoeffs, const RotTrlTransform3d &camRt, const std::vector< frc::Translation3d > &objectTranslations)
Definition OpenCVHelp.h:147
static std::vector< std::vector< frc::Translation3d > > GetFieldWallLines()
The translations used to draw the field side walls and driver station walls.
Definition VideoSimUtil.h:347
static std::vector< cv::Point2f > Get36h11MarkerPts()
Gets the points representing the marker (black square) corners.
Definition VideoSimUtil.h:122
static void UpdateVideoProp(cs::CvSource &video, const SimCameraProperties &prop)
Updates the properties of this cs::CvSource video stream with the given camera properties.
Definition VideoSimUtil.h:132
static void DrawFieldWireFrame(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, double resolution, double wallThickness, const cv::Scalar &wallColor, int floorSubdivisions, double floorThickness, const cv::Scalar &floorColor, cv::Mat &destination)
Draw a wireframe of the field to the given image.
Definition VideoSimUtil.h:520
static constexpr units::meter_t fieldLength
Definition VideoSimUtil.h:57
static cv::Mat Get36h11TagImage(int id)
Definition VideoSimUtil.h:60
static void DrawTagDetection(int id, const std::vector< cv::Point2f > &dstPoints, cv::Mat &destination)
Draws a contour around the given points and text of the id onto the destination image.
Definition VideoSimUtil.h:329
static std::vector< std::vector< cv::Point2f > > PolyFrom3dLines(const RotTrlTransform3d &camRt, const SimCameraProperties &prop, const std::vector< frc::Translation3d > &trls, double resolution, bool isClosed, cv::Mat &destination)
Converts 3D lines represented by the given series of translations into one or more polygons in the camera's image.
Definition VideoSimUtil.h:443
static constexpr units::meter_t fieldWidth
Definition VideoSimUtil.h:58
static std::unordered_map< int, cv::Mat > LoadAprilTagImages()
Definition VideoSimUtil.h:69
static void Warp165h5TagImage(int tagId, const std::vector< cv::Point2f > &dstPoints, bool antialiasing, cv::Mat &destination)
Warps the image of a specific 36h11 AprilTag onto the destination image at the given points. (NOTE: despite the member name Warp165h5TagImage, this operates on 36h11 tags — verify against the implementation.)
Definition VideoSimUtil.h:150
static std::vector< cv::Point2f > GetImageCorners(const cv::Size &size)
Gets the points representing the corners of this image.
Definition VideoSimUtil.h:88
static double GetScaledThickness(double thickness480p, const cv::Mat &destination)
Given a line thickness in a 640x480 image, try to scale to the given destination image resolution.
Definition VideoSimUtil.h:280
static void DrawPoly(const std::vector< cv::Point2f > &dstPoints, int thickness, const cv::Scalar &color, bool isClosed, cv::Mat &destination)
Definition VideoSimUtil.h:305
static const std::vector< cv::Point2f > kTag36h11MarkPts
Definition VideoSimUtil.h:128
static void DrawInscribedEllipse(const std::vector< cv::Point2f > &dstPoints, const cv::Scalar &color, cv::Mat &destination)
Draw a filled ellipse in the destination image.
Definition VideoSimUtil.h:298
static constexpr int kNumTags36h11
Definition VideoSimUtil.h:55
static const std::unordered_map< int, cv::Mat > kTag36h11Images
Definition VideoSimUtil.h:126
static std::vector< std::vector< frc::Translation3d > > GetFieldFloorLines(int subdivisions)
The translations used to draw the field floor subdivisions (not the floor outline).
Definition VideoSimUtil.h:409
Definition TargetModel.h:27