
Commit e6ac495

Merge pull request #809 from luxonis/update_depth_visualziation
Updated depth colorization logic
2 parents: f5117cc + 42d9f27 · commit e6ac495

9 files changed: +47 −29 lines
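Every one of the nine example scripts gets the same colorization change: instead of scaling the depth frame with cv2.normalize (NORM_INF) and equalizing its histogram, the frame is now mapped linearly between its 1st and 99th depth percentiles, computed on a row-subsampled copy with invalid zero-depth pixels excluded, before the color map is applied. A minimal self-contained sketch of that logic follows; the helper name colorize_depth and the synthetic input frame are illustrative, not part of the commit.

import cv2
import numpy as np

def colorize_depth(depthFrame: np.ndarray) -> np.ndarray:
    # Subsample rows so the percentile computation stays cheap.
    depth_downscaled = depthFrame[::4]
    if np.all(depth_downscaled == 0):
        min_depth = 0  # frame contains no valid depth
    else:
        # Zero means "no depth measured", so exclude it from the lower percentile.
        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
    max_depth = np.percentile(depth_downscaled, 99)
    # Clip the 1st..99th percentile range into 0..255 and colorize for display.
    gray = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
    return cv2.applyColorMap(gray, cv2.COLORMAP_HOT)

# Usage on a synthetic 640x400 depth frame (millimeters, uint16):
depth = np.random.randint(0, 5000, (400, 640), dtype=np.uint16)
cv2.imshow("depth", colorize_depth(depth))
cv2.waitKey(0)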

docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst

+1 −1

@@ -11,7 +11,7 @@ scanning camera for mobile robots.
 Demo
 ####
 
-.. image:: https://user-images.githubusercontent.com/18037362/190861621-b57fd1e3-5a3d-4d79-b1a7-d17a0b78c63e.gif
+.. image:: https://user-images.githubusercontent.com/18037362/231822498-6e3699a0-039e-424b-acb2-b246575e91ee.png
 
 Setup
 #####

docs/source/tutorials/code_samples.rst

+1

@@ -126,6 +126,7 @@ are presented with code.
 
 .. rubric:: SpatialDetection
 
+- :ref:`Spatial Calculator Multi-ROI` - Selects multiple ROIs and calculates spatial coordinates for each of them
 - :ref:`Spatial location calculator` - Demonstrates how to use the spatial location calculator
 - :ref:`RGB & MobilenetSSD with spatial data` - Displays RGB frames with MobileNet detections and spatial coordinates on them
 - :ref:`Mono & MobilenetSSD with spatial data` - Displays mono frames with MobileNet detections and spatial coordinates on them

examples/SpatialDetection/spatial_calculator_multi_roi.py

+6 −3

@@ -3,6 +3,7 @@
 import cv2
 import depthai as dai
 import math
+import numpy as np
 
 # Create pipeline
 pipeline = dai.Pipeline()

@@ -29,7 +30,7 @@
 
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 stereo.setLeftRightCheck(True)
-stereo.setExtendedDisparity(True)
+stereo.setSubpixel(True)
 spatialLocationCalculator.inputConfig.setWaitForMessage(False)
 
 # Create 10 ROIs

@@ -65,8 +66,10 @@
 
 depthFrame = inDepth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 spatialData = spatialCalcQueue.get().getSpatialLocations()

examples/SpatialDetection/spatial_location_calculator.py

+11 −12

@@ -2,7 +2,7 @@
 
 import cv2
 import depthai as dai
-
+import numpy as np
 stepSize = 0.05
 
 newConfig = False

@@ -30,12 +30,9 @@
 monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
 monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
 
-lrcheck = False
-subpixel = False
-
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
-stereo.setLeftRightCheck(lrcheck)
-stereo.setSubpixel(subpixel)
+stereo.setLeftRightCheck(True)
+stereo.setSubpixel(True)
 
 # Config
 topLeft = dai.Point2f(0.4, 0.4)

@@ -77,8 +74,10 @@
 
 depthFrame = inDepth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 spatialData = spatialCalcQueue.get().getSpatialLocations()

@@ -94,10 +93,10 @@
 depthMax = depthData.depthMax
 
 fontType = cv2.FONT_HERSHEY_TRIPLEX
-cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
-cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, 255)
-cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, 255)
-cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, 255)
+cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
+cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color)
+cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color)
+cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color)
 # Show the frame
 cv2.imshow("depth", depthFrameColor)
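The last hunk here is a drawing fix rather than a colorization change: in cv2.rectangle(img, pt1, pt2, color, thickness) the fifth positional argument is the border thickness, so the old code was passing the integer font constant cv2.FONT_HERSHEY_SCRIPT_SIMPLEX as a thickness value, and the old cv2.putText calls passed a bare 255 as the color, which OpenCV renders as blue on a BGR frame rather than the ROI's own color. A short self-contained before/after sketch (the canvas, coordinates, and color below are illustrative):

import cv2
import numpy as np

canvas = np.zeros((200, 320, 3), dtype=np.uint8)
color = (0, 200, 40)  # stand-in for the per-ROI color used in the example

# Old call: the font enum lands in the thickness slot, giving a thick border.
cv2.rectangle(canvas, (10, 10), (140, 90), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
# New call: an explicit 1-pixel border, with the label drawn in the same color.
cv2.rectangle(canvas, (170, 10), (300, 90), color, 1)
cv2.putText(canvas, "Z: 1234 mm", (175, 60), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)

cv2.imshow("rectangle thickness", canvas)
cv2.waitKey(0)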

examples/SpatialDetection/spatial_mobilenet.py

+6 −3

@@ -60,6 +60,7 @@
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
+stereo.setSubpixel(True)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
 
 spatialDetectionNetwork.setBlobPath(nnBlobPath)

@@ -113,8 +114,10 @@
 
 depthFrame = depth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 detections = inDet.detections

@@ -132,7 +135,7 @@
 ymin = int(topLeft.y)
 xmax = int(bottomRight.x)
 ymax = int(bottomRight.y)
-cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
+cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
 
 # Denormalize bounding box
 x1 = int(detection.xmin * width)

examples/SpatialDetection/spatial_mobilenet_mono.py

+5 −2

@@ -59,6 +59,7 @@
 
 # StereoDepth
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+stereo.setSubpixel(True)
 
 # Define a neural network that will make predictions based on the source frames
 spatialDetectionNetwork.setConfidenceThreshold(0.5)

@@ -116,8 +117,10 @@
 
 depthFrame = inDepth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 detections = inDet.detections

examples/SpatialDetection/spatial_tiny_yolo.py

+6 −3

@@ -84,6 +84,7 @@
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
+stereo.setSubpixel(True)
 
 spatialDetectionNetwork.setBlobPath(nnBlobPath)
 spatialDetectionNetwork.setConfidenceThreshold(0.5)

@@ -146,8 +147,10 @@
 frame = inPreview.getCvFrame()
 depthFrame = depth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 counter+=1

@@ -172,7 +175,7 @@
 ymin = int(topLeft.y)
 xmax = int(bottomRight.x)
 ymax = int(bottomRight.y)
-cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
+cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
 
 # Denormalize bounding box
 x1 = int(detection.xmin * width)

examples/StereoDepth/depth_crop_control.py

+5 −2

@@ -40,6 +40,7 @@
 manip.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
 manip.setMaxOutputFrameSize(monoRight.getResolutionHeight()*monoRight.getResolutionWidth()*3)
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+stereo.setSubpixel(True)
 
 # Linking
 configIn.out.link(manip.inputConfig)

@@ -62,8 +63,10 @@
 depthFrame = inDepth.getFrame() # depthFrame values are in millimeters
 
 # Frame is transformed, the color map will be applied to highlight the depth info
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 # Frame is ready to be shown

examples/mixed/rotated_spatial_detections.py

+6 −3

@@ -4,7 +4,7 @@
 import sys
 import cv2
 import depthai as dai
-
+import numpy as np
 '''
 Spatial object detections demo for 180° rotated OAK camera.
 '''

@@ -58,6 +58,7 @@
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
+stereo.setSubpixel(True)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
 
 rotate_stereo_manip = pipeline.createImageManip()

@@ -104,8 +105,10 @@
 frame = inPreview.getCvFrame()
 depthFrame = depth.getFrame() # depthFrame values are in millimeters
 
-depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-depthFrameColor = cv2.equalizeHist(depthFrameColor)
+depth_downscaled = depthFrame[::4]
+min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+max_depth = np.percentile(depth_downscaled, 99)
+depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
 depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
 
 detections = inDet.detections
