-
Notifications
You must be signed in to change notification settings - Fork 373
/
Copy pathmain.py
68 lines (54 loc) · 2.65 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
"""Depth-driven autofocus demo.

Builds a DepthAI pipeline that runs YuNet face detection on the color
camera, fuses the detections with stereo depth from the left/right camera
pair, and uses the resulting 3D face position to drive the color camera's
focus. Video, detections, colorized disparity, and the focus distance are
streamed to a RemoteConnection visualizer on HTTP port 8082.
"""

import depthai as dai
from depthai_nodes.node import ApplyColormap, ParsingNeuralNetwork

from util.arguments import initialize_argparser
from util.depth_driven_focus import DepthDrivenFocus
from util.depth_merger import DepthMerger

_, args = initialize_argparser()

visualizer = dai.RemoteConnection(httpPort=8082)
# Use the explicitly requested device when given, otherwise the first one found.
device = dai.Device(dai.DeviceInfo(args.device)) if args.device else dai.Device()

with dai.Pipeline(device) as pipeline:
    print("Creating pipeline...")

    # Face-detection model; the archive's platform must match the device.
    model_description = dai.NNModelDescription("luxonis/yunet:640x480")
    model_description.platform = device.getPlatformAsString()
    nn_archive = dai.NNArchive(dai.getModelFromZoo(model_description))

    def _make_camera(socket):
        """Create a Camera node on *socket* and request an NV12 output
        matching the NN input size, capped at the CLI fps limit.

        Returns the (camera node, output) pair.
        """
        node = pipeline.create(dai.node.Camera).build(socket)
        out = node.requestOutput(
            nn_archive.getInputSize(), type=dai.ImgFrame.Type.NV12, fps=args.fps_limit
        )
        return node, out

    # Color camera feeds the NN; left/right mono pair feeds stereo depth.
    cam, color_out = _make_camera(dai.CameraBoardSocket.CAM_A)
    _, left_out = _make_camera(dai.CameraBoardSocket.CAM_B)
    _, right_out = _make_camera(dai.CameraBoardSocket.CAM_C)

    stereo = pipeline.create(dai.node.StereoDepth).build(left=left_out, right=right_out)
    stereo.initialConfig.setConfidenceThreshold(240)  # keep only high-confidence depth
    stereo.setLeftRightCheck(True)  # reject occlusion artifacts
    stereo.setRectification(True)
    stereo.setExtendedDisparity(True)  # better close-range depth

    face_det_nn = pipeline.create(ParsingNeuralNetwork).build(cam, nn_archive)

    # Attach depth to each 2D face detection so focus can be set by distance.
    depth_merger = pipeline.create(DepthMerger).build(
        face_det_nn.out, stereo.depth, device.readCalibration2()
    )

    # Colorize raw disparity for visualization only.
    depth_color_transform = pipeline.create(ApplyColormap).build(stereo.disparity)
    depth_color_transform.setMaxValue(int(stereo.initialConfig.getMaxDisparity()))

    # Translate the merged 3D detection into camera focus control commands.
    depth_driven_focus = pipeline.create(DepthDrivenFocus).build(
        control_queue=cam.inputControl.createInputQueue(),
        face_detection=depth_merger.output,
    )

    visualizer.addTopic("Video", color_out, "images")
    visualizer.addTopic("Visualizations", face_det_nn.out, "images")
    visualizer.addTopic("Depth", depth_color_transform.out, "images")
    visualizer.addTopic("Focus distance", depth_driven_focus.output, "images")

    print("Pipeline created.")
    pipeline.start()
    visualizer.registerPipeline(pipeline)

    # Poll the visualizer until the user quits or the pipeline stops.
    while pipeline.isRunning():
        key = visualizer.waitKey(1)
        if key == ord("q"):
            print("Got q key from the remote connection!")
            break