
Commit 0a1a745

Add files via upload
1 parent 95a2558 commit 0a1a745

File tree

3 files changed: +156 -0 lines changed


Diff for: README.md (+44 lines)
@@ -0,0 +1,44 @@
# Detect an object with OpenCV-Python

## _JKL404_

[![Build Status](https://travis-ci.org/joemccann/dillinger.svg?branch=master)](https://travis-ci.org/joemccann/dillinger)

OpenCV is a large open-source library for computer vision, machine learning, and image processing, and it plays a major role in the real-time operation that today's systems depend on. With it, you can process images and videos to identify objects, faces, or even human handwriting. This project focuses on detecting objects.

![Cloud Vision](https://nanonets.com/blog/content/images/2021/04/cloud--1--1.gif)
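As a minimal sketch of the OpenCV loop this project builds on (no detection yet, just webcam capture and display; `main.py` adds the YOLO step inside this kind of loop):

```python
import cv2

# Open the default webcam and show frames until Esc is pressed
cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow("Preview", frame)
    if cv2.waitKey(1) == 27:  # 27 == Esc
        break
cap.release()
cv2.destroyAllWindows()
```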
## Features

- Use machine learning to understand your images with industry-leading prediction accuracy
- Train machine learning models that classify images by your custom labels
- Detect objects and faces (a small face-detection sketch follows this list)
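This repository does its object detection with a YOLOv3-tiny model (see `main.py`); purely as an illustration of the face-detection capability listed above, here is a small sketch using the Haar cascade bundled with OpenCV. The file names `sample.jpg` and `faces_out.jpg` are placeholders:

```python
import cv2

# Haar-cascade face detector that ships with OpenCV
cascade_path = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(cascade_path)

img = cv2.imread("sample.jpg")  # placeholder test image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

# Draw a green box around every detected face and save the result
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("faces_out.jpg", img)
```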
## Installation

![google cloud](https://images.idgesg.net/images/article/2018/04/google-vision-api-screen-1-100755937-medium.jpg?auto=webp&quality=85,70)

Install the dependencies and start the server.

```sh
pip install flask opencv-python numpy matplotlib
python main.py
```

Verify the deployment by navigating to your server address in your preferred browser.

```sh
127.0.0.1:5000
```
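To check from the command line instead, a request to the index route should return the rendered `index.html` (this assumes Flask's default development port, since `app.run()` is called without arguments):

```sh
curl http://127.0.0.1:5000/
```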
## Demo

## License

MIT

**Free Software, Hell Yeah!**

Diff for: app.py (+10 lines)
@@ -0,0 +1,10 @@
from flask import Flask

# Folder where uploaded images are stored
UPLOAD_FOLDER = 'static/uploads/'

app = Flask(__name__)
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # cap uploads at 16 MB

#export GOOGLE_APPLICATION_CREDENTIALS=
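A note on `MAX_CONTENT_LENGTH`: with this setting, Flask rejects any request body larger than 16 MB with a 413 error. A minimal sketch of catching that case, shown here only as an illustration (this handler is not part of the commit):

```python
from flask import jsonify
from werkzeug.exceptions import RequestEntityTooLarge

@app.errorhandler(RequestEntityTooLarge)
def handle_oversized_upload(e):
    # Raised when an upload exceeds MAX_CONTENT_LENGTH (16 MB here)
    return jsonify(error="File exceeds the 16 MB upload limit"), 413
```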

Diff for: main.py (+102 lines)
@@ -0,0 +1,102 @@
import os
from app import app
import urllib.request
from werkzeug.utils import secure_filename
from flask import Flask, flash, request, redirect, url_for, render_template


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/textdetect/')
def upload_form():
    return render_template('upload.html')


@app.route('/textdetect/', methods=['POST'])
def upload_image():
    # Run the main scan: read webcam frames and detect objects with YOLOv3-tiny
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    import time

    font = cv2.FONT_HERSHEY_SIMPLEX
    starting_time = time.time()
    frame_id = 0

    # Load the YOLOv3-tiny network from its weights and config files
    net = cv2.dnn.readNet("./weights/yolov3-tiny.weights", "./configuration/yolov3-tiny.cfg")

    ### Change here for custom classes for a trained model
    classes = []
    mylist = []  # labels detected so far (kept free of duplicates)
    with open("./configuration/coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]

    # Open the webcam and pick a random colour per class
    cap = cv2.VideoCapture(0)
    colors = np.random.uniform(0, 255, size=(len(classes), 3))

    while True:
        _, img = cap.read()
        frame_id += 1
        img = cv2.resize(img, (1280, 720))
        height, width, _ = img.shape

        # Convert the frame to a normalised 416x416 blob and run a forward pass
        blob = cv2.dnn.blobFromImage(img, 1/255, (416, 416), (0, 0, 0), swapRB=True, crop=False)
        net.setInput(blob)
        output_layers_name = net.getUnconnectedOutLayersNames()
        layerOutputs = net.forward(output_layers_name)

        boxes = []
        confidences = []
        class_ids = []

        # Decode detections: each row is [cx, cy, w, h, objectness, class scores...]
        for output in layerOutputs:
            for detection in output:
                score = detection[5:]
                class_id = np.argmax(score)
                confidence = score[class_id]
                if confidence > 0.1:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # Non-maximum suppression to drop overlapping boxes
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.8, 0.3)
        for i in range(len(boxes)):
            if i in indexes:
                x, y, w, h = boxes[i]
                label = str(classes[class_ids[i]])
                confidence = confidences[i]
                color = colors[class_ids[i]]
                cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
                cv2.putText(img, label + " " + str(round(confidence, 2)), (x, y + 30), font, 3, color, 3)
                # Record each label once for the results page
                if label not in mylist:
                    mylist.append(label)

        # Overlay FPS and exit hint, then show the annotated frame
        elapsed_time = time.time() - starting_time
        fps = frame_id / elapsed_time
        cv2.putText(img, "FPS: " + str(round(fps, 2)), (40, 670), font, .7, (0, 255, 255), 1)
        cv2.putText(img, "press [esc] to exit", (40, 690), font, .45, (0, 255, 255), 1)
        cv2.imshow("Image", img)

        key = cv2.waitKey(1)
        if key == 27:
            print("[button pressed] ///// [esc].")
            print("[feedback] ///// Video capturing successfully stopped")
            break

    cap.release()
    cv2.destroyAllWindows()
    return render_template('message.html', itemss=mylist)


if __name__ == "__main__":
    app.run()
