-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathnumber.py
414 lines (315 loc) · 13.8 KB
/
number.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
import cv2
import imutils
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
import easyocr
import pandas as pd
import os.path
from local_utils import detect_lp
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import load_model as load_keras_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from os.path import splitext,basename
from PIL import Image, ImageChops
import math
from scipy import spatial
import streamlit as st
import requests
import io
import base64
import pickle
from pathlib import Path
import streamlit_authenticator as stauth
import hashlib
#----------------------------- CACHE DATABASE -----------------------------------
# In-memory gallery of reference face images (OpenCV/BGR numpy arrays).
# Filled by upload_database_faces(); read by check_face() for matching.
database_faces=[]
# -----------------------------------------------------------------------------------
# ------------------------- FACE DETECTION and Face ID with database images ---------
#perform face detection with the help of faceplusAPI
def dofaceplusAPI(image):
    """Detect faces in *image* with the Face++ detect API, score each one
    against the in-memory ``database_faces`` gallery via ``check_face``,
    draw the box and match percentage, and return the annotated image
    converted BGR->RGB for display.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image (h, w, c); annotated in place.
    """
    original_image = image
    http_url = 'https://api-us.faceplusplus.com/facepp/v3/detect'
    # SECURITY(review): API credentials are hard-coded in source control.
    # Move them to environment variables / st.secrets before publishing.
    key = "LXCYl-Fuc_erkrCY_iQfhYEYfttcXn4P"
    secret = "WY6zwvR8wZ42wX621cGvIIo-JC8YN5NS"
    # Convert the numpy array to a PIL image so it can be JPEG-encoded;
    # the API expects a base64-encoded JPEG payload.
    h, w, c = image.shape
    image = Image.fromarray(np.reshape(image, (h, w, c)), 'RGB')
    buf = io.BytesIO()
    image.save(buf, format='JPEG')
    byte_im = base64.b64encode(buf.getvalue())
    payload = {
        'api_key': key,
        'api_secret': secret,
        'image_base64': byte_im,
    }
    try:
        # Send request to the API and surface HTTP errors explicitly.
        res = requests.post(http_url, data=payload)
        res.raise_for_status()
        json_response = res.json()
        for face_info in json_response["faces"]:
            # BUGFIX: read the rectangle fields by name instead of relying
            # on dict insertion order of ``.values()``.
            rect = face_info["face_rectangle"]
            top, left = rect["top"], rect["left"]
            width, height = rect["width"], rect["height"]
            # numpy indexing is [row, col] == [y, x]: rows are top..top+height.
            face = original_image[top:top + height, left:left + width]
            # Compare the cropped face with faces present in the database.
            match = check_face(face)
            percentage = match * 100
            # Green box for a confident match (>95%), red otherwise.
            color = (0, 255, 0) if percentage > 95 else (255, 0, 0)
            cv2.rectangle(original_image, (left, top),
                          (left + width, top + height), color, 2)
            cv2.putText(original_image, str(round(percentage, 2)), (left, top),
                        fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=1,
                        color=(0, 95, 255))
    except Exception as e:
        # Best effort: keep the app running even if the API call fails.
        print('Error:')
        print(e)
    # OpenCV images are BGR; convert for correct colours in Streamlit.
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    return original_image
# Compare detected faces with faces present in the database
def check_face(face):
    """Return the best cosine similarity (0..1) between *face* and any face
    detected in the images stored in ``database_faces``.

    Each database image is sent to the Face++ detect API; every detected
    face is resized to 200x200 and compared with *face* as a flattened,
    normalised pixel vector.
    """
    http_url = 'https://api-us.faceplusplus.com/facepp/v3/detect'
    # SECURITY(review): credentials hard-coded in source; move to secrets.
    key = "LXCYl-Fuc_erkrCY_iQfhYEYfttcXn4P"
    secret = "WY6zwvR8wZ42wX621cGvIIo-JC8YN5NS"
    max_similarity = 0
    # NOTE: cv2.resize takes (width, height); both are 200 so order is moot.
    dim = (200, 200)
    for db_image in database_faces:
        original_img = db_image
        # Encode the database image as a base64 JPEG for the API.
        rgb = cv2.cvtColor(db_image, cv2.COLOR_BGR2RGB)
        h, w, c = rgb.shape
        pil_img = Image.fromarray(np.reshape(rgb, (h, w, c)), 'RGB')
        buf = io.BytesIO()
        pil_img.save(buf, format='JPEG')
        byte_im = base64.b64encode(buf.getvalue())
        payload = {
            'api_key': key,
            'api_secret': secret,
            'image_base64': byte_im,
        }
        try:
            res = requests.post(http_url, data=payload)
            json_response = res.json()
            for face_info in json_response["faces"]:
                # BUGFIX: read rectangle fields by name instead of relying
                # on dict insertion order of ``.values()``.
                rect = face_info["face_rectangle"]
                top, left = rect["top"], rect["left"]
                width, height = rect["width"], rect["height"]
                # numpy indexing is [row, col]; crop the detected face.
                db_face = original_img[top:top + height, left:left + width]
                # Resize both faces to a common size before comparison.
                # BUGFIX: do not mutate the *face* parameter in the loop.
                probe = cv2.resize(face, dim)
                candidate = cv2.resize(db_face, dim)
                vec1 = np.asarray(probe).flatten() / 255
                vec2 = np.asarray(candidate).flatten() / 255
                # Cosine similarity = 1 - cosine distance.
                sim = 1 - spatial.distance.cosine(vec1, vec2)
                if sim > max_similarity:
                    max_similarity = sim
        except Exception as e:
            # Best effort: skip database images the API cannot process.
            print('Error:')
            print(e)
    return max_similarity
# Upload images in to database which will be used for ID purpose
def upload_database_faces():
    """Let the user upload gallery JPEGs and append each decoded image
    (OpenCV/BGR) to the module-level ``database_faces`` list.

    Returns
    -------
    bool
        True when at least one file was uploaded, otherwise False.
    """
    uploaded_files = st.file_uploader("Choose database images", type="jpg", accept_multiple_files=True)
    # BUGFIX: with accept_multiple_files=True the uploader returns an empty
    # list (not None) when nothing is chosen, so the original
    # ``is not None`` test reported success even with zero files.
    if not uploaded_files:
        return False
    for uploaded_file in uploaded_files:
        # Convert the uploaded bytes to an OpenCV image (flag 1 = colour/BGR).
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        opencv_image = cv2.imdecode(file_bytes, 1)
        database_faces.append(opencv_image)
    return True
# ----------------------------------------------------------------------------------
#USER AUTHENTICATION---
#names = ["SecurityGuard1", "SecurityGuard2"]
#usernames = ["SecurityGuard1", "SecurityGuard2"]
# load hashed passwords
#file_path = Path(__file__).parent / "hashed_pw.pkl"
#with file_path.open("rb") as file:
#hashed_passwords = pickle.load(file)
#authenticator = stauth.Authenticate(names, usernames, hashed_passwords,
#"dashboard", "abcdef", cookie_expiry_days=30)
#name, authentication_status, username = authenticator.login("Login", "main")
#if authentication_status == False:
#st.error("Username/password is incorrect")
#if authentication_status == None:
# st.warning("Please enter your username and password")
#if authentication_status:
# ---------------------------------------------------------------------------
# Firebase modules and configuration.
# NOTE(review): these imports belong at the top of the file; they are kept
# here so the surrounding script keeps working unchanged.
import pyrebase
from datetime import datetime

# SECURITY(review): project keys are committed in source control; move them
# to st.secrets or environment variables before deployment.
# The original file defined this dict twice (the first copy lacked
# 'databaseURL' and was immediately overwritten); only the complete,
# winning definition is kept.
firebaseConfig = {
    'apiKey': "AIzaSyDUhH5kaFObMYlcjPylLbY5FOeEU4mOhsE",
    'authDomain': "anpr-d6144.firebaseapp.com",
    'projectId': "anpr-d6144",
    'databaseURL': "https://console.firebase.google.com/u/0/project/anpr-d6144/database/anpr-d6144-default-rtdb/data/~2F",
    'storageBucket': "anpr-d6144.appspot.com",
    'messagingSenderId': "113685971720",
    'appId': "1:113685971720:web:a6f4f83f3279310bf6a2bd",
    'measurementId': "G-5TN897NJ44"
}
#Authentication
def login():
    # NOTE(review): this definition is dead code -- it is immediately
    # shadowed by the second ``def login():`` below, and the handles
    # created here are locals that are discarded on return. The Firebase
    # initialisation was presumably meant to live at module level so the
    # second login() could use ``auth`` -- confirm and restructure.
    firebase = pyrebase.initialize_app(firebaseConfig)
    auth = firebase.auth()
    # database and storage handles (unused)
    db = firebase.database()
    storage = firebase.storage()
def login():
    """Render the sidebar Login / Sign-up form and authenticate the user
    against Firebase email/password auth.

    Side effects: draws Streamlit sidebar widgets and success/error
    banners; performs network calls to Firebase.
    """
    st.sidebar.title("ANPR APP Login")
    # BUGFIX: the original referenced an ``auth`` handle that was never in
    # scope inside this function; initialise Firebase auth locally.
    firebase = pyrebase.initialize_app(firebaseConfig)
    auth = firebase.auth()
    choice = st.sidebar.selectbox('login/Signup', ['Login', 'Sign up'])
    email = st.sidebar.text_input('Please enter your email address')
    password = st.sidebar.text_input('Please enter your password')
    if choice == 'Sign up':
        name = st.sidebar.text_input('Please input your name', value='Default')
        if st.sidebar.button('Create my account'):
            try:
                auth.create_user_with_email_and_password(email, password)
                st.success('Your account is created successfully')
                st.balloons()
                # Sign the freshly created user in straight away.
                # BUGFIX: removed the hard-coded debug email override that
                # clobbered the address the user typed.
                auth.sign_in_with_email_and_password(email, password)
            except Exception as e:
                st.error('Could not create the account: {}'.format(e))
    else:
        if st.sidebar.button('Sign in'):
            try:
                user = auth.sign_in_with_email_and_password(email, password)
                # BUGFIX: the sign-in response is keyed by field name
                # ('localId'), not by the user's uid value.
                user_id = user['localId']
                st.success("Logged in successfully!")
            except Exception:
                # BUGFIX: the original showed this banner unconditionally,
                # even directly after the success message.
                st.error("Incorrect email or password")
#---------------------------- Number Plate Detection --------------------------------
# Sidebar navigation shared by every page; the selected value drives the
# page dispatch at the bottom of the file.
st.sidebar.title("ANPR Dashboard")
page = st.sidebar.selectbox("Select a page", ["Homepage","Number Plate Recognition", "Vehicle records"])
# Define the function to display the homepage
def show_homepage():
    """Render the landing page: a short description of the tool plus a
    copyright footer."""
    st.title("ANPR Dashboard")
    intro_lines = (
        "Number Plate Extraction",
        "With this tool, you can upload an image of a vehicle and extract the number plate from the image.",
        "To get started, select 'Number Plate Recognition' from the sidebar and upload an image.",
    )
    for line in intro_lines:
        st.write(line)
    st.markdown("""
---
© 2023 KABU ANPR App
""")
def show_numberplate():
    """Render the heading and instructions for the recognition page."""
    st.title("Number Plate Recognition")
    st.write("Upload an image to perform Number plate extraction.")
# Update ANPR using Numberplaterecognition
# Define the function to display the statistics page
def show_stats():
    """Show the stored vehicle records table plus summary statistics."""
    st.title("Vehicle data")
    st.write("View the vehicle records")
    # Guard against a missing records file instead of crashing the page.
    try:
        data = pd.read_csv("vehicle_records.csv")
    except FileNotFoundError:
        st.error("vehicle_records.csv was not found.")
        return
    # Display the data in a table.
    st.write(data)
    st.write("Basic Statistics:")
    # BUGFIX: the original printed the heading but never any statistics.
    st.write(data.describe(include='all'))
# Define the function to display the license plate recognition page
def numberplateRecognizer(image):
    """Send *image* to the Plate Recognizer API, show each detected plate
    crop with its text, and display the fully annotated image.

    Parameters
    ----------
    image : numpy.ndarray
        BGR image (h, w, c); annotated in place.
    """
    original_image = image
    # Convert the numpy array to a PIL image and JPEG-encode it for upload.
    h, w, c = image.shape
    image = Image.fromarray(np.reshape(image, (h, w, c)), 'RGB')
    buf = io.BytesIO()
    image.save(buf, format='JPEG')
    byte_im = buf.getvalue()
    # SECURITY(review): API token hard-coded in source; move to secrets.
    response = requests.post(
        'https://api.platerecognizer.com/v1/plate-reader/',
        files=dict(upload=byte_im),
        headers={'Authorization': 'Token f184ad4cc54df68357e7873baea13a1d596ad6e4'}
    )
    json_response = response.json()
    # ROBUSTNESS: tolerate an error response with no 'results' key.
    results = json_response.get('results', [])
    st.subheader("Total Number Plates Detected in this image:{}".format(len(results)))
    st.subheader("Number Plate Information")
    for plate in results:
        car_plate = plate['plate']
        # BUGFIX: read the box fields by name instead of relying on dict
        # insertion order of ``.values()``.
        box = plate['box']
        xmin, ymin = box['xmin'], box['ymin']
        xmax, ymax = box['xmax'], box['ymax']
        # Show the cropped plate and its recognised text.
        numberplate_crop = image.crop((xmin, ymin, xmax, ymax))
        st.image(numberplate_crop)
        st.write("Number Plate Digits: {}".format(car_plate))
        # Draw the bounding box and the plate text above it.
        cv2.rectangle(original_image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 5)
        cv2.putText(original_image, car_plate, (xmin, ymin),
                    fontFace=cv2.FONT_HERSHEY_TRIPLEX, fontScale=1,
                    color=(0, 255, 0))
    # Convert BGR to RGB for correct colours in Streamlit.
    original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
    st.image(original_image)
#-----------------------------------------------------------------------------------
# Streamlit Main app
def main():
    """Streamlit entry point for the recognition page: upload an image,
    optionally run face matching against the uploaded gallery, and run
    number-plate extraction on demand."""
    st.title("Automatic Number Plate Recognition")
    uploaded_file = st.file_uploader("Choose a image file", type="jpg")
    if uploaded_file is None:
        return
    # Convert the uploaded bytes to an OpenCV image (flag 1 = colour/BGR).
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    original = cv2.imdecode(file_bytes, 1)
    # IMPROVEMENT: decode once and copy -- the annotation helpers draw on
    # their argument in place, so each consumer needs its own array (the
    # original decoded the same bytes twice for the same effect).
    opencv_image = original.copy()
    # Display the image after loading (converted to RGB for Streamlit).
    st.image(cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB))
    # Upload gallery images and, if any were provided, run face matching.
    if upload_database_faces() and len(database_faces) != 0:
        st.image(dofaceplusAPI(opencv_image))
    # On demand, perform number-plate extraction and OCR.
    if st.button("Extract Number Plate"):
        numberplateRecognizer(original)
# Display the appropriate page based on the user's selection
# Route to the page the user picked in the sidebar; any selection other
# than the two explicitly named pages falls through to the recognition view.
{"Homepage": show_homepage, "Vehicle records": show_stats}.get(page, show_numberplate)()

if __name__ == "__main__":
    main()