diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..01f8dcd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+env
+
+.DS_Store
+
+__pycache__
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index 62e5bf4..b55540c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2019 Mahesh Sawant
+Copyright (c) 2019 Mahesh Sawant | Mahimai Raja J
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 791dae7..9e956b9 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Gender-and-Age-Detection
+# Gender and Age Detection
For this Python project, I used the Adience dataset; the dataset is available in the public domain and you can find it here. It serves as a benchmark for face photos and covers various real-world imaging conditions such as noise, lighting, pose, and appearance. The images were collected from Flickr albums and are distributed under the Creative Commons (CC) license. The dataset has a total of 26,580 photos of 2,284 subjects in eight age ranges (as mentioned above) and is about 1GB in size. The models used here were trained on this dataset.
-For face detection, we have a .pb file- this is a protobuf file (protocol buffer); it holds the graph definition and the trained weights of the model. We can use this to run the trained model. And while a .pb file holds the protobuf in binary format, one with the .pbtxt extension holds it in text format. These are TensorFlow files. For age and gender, the .prototxt files describe the network configuration and the .caffemodel file defines the internal states of the parameters of the layers.
@@ -41,13 +36,13 @@ Note: The Image should be present in same folder where all the files are present
NOTE: I downloaded the images from Google; if you have any query or problem I can remove them. I used them for educational purposes only.
- >python detect.py --image girl1.jpg
+ >python demo.py --image asset/img/girl1.jpg
Gender: Female
Age: 25-32 years
- >python detect.py --image girl2.jpg
+ >python demo.py --image asset/img/girl2.jpg
Gender: Female
Age: 8-12 years
- >python detect.py --image kid1.jpg
+ >python demo.py --image asset/img/kid1.jpg
Gender: Male
Age: 4-6 years
- >python detect.py --image kid2.jpg
+ >python demo.py --image asset/img/kid2.jpg
Gender: Female
Age: 4-6 years
- >python detect.py --image man1.jpg
+ >python demo.py --image asset/img/man1.jpg
Gender: Male
Age: 38-43 years
- >python detect.py --image man2.jpg
+ >python demo.py --image asset/img/man2.jpg
Gender: Male
Age: 25-32 years
- >python detect.py --image woman1.jpg
+ >python demo.py --image asset/img/woman1.jpg
Gender: Female
Age: 38-43 years
diff --git a/girl1.jpg b/asset/img/girl1.jpg
similarity index 100%
rename from girl1.jpg
rename to asset/img/girl1.jpg
diff --git a/girl2.jpg b/asset/img/girl2.jpg
similarity index 100%
rename from girl2.jpg
rename to asset/img/girl2.jpg
diff --git a/kid1.jpg b/asset/img/kid1.jpg
similarity index 100%
rename from kid1.jpg
rename to asset/img/kid1.jpg
diff --git a/kid2.jpg b/asset/img/kid2.jpg
similarity index 100%
rename from kid2.jpg
rename to asset/img/kid2.jpg
diff --git a/man1.jpg b/asset/img/man1.jpg
similarity index 100%
rename from man1.jpg
rename to asset/img/man1.jpg
diff --git a/man2.jpg b/asset/img/man2.jpg
similarity index 100%
rename from man2.jpg
rename to asset/img/man2.jpg
diff --git a/woman1.jpg b/asset/img/woman1.jpg
similarity index 100%
rename from woman1.jpg
rename to asset/img/woman1.jpg
diff --git a/age_deploy.prototxt b/asset/model/age_deploy.prototxt
similarity index 100%
rename from age_deploy.prototxt
rename to asset/model/age_deploy.prototxt
diff --git a/age_net.caffemodel b/asset/model/age_net.caffemodel
similarity index 100%
rename from age_net.caffemodel
rename to asset/model/age_net.caffemodel
diff --git a/gender_deploy.prototxt b/asset/model/gender_deploy.prototxt
similarity index 100%
rename from gender_deploy.prototxt
rename to asset/model/gender_deploy.prototxt
diff --git a/gender_net.caffemodel b/asset/model/gender_net.caffemodel
similarity index 100%
rename from gender_net.caffemodel
rename to asset/model/gender_net.caffemodel
diff --git a/opencv_face_detector.pbtxt b/asset/model/opencv_face_detector.pbtxt
similarity index 100%
rename from opencv_face_detector.pbtxt
rename to asset/model/opencv_face_detector.pbtxt
diff --git a/opencv_face_detector_uint8.pb b/asset/model/opencv_face_detector_uint8.pb
similarity index 100%
rename from opencv_face_detector_uint8.pb
rename to asset/model/opencv_face_detector_uint8.pb
diff --git a/demo.py b/demo.py
new file mode 100644
index 0000000..749e4fd
--- /dev/null
+++ b/demo.py
@@ -0,0 +1,14 @@
+from utils.detect import display_frame
+import argparse
+import cv2
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--image', help='path to the input image; omit to use the webcam')
+ args=parser.parse_args()
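+ # Fall back to the default webcam (device 0) when no --image path is given.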
+ video = cv2.VideoCapture(args.image if args.image else 0)
+ padding : int = 20
+ display_frame(video, padding)
+ cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/detect.py b/detect.py
deleted file mode 100644
index 268408d..0000000
--- a/detect.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#A Gender and Age Detection program by Mahesh Sawant
-
-import cv2
-import math
-import argparse
-
-def highlightFace(net, frame, conf_threshold=0.7):
- frameOpencvDnn=frame.copy()
- frameHeight=frameOpencvDnn.shape[0]
- frameWidth=frameOpencvDnn.shape[1]
- blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
-
- net.setInput(blob)
- detections=net.forward()
- faceBoxes=[]
- for i in range(detections.shape[2]):
- confidence=detections[0,0,i,2]
- if confidence>conf_threshold:
- x1=int(detections[0,0,i,3]*frameWidth)
- y1=int(detections[0,0,i,4]*frameHeight)
- x2=int(detections[0,0,i,5]*frameWidth)
- y2=int(detections[0,0,i,6]*frameHeight)
- faceBoxes.append([x1,y1,x2,y2])
- cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8)
- return frameOpencvDnn,faceBoxes
-
-
-parser=argparse.ArgumentParser()
-parser.add_argument('--image')
-
-args=parser.parse_args()
-
-faceProto="opencv_face_detector.pbtxt"
-faceModel="opencv_face_detector_uint8.pb"
-ageProto="age_deploy.prototxt"
-ageModel="age_net.caffemodel"
-genderProto="gender_deploy.prototxt"
-genderModel="gender_net.caffemodel"
-
-MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746)
-ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
-genderList=['Male','Female']
-
-faceNet=cv2.dnn.readNet(faceModel,faceProto)
-ageNet=cv2.dnn.readNet(ageModel,ageProto)
-genderNet=cv2.dnn.readNet(genderModel,genderProto)
-
-video=cv2.VideoCapture(args.image if args.image else 0)
-padding=20
-while cv2.waitKey(1)<0 :
- hasFrame,frame=video.read()
- if not hasFrame:
- cv2.waitKey()
- break
-
- resultImg,faceBoxes=highlightFace(faceNet,frame)
- if not faceBoxes:
- print("No face detected")
-
- for faceBox in faceBoxes:
- face=frame[max(0,faceBox[1]-padding):
- min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding)
- :min(faceBox[2]+padding, frame.shape[1]-1)]
-
- blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)
- genderNet.setInput(blob)
- genderPreds=genderNet.forward()
- gender=genderList[genderPreds[0].argmax()]
- print(f'Gender: {gender}')
-
- ageNet.setInput(blob)
- agePreds=ageNet.forward()
- age=ageList[agePreds[0].argmax()]
- print(f'Age: {age[1:-1]} years')
-
- cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA)
- cv2.imshow("Detecting age and gender", resultImg)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..ea395bd
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+# Python version: 3.9.12
+
+numpy==1.25.0
+opencv-python==4.8.0.74
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/utils/detect.py b/utils/detect.py
new file mode 100644
index 0000000..becda1d
--- /dev/null
+++ b/utils/detect.py
@@ -0,0 +1,89 @@
+import cv2
+from typing import List
+
+def highlightFace(net, frame, conf_threshold=0.7):
+ '''
+ Detect faces in a frame, draw a bounding box around each one, and return the annotated frame with the face boxes
+ '''
+ frameOpencvDnn=frame.copy()
+ frameHeight=frameOpencvDnn.shape[0]
+ frameWidth=frameOpencvDnn.shape[1]
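+ # Resize to the detector's 300x300 input, subtract the channel means [104, 117, 123], and swap BGR to RGB (swapRB=True).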
+ blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
+
+ net.setInput(blob)
+ detections=net.forward()
+ faceBoxes=[]
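+ # detections has shape [1, 1, N, 7]; each row is [image_id, label, confidence, x1, y1, x2, y2] with coordinates normalized to [0, 1].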
+ for i in range(detections.shape[2]):
+ confidence=detections[0,0,i,2]
+ if confidence>conf_threshold:
+ x1=int(detections[0,0,i,3]*frameWidth)
+ y1=int(detections[0,0,i,4]*frameHeight)
+ x2=int(detections[0,0,i,5]*frameWidth)
+ y2=int(detections[0,0,i,6]*frameHeight)
+ faceBoxes.append([x1,y1,x2,y2])
+ cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8)
+ return frameOpencvDnn,faceBoxes
+
+def load_model_network():
+ '''
+ Load the face, age and gender networks with OpenCV's dnn module
+ from the bundled pbtxt/pb and prototxt/caffemodel files
+ '''
+ faceProto : str = "asset/model/opencv_face_detector.pbtxt"
+ faceModel : str = "asset/model/opencv_face_detector_uint8.pb"
+ ageProto : str = "asset/model/age_deploy.prototxt"
+ ageModel : str = "asset/model/age_net.caffemodel"
+ genderProto : str = "asset/model/gender_deploy.prototxt"
+ genderModel : str = "asset/model/gender_net.caffemodel"
+
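+ # cv2.dnn.readNet infers the framework from the file types: TensorFlow for the .pb/.pbtxt face detector, Caffe for the age and gender models.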
+ faceNet = cv2.dnn.readNet(faceModel,faceProto)
+ ageNet = cv2.dnn.readNet(ageModel,ageProto)
+ genderNet = cv2.dnn.readNet(genderModel,genderProto)
+ return faceNet, ageNet, genderNet
+
+def display_frame(video, padding):
+ '''
+ Read frames from the capture, annotate each detected face with its predicted gender and age, and display the result
+ '''
+ MODEL_MEAN_VALUES : tuple = (78.4263377603, 87.7689143744, 114.895847746)
+ genderList = ['Male','Female']
+ ageList : List[str] = ['(0-2)', '(4-6)', '(8-12)',
+ '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
+
+ faceNet, ageNet, genderNet = load_model_network()
+
+ while cv2.waitKey(1) < 0:
+ hasFrame, frame = video.read()
+ if not hasFrame:
+ cv2.waitKey()
+ break
+
+ resultImg,faceBoxes = highlightFace(faceNet,frame)
+ if not faceBoxes:
+ print("No face detected")
+
+ for faceBox in faceBoxes:
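+ # Crop the detected face with `padding` pixels of context, clamped to the frame bounds.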
+ face = frame[max(0, faceBox[1]-padding):min(faceBox[3]+padding, frame.shape[0]-1),
+ max(0, faceBox[0]-padding):min(faceBox[2]+padding, frame.shape[1]-1)]
+
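+ # The age and gender Caffe nets take 227x227 BGR inputs (hence swapRB=False), normalized by the training-set channel means.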
+ blob = cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False)
+ genderNet.setInput(blob)
+ genderPreds = genderNet.forward()
+ gender : str = genderList[genderPreds[0].argmax()]
+ print(f'Gender: {gender}')
+
+ ageNet.setInput(blob)
+ agePreds = ageNet.forward()
+ age : str = ageList[agePreds[0].argmax()]
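+ # ageList entries look like '(25-32)'; age[1:-1] strips the parentheses for display.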
+ print(f'Age: {age[1:-1]} years')
+
+ cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA)
+ cv2.imshow("Detecting age and gender", resultImg)
+
+