diff --git a/fps_detector b/fps_detector
new file mode 100644
index 0000000..9928a4a
--- /dev/null
+++ b/fps_detector
@@ -0,0 +1,38 @@
+import cv2
+import mediapipe as mp
+import time  # used to time each frame for the FPS counter
+
+cap = cv2.VideoCapture(0)
+
+mpHands = mp.solutions.hands
+hands = mpHands.Hands()
+mpDraw = mp.solutions.drawing_utils
+
+
+pTime = 0  # previous frame timestamp
+cTime = 0  # current frame timestamp
+
+while True:
+
+    success, img = cap.read()
+    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    results = hands.process(imgRGB)
+
+
+    if results.multi_hand_landmarks:
+        for handlmks in results.multi_hand_landmarks:
+            mpDraw.draw_landmarks(img, handlmks, mpHands.HAND_CONNECTIONS)
+
+    cTime = time.time()  # current timestamp
+    fps = 1 / (cTime - pTime)  # FPS = 1 / time taken to process this frame
+    pTime = cTime  # update the previous timestamp
+
+    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 0, 255), 3)  # draw the FPS value at (10, 70), font scale 3, purple, thickness 3
+
+
+
+    cv2.imshow("image", img)
+
+    key = cv2.waitKey(1)
+    if key == ord('q'):
+        break
diff --git a/zoom.py b/zoom.py
new file mode 100644
index 0000000..c1d9df1
--- /dev/null
+++ b/zoom.py
@@ -0,0 +1,50 @@
+import cv2
+from cvzone.HandTrackingModule import HandDetector
+
+cap = cv2.VideoCapture(0)
+cap.set(3, 1280)  # capture width
+cap.set(4, 720)   # capture height
+
+detector = HandDetector(detectionCon=1)
+startDist = None  # distance between the hands when the zoom gesture starts
+scale = 0
+cx, cy = 500, 500
+while True:
+    success, img = cap.read()
+    hands, img = detector.findHands(img)
+    img1 = cv2.imread("download.jpg")
+
+    if len(hands) == 2:
+        # print(detector.fingersUp(hands[0]), detector.fingersUp(hands[1]))
+        if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \
+                detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]:
+            # print("Zoom Gesture")
+            lmList1 = hands[0]["lmList"]
+            lmList2 = hands[1]["lmList"]
+            # point 8 is the tip of the index finger
+            if startDist is None:
+                # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
+                length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
+
+                startDist = length
+
+            # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
+            length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
+
+            scale = int((length - startDist) // 2)  # zoom amount in pixels
+            cx, cy = info[4:]  # midpoint between the two hand centers
+            print(scale)
+    else:
+        startDist = None
+
+    try:
+        h1, w1, _ = img1.shape
+        newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2  # keep dimensions even
+        img1 = cv2.resize(img1, (newW, newH))
+
+        img[cy - newH // 2:cy + newH // 2, cx - newW // 2:cx + newW // 2] = img1
+    except:  # skip frames where the resized overlay falls outside the camera frame
+        pass
+
+    cv2.imshow("Image", img)
+    cv2.waitKey(1)
diff --git a/zoom_func.py b/zoom_func.py
new file mode 100644
index 0000000..c1d9df1
--- /dev/null
+++ b/zoom_func.py
@@ -0,0 +1,50 @@
+import cv2
+from cvzone.HandTrackingModule import HandDetector
+
+cap = cv2.VideoCapture(0)
+cap.set(3, 1280)  # capture width
+cap.set(4, 720)   # capture height
+
+detector = HandDetector(detectionCon=1)
+startDist = None  # distance between the hands when the zoom gesture starts
+scale = 0
+cx, cy = 500, 500
+while True:
+    success, img = cap.read()
+    hands, img = detector.findHands(img)
+    img1 = cv2.imread("download.jpg")
+
+    if len(hands) == 2:
+        # print(detector.fingersUp(hands[0]), detector.fingersUp(hands[1]))
+        if detector.fingersUp(hands[0]) == [1, 1, 0, 0, 0] and \
+                detector.fingersUp(hands[1]) == [1, 1, 0, 0, 0]:
+            # print("Zoom Gesture")
+            lmList1 = hands[0]["lmList"]
+            lmList2 = hands[1]["lmList"]
+            # point 8 is the tip of the index finger
+            if startDist is None:
+                # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
+                length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
+
+                startDist = length
+
+            # length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)
+            length, info, img = detector.findDistance(hands[0]["center"], hands[1]["center"], img)
+
+            scale = int((length - startDist) // 2)  # zoom amount in pixels
+            cx, cy = info[4:]  # midpoint between the two hand centers
+            print(scale)
+    else:
+        startDist = None
+
+    try:
+        h1, w1, _ = img1.shape
+        newH, newW = ((h1 + scale) // 2) * 2, ((w1 + scale) // 2) * 2  # keep dimensions even
+        img1 = cv2.resize(img1, (newW, newH))
+
+        img[cy - newH // 2:cy + newH // 2, cx - newW // 2:cx + newW // 2] = img1
+    except:  # skip frames where the resized overlay falls outside the camera frame
+        pass
+
+    cv2.imshow("Image", img)
+    cv2.waitKey(1)
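
Note on the overlay step in zoom.py and zoom_func.py: the try/except silently drops the overlay whenever the resized image would spill past the frame border, or when a strongly negative scale makes the target size non-positive. Below is a minimal sketch of a bounds-clamped alternative; it assumes the same img, img1, cx, cy and scale variables as in the patch, and the helper name overlay_clamped is hypothetical, not part of cvzone or OpenCV.

import cv2


def overlay_clamped(img, img1, cx, cy, scale):
    # Hypothetical helper (not from cvzone/OpenCV): resize img1 by `scale` pixels
    # and paste it centred on (cx, cy), cropping whatever falls outside img.
    h1, w1, _ = img1.shape
    newH = max(2, ((h1 + scale) // 2) * 2)  # keep dimensions even and positive
    newW = max(2, ((w1 + scale) // 2) * 2)
    img1 = cv2.resize(img1, (newW, newH))

    H, W, _ = img.shape
    # Desired (possibly out-of-bounds) region in the camera frame.
    x1, y1 = cx - newW // 2, cy - newH // 2
    x2, y2 = x1 + newW, y1 + newH
    # Crop img1 by however much the region sticks out on each side.
    sx1, sy1 = max(0, -x1), max(0, -y1)
    sx2, sy2 = newW - max(0, x2 - W), newH - max(0, y2 - H)
    # Clamp the region itself to the frame.
    x1, y1 = max(0, x1), max(0, y1)
    x2, y2 = min(W, x2), min(H, y2)
    if x2 > x1 and y2 > y1:
        img[y1:y2, x1:x2] = img1[sy1:sy2, sx1:sx2]
    return img

Inside the main loop this would replace the whole try/except block with a single call, img = overlay_clamped(img, img1, cx, cy, scale), so a partially off-screen overlay is cropped instead of being skipped for that frame.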