Final version
This commit is contained in: parent 78ade849df, commit 0032d7f6da
@@ -4,7 +4,7 @@
     <content url="file://$MODULE_DIR$">
       <excludeFolder url="file://$MODULE_DIR$/venv" />
     </content>
-    <orderEntry type="jdk" jdkName="Python 3.7 (graduation_project)" jdkType="Python SDK" />
+    <orderEntry type="jdk" jdkName="Python 3.7 (graduation_project2)" jdkType="Python SDK" />
     <orderEntry type="sourceFolder" forTests="false" />
   </component>
 </module>
@@ -1,4 +1,4 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (graduation_project)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (graduation_project2)" project-jdk-type="Python SDK" />
 </project>
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -53,7 +53,7 @@ class YOLOV5(object):
                  iou_thres=0.45,
                  classes=None,
                  imgsz=640,
-                 weights="./yolov5s.pt"):
+                 weights="C:/Users/Dinger/Desktop/work/lesson/graduation_project/graduation-project/resource/yolov5s.pt"):
         # Hyperparameter settings
         self.conf_thres = conf_thres  # confidence threshold
         self.iou_thres = iou_thres  # IoU threshold
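Note on the new default: the weight path now points at one user's desktop, which breaks on any other checkout. A minimal sketch of a relative alternative, assuming detect.py sits one level above the resource/ directory (layout not verified against the repo):

import os

# Hypothetical: locate yolov5s.pt relative to this module instead of an
# absolute, user-specific Windows path.
DEFAULT_WEIGHTS = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               "resource", "yolov5s.pt")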
Binary file not shown.
New image added (15 KiB); preview not shown.
identify.py (33 changed lines)
@@ -101,6 +101,8 @@ class Identify:
         self.last_control_flag = 0
         self.page_up_count = 0
         self.page_down_count = 0
         self.end_count = 0
         self.pencil_count = 0
+        self.step_up = 0
+        self.step_down = 0
         self.last_wrist_point = (0, 0)
@@ -142,15 +144,15 @@ class Identify:
             # self.ml_identify.infer(image=self.image)
             # continue
             # self.catch_person_flag = True
-            # if not self.catch_person_flag:
-            #     self.catch_person_flag = True
+            if not self.catch_person_flag:
+                # self.catch_person_flag = True
                 self.person_results.clear()
                 self.left_hands.clear()
                 self.right_hands.clear()
                 self.deal_with_image()
                 self.find_points()
                 x1, y1, x2, y2 = self.find_target_person()

                 # print("no no no no no no no no no no no no no")
             else:
                 # print("in True")
@@ -223,7 +225,7 @@ class Identify:
         if not judge_zero(self.is_right_finger_straight):
             return False
         if util.Util.get_distance(self.left_hand_points[6], self.right_hand_points[6]) / \
-                util.Util.get_distance(self.left_hand_points[5], self.left_hand_points[9]) > 2:
+                util.Util.get_distance(self.left_hand_points[5], self.left_hand_points[9]) > 2.5:
             return False
         return True
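Why the ratio: the distance between the two hands' index-finger PIP joints (landmark 6) is divided by the left palm width (index MCP 5 to middle MCP 9), so the check is invariant to how far the user stands from the camera; raising the cutoff from 2 to 2.5 tolerates slightly wider hand separation. A self-contained sketch with made-up landmark coordinates:

import math

def distance(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])

# Hypothetical pixel positions using MediaPipe hand indices:
# 5 = index MCP, 9 = middle MCP (palm-width reference), 6 = index PIP.
left = {5: (100, 200), 6: (105, 180), 9: (130, 200)}
right = {6: (150, 185)}

ratio = distance(left[6], right[6]) / distance(left[5], left[9])
print(ratio <= 2.5)  # True: the hands are close enough for the gesture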
@@ -291,6 +293,8 @@ class Identify:
             if not person_result[2]:
                 continue
             standard_distance = util.Util.get_distance(person_result[1][7], person_result[1][8])
+            if standard_distance == 0:
+                continue
             if util.Util.get_distance(person_result[1][4], person_result[2][4]) / standard_distance > 2:
                 continue
             if util.Util.get_distance(person_result[1][8], person_result[2][8]) / standard_distance > 1:
@@ -454,7 +458,15 @@ class Identify:
             return False

     def judge_end(self):
-        if self.left_hand_flag and self.right_hand_flag and judge_zero():
+        if not self.left_hand_flag:
+            self.end_count = 0
+            return False
+        if not judge_zero(self.is_left_finger_straight):
+            self.end_count = 0
+            return False
+        self.end_count += 1
+        if self.end_count > 10:
+            self.end_count = 0
+            return True
         return False

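The rewritten judge_end is a hold-to-confirm debounce: the "end" gesture only fires after the left-hand zero pose has been seen in more than 10 consecutive frames, and any miss resets the counter. The same pattern extracted into a reusable sketch (class name is illustrative, not from the repo):

class GestureDebouncer:
    """Fire only after `hold` consecutive positive frames; reset on a miss."""

    def __init__(self, hold=10):
        self.hold = hold
        self.count = 0

    def update(self, detected):
        if not detected:
            self.count = 0
            return False
        self.count += 1
        if self.count > self.hold:
            self.count = 0
            return True
        return False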
@@ -464,11 +476,22 @@ class Identify:
         if not self.judge_one(self.is_right_finger_straight):
             return False
+        standard_distance = util.Util.get_distance(self.left_hand_points[7], self.right_hand_points[8])
+        if standard_distance == 0:
+            return False
         # print(util.Util.get_distance(self.left_hand_points[8], self.right_hand_points[8]) / standard_distance)
         if util.Util.get_distance(self.left_hand_points[8], self.right_hand_points[8]) / standard_distance < 1:
             return True
         return False

+    # def judge_pencil(self):
+    #     if not self.judge_one(self.is_left_finger_straight):
+    #         return False
+    #     if self.right_hand_flag:
+    #         return False
+    #     self.pencil_count += 1
+    #     if self.pencil_count > 5:
+    #         self.pencil_count = 0
+
     def judge_control(self):
         if self.is_identify:
             # print("len = " + str(len(self.identify_results.multi_handedness)))
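The standard_distance == 0 guards added here and in the earlier find_target_person hunk prevent a ZeroDivisionError when two landmarks project onto the same pixel (for example, a briefly occluded hand). A hypothetical helper expressing the same idea once:

def safe_ratio(num, den, default=float('inf')):
    # An infinite default makes "< threshold" comparisons reject the gesture,
    # matching the early-return behavior of the added guards.
    return num / den if den else default

print(safe_ratio(10.0, 0.0) < 1)  # False: gesture rejected, no crash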
@@ -489,7 +512,7 @@ class Identify:
         elif self.judge_one(self.is_left_finger_straight) and judge_zero(self.is_right_finger_straight):
             # print("5")
             return 5
-        elif self.left_hand_flag and judge_zero(self.is_left_finger_straight):
+        elif self.judge_end():
             # print("6")
             return 6
         elif self.judge_system_over():
main.py (26 changed lines)
@@ -1,3 +1,4 @@
+#!C:\Users\Dinger\anaconda3\envs\graduation_project2\python.exe
 import multiprocessing
 import sys
 import tkinter
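The added shebang is tied to one user's Anaconda environment, so the script only launches through it on that machine. Assuming the Windows py launcher is what reads this line, a generic form stays portable:

#!/usr/bin/env python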
@@ -24,8 +25,8 @@ def control_page_down():


 def control_ppt_begin():
-    win32api.keybd_event(116, 0, 0, 0)  # press the F5 key
+    win32api.keybd_event(16, 0, 0, 0)  # press the Shift key
+    win32api.keybd_event(116, 0, 0, 0)  # press the F5 key
     time.sleep(0.02)
     win32api.keybd_event(116, 0, win32con.KEYEVENTF_KEYUP, 0)  # release the F5 key
+    win32api.keybd_event(16, 0, win32con.KEYEVENTF_KEYUP, 0)  # release the Shift key
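The changed sequence sends Shift+F5 instead of plain F5, which in PowerPoint starts the slideshow from the current slide rather than from the beginning. The same function rewritten with named win32con virtual-key constants in place of the raw codes 16 and 116 (behavior unchanged):

import time
import win32api
import win32con

def control_ppt_begin():
    win32api.keybd_event(win32con.VK_SHIFT, 0, 0, 0)  # press Shift
    win32api.keybd_event(win32con.VK_F5, 0, 0, 0)     # press F5
    time.sleep(0.02)
    win32api.keybd_event(win32con.VK_F5, 0, win32con.KEYEVENTF_KEYUP, 0)     # release F5
    win32api.keybd_event(win32con.VK_SHIFT, 0, win32con.KEYEVENTF_KEYUP, 0)  # release Shift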
@@ -103,11 +104,6 @@ def control_thread(control_number, array, over_flag):
             control_ppt_end()
             step = 0

-        # if control_flag == 5:
-        #     control_open_pencil()
-        # if control_flag == 6:
-        #     control_draw()
-

 def identify_thread(control_number, array, over_flag):
     identify = Identify(control_number, array)
@@ -116,29 +112,16 @@ def identify_thread(control_number, array, over_flag):

 def open_file():
     file_path = askopenfilename(title=u'选择文件')
     # file_path = askopenfilename(title=u'选择文件', initialdir=(os.path.expanduser('H:/')))
     if not open_ppt(file_path):
         return False
     control_number = multiprocessing.Value('i', 0)
     array = multiprocessing.Array('i', 4)
     over_flag = multiprocessing.Value('i', 0)
     # array2 = multiprocessing.Array()
     p1 = multiprocessing.Process(target=identify_thread, args=(control_number, array, over_flag))
     p2 = multiprocessing.Process(target=control_thread, args=(control_number, array, over_flag))
     # p3 = multiprocessing.Process(target=show_thread, args=(value, array))
     p1.start()
     # p1.terminate()
     p2.start()
     return True
-    # p3.start()
-    # identify_t = threading.Thread(target=identify_thread)
-    # print("control_flag1 = " + str(control_flag))
-    # control_t = threading.Thread(target=control_thread)
-    # print("control_flag2 = " + str(control_flag))
-    # identify_t.setDaemon(True)
-    # control_t.setDaemon(True)
-    # identify_t.start()
-    # control_t.start()


 def judge_ppt(file_path):
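open_file now wires two processes to shared memory: identify_thread writes gesture codes, control_thread reads them and drives the keyboard. A stripped-down sketch of that pattern (worker bodies are placeholders, not the repo's logic):

import multiprocessing

def producer(control_number, array, over_flag):
    with control_number.get_lock():
        control_number.value = 3  # e.g. a "page down" gesture was recognized

def consumer(control_number, array, over_flag):
    with control_number.get_lock():
        print("control code:", control_number.value)

if __name__ == '__main__':
    control_number = multiprocessing.Value('i', 0)  # shared int
    array = multiprocessing.Array('i', 4)           # shared int[4]
    over_flag = multiprocessing.Value('i', 0)
    p1 = multiprocessing.Process(target=producer, args=(control_number, array, over_flag))
    p2 = multiprocessing.Process(target=consumer, args=(control_number, array, over_flag))
    p1.start()
    p1.join()
    p2.start()
    p2.join()  # prints "control code: 3"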
@@ -166,15 +149,10 @@ def open_ppt(file_path):
     return True


-# def make_button(window):
-

 if __name__ == '__main__':
     window = tkinter.Tk()
     window.title("会议PPT选择")
     window.geometry("200x100")
     bt1 = tkinter.Button(window, text='打开文件', width=15, height=15, command=open_file)
     bt1.pack()
-    # show_text = tkinter.Text(window, height=2)
-    # show_text.pack()
     window.mainloop()
Binary file not shown.
@@ -301,24 +301,25 @@ def parse_model(d, ch):  # model_dict, input_channels(3)


 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--profile', action='store_true', help='profile model speed')
-    opt = parser.parse_args()
-    opt.cfg = check_yaml(opt.cfg)  # check YAML
-    print_args(FILE.stem, opt)
-    set_logging()
-    device = select_device(opt.device)
-
-    # Create model
-    model = Model(opt.cfg).to(device)
-    model.train()
-
-    # Profile
-    if opt.profile:
-        img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
-        y = model(img, profile=True)
+    print("in yolo")
+    # parser = argparse.ArgumentParser()
+    # parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+    # parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    # parser.add_argument('--profile', action='store_true', help='profile model speed')
+    # opt = parser.parse_args()
+    # opt.cfg = check_yaml(opt.cfg)  # check YAML
+    # print_args(FILE.stem, opt)
+    # set_logging()
+    # device = select_device(opt.device)
+    #
+    # # Create model
+    # model = Model(opt.cfg).to(device)
+    # model.train()
+    #
+    # # Profile
+    # if opt.profile:
+    #     img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
+    #     y = model(img, profile=True)

     # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
     # from torch.utils.tensorboard import SummaryWriter
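Commenting the argparse block out and leaving a bare print is safe because code under the __main__ guard never runs on import, so detect.py importing models.yolo is unaffected:

if __name__ == '__main__':
    print("in yolo")  # runs only when yolo.py is executed directly, never on import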
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 1.0  # model depth multiple
-width_multiple: 1.0  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
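The four deleted YAMLs (this one and the three below) differ only in depth_multiple and width_multiple; upstream YOLOv5's parse_model scales layer repeats and channel counts with them. A paraphrased sketch of that scaling (not the exact upstream code):

import math

def scale_depth(n, depth_multiple):
    # repeat counts, e.g. the "9" in [-1, 9, C3, [512]]
    return max(round(n * depth_multiple), 1) if n > 1 else n

def scale_width(c, width_multiple, divisor=8):
    # channel counts, rounded up to a multiple of 8
    return math.ceil(c * width_multiple / divisor) * divisor

print(scale_depth(9, 0.67))     # 6 C3 repeats in the 0.67-depth variant
print(scale_width(1024, 0.75))  # 768 channels in the 0.75-width variant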
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 0.67  # model depth multiple
-width_multiple: 0.75  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 0.33  # model depth multiple
-width_multiple: 0.25  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
@@ -1,48 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-
-# Parameters
-nc: 80  # number of classes
-depth_multiple: 1.33  # model depth multiple
-width_multiple: 1.25  # layer channel multiple
-anchors:
-  - [10,13, 16,30, 33,23]  # P3/8
-  - [30,61, 62,45, 59,119]  # P4/16
-  - [116,90, 156,198, 373,326]  # P5/32
-
-# YOLOv5 v6.0 backbone
-backbone:
-  # [from, number, module, args]
-  [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
-   [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
-   [-1, 3, C3, [128]],
-   [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
-   [-1, 6, C3, [256]],
-   [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
-   [-1, 9, C3, [512]],
-   [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
-   [-1, 3, C3, [1024]],
-   [-1, 1, SPPF, [1024, 5]],  # 9
-  ]
-
-# YOLOv5 v6.0 head
-head:
-  [[-1, 1, Conv, [512, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 6], 1, Concat, [1]],  # cat backbone P4
-   [-1, 3, C3, [512, False]],  # 13
-
-   [-1, 1, Conv, [256, 1, 1]],
-   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
-   [[-1, 4], 1, Concat, [1]],  # cat backbone P3
-   [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
-
-   [-1, 1, Conv, [256, 3, 2]],
-   [[-1, 14], 1, Concat, [1]],  # cat head P4
-   [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
-
-   [-1, 1, Conv, [512, 3, 2]],
-   [[-1, 10], 1, Concat, [1]],  # cat head P5
-   [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
-
-   [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
-  ]
Binary file not shown.
test.py (165 lines deleted)
@@ -1,165 +0,0 @@
-import cv2
-import mediapipe as mp
-import detect
-import numpy as np
-# !/usr/bin/env python
-# -*- coding: utf-8 -*-
-# @Time : 2021/04/08
-# @Author : Devil_Xiao
-# Purpose: This script is used to change the video_speed
-#          You can choose to specify the fps,
-#          or you can choose to change the multiple of the original playback speed
-
-import cv2
-from cv2 import VideoWriter, VideoWriter_fourcc
-import argparse
-
-
-def video_speed(video_root, out_root, fps=None, scale=1):
-    """When fps and scale are specified at the same time, fps is the dominant"""
-    # cap = cv2.VideoCapture(video_root)
-    cap = cv2.VideoCapture(0)
-    video_width = int(cap.get(3))
-    video_height = int(cap.get(4))
-    print(video_width)
-    print(video_height)
-    # fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
-    # if fps:
-    #     videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (video_width, video_height))
-    # else:
-    #     fps = int(cap.get(cv2.CAP_PROP_FPS) * scale)
-    #     videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (video_width, video_height))
-    flag = cap.isOpened()
-    mp_drawing = mp.solutions.drawing_utils
-    mp_drawing_styles = mp.solutions.drawing_styles
-    mp_hands = mp.solutions.hands
-    mp_poses = mp.solutions.pose
-    hands = mp_hands.Hands(
-        static_image_mode=False,
-        max_num_hands=2,
-        min_detection_confidence=0.75,
-        min_tracking_confidence=0.75)
-    poses = mp_poses.Pose(
-        min_detection_confidence=0.5,
-        min_tracking_confidence=0.5)
-
-    # yolov5 = detect.YOLOV5()
-    # blank_image = np.zeros((1000, 1000, 3), np.uint8)
-    # blank_image.fill(255)
-    while True:
-        ret, frame = cap.read()
-        # blank_image = frame.copy()
-        # blank_image.fill(255)
-        # image = detect.run(image_input=frame)
-        # result = yolov5.infer(image=frame)
-        # print("")
-
-        frame = cv2.flip(frame, 1)
-        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        # frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-        results = hands.process(frame_rgb)
-        # results = poses.process(image)
-        # print("*********************************")
-        # if results.pose_landmarks:
-        #     for
-        #     print(results.pose_landmarks.landmark[17])
-        # mp_drawing.draw_landmarks(
-        #     blank_image,
-        #     results.pose_landmarks,
-        #     mp_poses.POSE_CONNECTIONS,
-        #     landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
-        # if results.
-        # print(results.multi_hand_landmarks)
-        if results.multi_handedness:
-            # if results.multi_handedness[0].classification[0].label == "Left":
-            for hand_landmarks in results.multi_hand_landmarks:
-                # visualize the hand landmarks
-                mp_drawing.draw_landmarks(
-                    frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
-            # if results.multi_handedness[0].classification[0].label == "Right":
-            #     print(results.multi_handedness[0].classification[0])
-        cv2.imshow('MediaPipe Hands', frame)
-        # cv2.imshow('black_image', blank_image)
-        # videoWriter.write(blank_image)
-        # count = count - 1
-        cv2.imwrite("picture3.jpg", frame)
-        if cv2.waitKey(3000) & 0xFF == 27:
-            break
-    cap.release()
-    # videoWriter.release()
-    # while (flag):
-    #     flag, frame = cap.read()
-    #     videoWriter.write(frame)
-    # videoWriter.release()
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--video_name', type=str, default=r'video.mp4', help='original video name')
-    parser.add_argument('--result_name', type=str, default=r'result.mp4', help='result name')
-    parser.add_argument('--fps', type=int, default=None, help='Specify the playback frame rate')
-    parser.add_argument('--scale', type=float, default='0.5', help='Change the original video speed')
-    opt = parser.parse_args()
-    print(opt)
-    video_speed(opt.video_name, opt.result_name, opt.fps, 2)
-
-# if __name__ == '__main__':
-#     parser = argparse.ArgumentParser()
-#     parser.add_argument('--video_name', type=str, default=r'video.mp4', help='original video name')
-#     parser.add_argument('--result_name', type=str, default=r'result.mp4', help='result name')
-#     parser.add_argument('--fps', type=int, default=None, help='Specify the playback frame rate')
-#     parser.add_argument('--scale', type=float, default='0.5', help='Change the original video speed')
-#     opt = parser.parse_args()
-#     mp_drawing = mp.solutions.drawing_utils
-#     mp_drawing_styles = mp.solutions.drawing_styles
-#     mp_hands = mp.solutions.hands
-#     mp_poses = mp.solutions.pose
-#     hands = mp_hands.Hands(
-#         static_image_mode=False,
-#         max_num_hands=2,
-#         min_detection_confidence=0.75,
-#         min_tracking_confidence=0.75)
-#     poses = mp_poses.Pose(
-#         min_detection_confidence=0.5,
-#         min_tracking_confidence=0.5)
-#     cap = cv2.VideoCapture("D:\\Program Files\\JiJiDown\\Download\\Thank_you.mp4")
-#     yolov5 = detect.YOLOV5()
-#     blank_image = np.zeros((1000, 1000, 3), np.uint8)
-#     blank_image.fill(255)
-#     while True:
-#         ret, frame = cap.read()
-#         blank_image = np.zeros((1000, 1000, 3), np.uint8)
-#         blank_image.fill(255)
-#         # image = detect.run(image_input=frame)
-#         # result = yolov5.infer(image=frame)
-#         # print("")
-#         # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-#         # frame = cv2.flip(frame, 1)
-#         image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-#         # results = hands.process(image)
-#         results = poses.process(image)
-#         # print("*********************************")
-#         # if results.pose_landmarks:
-#         #     for
-#         #     print(results.pose_landmarks.landmark[17])
-#         mp_drawing.draw_landmarks(
-#             blank_image,
-#             results.pose_landmarks,
-#             mp_poses.POSE_CONNECTIONS,
-#             landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style())
-#         # if results.
-#         # print(results.multi_hand_landmarks)
-#         # if results.multi_handedness:
-#         #     # if results.multi_handedness[0].classification[0].label == "Left":
-#         #     for hand_landmarks in results.multi_hand_landmarks:
-#         #         # visualize the hand landmarks
-#         #         mp_drawing.draw_landmarks(
-#         #             frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
-#         #     # if results.multi_handedness[0].classification[0].label == "Right":
-#         #     #     print(results.multi_handedness[0].classification[0])
-#         cv2.imshow('MediaPipe Hands', frame)
-#         cv2.imshow('black_image', blank_image)
-#         # cv2.imwrite("picture3.jpg", frame)
-#         if cv2.waitKey(1) & 0xFF == 27:
-#             break
-#     cap.release()
util.py (3 changed lines)
@@ -27,7 +27,8 @@ class Util:
     def get_distance(point1, point2):
         return math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)

-    def is_in_rectangle(target_point, rectangle):
+    def \
+            is_in_rectangle(target_point, rectangle):
         if target_point[0] > rectangle[0] and target_point[0] < rectangle[2] and target_point[1] > rectangle[1] and \
                 target_point[1] < rectangle[3]:
             return True
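The new "def \" line split is legal but reads oddly, and the bounds check can use Python's chained comparisons. A near-equivalent sketch (returns False rather than falling through to None when the point is outside):

def is_in_rectangle(target_point, rectangle):
    x, y = target_point
    x1, y1, x2, y2 = rectangle  # rectangle as (left, top, right, bottom)
    return x1 < x < x2 and y1 < y < y2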