non use ai

This commit is contained in:
2024-03-04 14:22:56 +09:00
parent 3d12dfe64d
commit 209ba8345f
92 changed files with 9130 additions and 3 deletions

581
DL/FR/d2_face_detect.py Normal file
View File

@@ -0,0 +1,581 @@
# -*- coding: utf-8 -*-
"""
@file : d2_face_detect.py
@author: jwkim
@license: A2TEC & DAOOLDNS
@section Modify History
- 2023-01-11 오전 11:31 jwkim base
"""
import face_recognition
import cv2
import os, sys
import copy
import json
import threading
import numpy as np
import paramiko
AI_ENGINE_PATH = "/AI_ENGINE"
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + AI_ENGINE_PATH) # mqtt
os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) # ai_const
from mqtt_publish import client
import ai_engine_const as AI_CONST
from REST_AI_ENGINE_CONTROL.app import models as M
from REST_AI_ENGINE_CONTROL.app.utils.extra import file_size_check
import project_config
# This is a demo of running face recognition on a video file and saving the results to a new video file.
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
class FaceDetect:
    """
    Face recognition engine.

    Controlled through externally supplied threading events; while inference
    runs it publishes MQTT messages matching the current recognition state.
    """
    def __init__(self,
                 request,
                 thread_value,
                 stop_event: threading.Event,
                 worker_event: threading.Event,
                 timeout_event: threading.Event,
                 queue_info,
                 engine_info,
                 input_video,
                 fr_manager
                 ):
        """
        Initializer.

        :param request: external request message
            report_unit: whether to publish each time a new target is recognized
            targets: list of ids to detect
        :param thread_value: shared thread state (status constants, result slots, date util)
        :param stop_event: stops this instance's internal loop
        :param worker_event: signals termination of the current worker thread
        :param timeout_event: stops the internal loop when a timeout occurs
        :param queue_info: queue shared with the engine runner; drained on error
        :param engine_info: engine/model configuration (demo FTP info, FR model info)
        :param input_video: capture source; a numeric string means a webcam index
        :param fr_manager: manager holding registered face-id image info
        """
        self.report_unit = request.report_unit
        self.targets = request.targets
        self.thread_value = thread_value
        self.mqtt_status = self.thread_value.STATUS_NONE
        # NOTE(review): result is read from thread_value then immediately
        # replaced with a fresh list of None placeholders, one per target —
        # the first assignment looks redundant; confirm it is not needed.
        self.result = self.thread_value.result
        self.result = []
        self.result.extend([None] * (len(self.targets)))
        self.report_unit_result = self.thread_value.report_unit_result
        self.report_unit_result = []
        self.worker_event = worker_event  # controls the external worker
        self.stop_event = stop_event  # stop trigger
        self.stop_event.clear()
        self.timeout_event = timeout_event
        # NOTE(review): thread_value was already assigned above; duplicate.
        self.thread_value = thread_value
        self.stop_sign = False
        self.fr_manager = fr_manager
        self.id_info = self.fr_manager.fr_id_info
        # self.worker_names = list(self.id_info.keys()) #TODO(jwkim): manual worker registration
        self.worker_names = self.targets
        self.queue_info = queue_info
        # Numeric strings denote a local webcam index for cv2.VideoCapture.
        self.input_video = int(input_video) if input_video.isnumeric() else input_video
        self.model_info = engine_info
        self.ftp_info = self.model_info.demo.ftp
        # Request-level RI info overrides the model default when present.
        self.ri_info = request.ri or self.model_info.fr_model_info.ri
        self.snapshot_path = None
        self.result_frame = []
    # def _ai_rbi(self, detected):
    #     # RI info: self.ri_info
    #     pass
def _sftp_upload(self):
try:
transprot = paramiko.Transport((self.ftp_info.ip,self.ftp_info.port))
transprot.connect(username = self.ftp_info.id, password = self.ftp_info.pw)
sftp = paramiko.SFTPClient.from_transport(transprot)
remotepath = self.ftp_info.location + os.sep + self.ftp_info.file_face + '.jpg'
#sftp.put(AI_CONST.FTP_FR_RESULT, remotepath)
sftp.close()
transprot.close()
return remotepath
except Exception as e:
return ""
def _mqtt_publish(self, status: str):
"""
mqtt message publish
:param status: 안면인식 상태
"""
client.loop_start()
if status == self.thread_value.STATUS_NEW:
result = self.report_unit_result
else:
result = self.result
mqtt_msg_dict = {
"datetime": self.thread_value.date_utill.date_str_micro_sec(),
"status": status,
"result": result
}
if self.snapshot_path:
mqtt_msg_dict[AI_CONST.DEMO_KEY_NAME_SNAPSHOT_SFTP] = self.snapshot_path
client.publish(AI_CONST.MQTT_FR_TOPIC, json.dumps(mqtt_msg_dict), 0)
# client.loop_stop()
    def _result_update(self, matched_list: list):
        """
        Merge newly detected ids into the running result and set mqtt_status.

        :param matched_list: ids detected in the current frame
        """
        self.mqtt_status = self.thread_value.STATUS_NONE  # status init
        if self.report_unit:
            self.report_unit_result = []
            self.report_unit_result.extend([None] * (len(self.targets)))
        origin_matched_list = copy.deepcopy(matched_list)
        # First detections: result still holds only None placeholders.
        if self.result.count(None) == len(self.targets):
            for i in matched_list:
                if i in self.worker_names and i in self.targets:
                    self.result[self.targets.index(i)] = i
            if self.report_unit and self.result.count(None) != len(self.targets):
                self.mqtt_status = self.thread_value.STATUS_NEW
                # NOTE(review): this aliases report_unit_result to result (no
                # copy), so later writes to result show through both names —
                # confirm a snapshot (e.g. list(self.result)) was not intended.
                self.report_unit_result = self.result
        # Subsequent detections: record ids not yet present in result.
        for i in origin_matched_list:
            if i not in self.result and i in self.targets:
                if self.report_unit:
                    self.mqtt_status = self.thread_value.STATUS_NEW
                    self.report_unit_result[self.targets.index(i)] = i
                self.result[self.targets.index(i)] = i
        # TODO(jwkim): split out of this function
        # All targets found: mark complete and stop the inference loop.
        if self.result.count(None) == 0 and self.targets:
            self.mqtt_status = self.thread_value.STATUS_COMPLETE
            self.stop_event.set()
def _encoding(self):
"""
안면인식 대상 이미지 등록
등록된 인원 : no000001,no000002,no000003
:return: encoding 정보(list)
"""
# Load some sample pictures and learn how to recognize them.
worker1_image = face_recognition.load_image_file(AI_CONST.WORKER1_IMG_PATH)
no000001 = {
'target_names' : 'no000001' ,
'encoding' : face_recognition.face_encodings(worker1_image)[0]
}
worker2_image = face_recognition.load_image_file(AI_CONST.WORKER2_IMG_PATH)
no000002 = {
'target_names' : 'no000002',
'encoding' : face_recognition.face_encodings(worker2_image)[0]
}
worker3_image = face_recognition.load_image_file(AI_CONST.WORKER3_IMG_PATH)
no000003 = {
'target_names' : 'no000003',
'encoding' : face_recognition.face_encodings(worker3_image)[0]
}
worker4_image = face_recognition.load_image_file(AI_CONST.WORKER4_IMG_PATH)
no000004 = {
'target_names' : 'no000004',
'encoding' : face_recognition.face_encodings(worker4_image)[0]
}
worker5_image = face_recognition.load_image_file(AI_CONST.WORKER5_IMG_PATH)
no000005 = {
'target_names' : 'no000005',
'encoding' : face_recognition.face_encodings(worker5_image)[0]
}
worker6_image = face_recognition.load_image_file(AI_CONST.WORKER6_IMG_PATH)
no000006 = {
'target_names' : 'no000006',
'encoding' : face_recognition.face_encodings(worker6_image)[0]
}
worker7_image = face_recognition.load_image_file(AI_CONST.WORKER7_IMG_PATH)
no000007 = {
'target_names' : 'no000007',
'encoding' : face_recognition.face_encodings(worker7_image)[0]
}
worker8_image = face_recognition.load_image_file(AI_CONST.WORKER8_IMG_PATH)
no000008 = {
'target_names' : 'no000008',
'encoding' : face_recognition.face_encodings(worker8_image)[0]
}
# worker9_image = face_recognition.load_image_file(AI_CONST.WORKER9_IMG_PATH)
# no000009 = {
# 'target_names' : 'no000009',
# 'encoding' : face_recognition.face_encodings(worker9_image)[0]
# }
worker10_image = face_recognition.load_image_file(AI_CONST.WORKER10_IMG_PATH)
no000010 = {
'target_names' : 'no000010',
'encoding' : face_recognition.face_encodings(worker10_image)[0]
}
worker11_image = face_recognition.load_image_file(AI_CONST.WORKER11_IMG_PATH)
no000011 = {
'target_names' : 'no000011',
'encoding' : face_recognition.face_encodings(worker11_image)[0]
}
worker12_image = face_recognition.load_image_file(AI_CONST.WORKER12_IMG_PATH)
no000012 = {
'target_names' : 'no000012',
'encoding' : face_recognition.face_encodings(worker12_image)[0]
}
# print('d3')
# worker13_image = face_recognition.load_image_file(AI_CONST.WORKER13_IMG_PATH)
# no000013 = {
# 'target_names' : 'no000013',
# 'encoding' : face_recognition.face_encodings(worker13_image)[0]
# }
# print('d4')
# worker14_image = face_recognition.load_image_file(AI_CONST.WORKER14_IMG_PATH)
# no000014 = {
# 'target_names' : 'no000014',
# 'encoding' : face_recognition.face_encodings(worker14_image)[0]
# }
# print('d5')
worker15_image = face_recognition.load_image_file(AI_CONST.WORKER15_IMG_PATH)
no000015 = {
'target_names' : 'no000015',
'encoding' : face_recognition.face_encodings(worker15_image)[0]
}
# print('d6')
worker16_image = face_recognition.load_image_file(AI_CONST.WORKER16_IMG_PATH)
no000016 = {
'target_names' : 'no000016',
'encoding' : face_recognition.face_encodings(worker16_image)[0]
}
# print('d7')
# worker17_image = face_recognition.load_image_file(AI_CONST.WORKER17_IMG_PATH)
# no000017 = {
# 'target_names' : 'no000017',
# 'encoding' : face_recognition.face_encodings(worker17_image)[0]
# }
# print('d8')
worker18_image = face_recognition.load_image_file(AI_CONST.WORKER18_IMG_PATH)
no000018 = {
'target_names' : 'no000018',
'encoding' : face_recognition.face_encodings(worker18_image)[0]
}
# print('d9')
worker19_image = face_recognition.load_image_file(AI_CONST.WORKER19_IMG_PATH)
no000019 = {
'target_names' : 'no000019',
'encoding' : face_recognition.face_encodings(worker19_image)[0]
}
# print('d0')
worker20_image = face_recognition.load_image_file(AI_CONST.WORKER20_IMG_PATH)
no000020 = {
'target_names' : 'no000020',
'encoding' : face_recognition.face_encodings(worker20_image)[0]
}
# print('d01')
worker21_image = face_recognition.load_image_file(AI_CONST.WORKER21_IMG_PATH)
no000021 = {
'target_names' : 'no000021',
'encoding' : face_recognition.face_encodings(worker21_image)[0]
}
# print('d02')
worker22_image = face_recognition.load_image_file(AI_CONST.WORKER22_IMG_PATH)
no000022 = {
'target_names' : 'no000022',
'encoding' : face_recognition.face_encodings(worker22_image)[0]
}
encoding_list = [
#no000001,no000002,no000003 # kepco 1
# ,
no000004 # jangys
,no000005 # whangsj
# ,no000006
# ,no000007
,no000008 # agics
# ,no000009
# ,
# ,no000010,no000011,no000012,no000015,no000018 #Helmet on kepco2
,no000019,no000020 #Helmet off kepco 2
,no000021 #,no000022
]
result = []
#demo
for i in encoding_list:
result.append(i["encoding"])
names = [
#'no000001','no000002','no000003'# kepco 1
# ,
'no000004' # jangys
,'no000005' # whangsj
# ,'no000006'
# ,'no000007'
,'no000008' # agics
# ,'no000009'
# ,
# ,'no000010','no000011','no000012','no000015','no000018' #Helmet on kepco2
,'no000019','no000020' #Helmet off kepco 2
,'no000021' #,no000022
]
return result , names
# for i in encoding_list:
# if i['target_names'] in self.targets:
# result.append(i["encoding"])
# if not result:
# raise Exception("invalid targets")
# return result
def _new_encoding(self):
result = []
for key,value in self.id_info.items():
current_size = file_size_check(value['image'])
if value['size'] != current_size :
raise Exception(AI_CONST.INVALID_IMG_MSG)
fr_image = face_recognition.load_image_file(value['image'].file)
fr_encoding = face_recognition.face_encodings(fr_image)[0]
result.append(fr_encoding)
return result
    def inference(self):
        """
        Run face-recognition inference and publish MQTT status messages.

        NOTE(review): the real recognition loop is currently commented out for
        a demo — a single frame is grabbed, written and uploaded via SFTP, and
        a hard-coded result ("no000010") is published as COMPLETE.
        """
        try:
            # Remove any stale snapshot left over from a previous run.
            if os.path.exists(AI_CONST.FTP_FR_RESULT):
                os.remove(AI_CONST.FTP_FR_RESULT)
            # start publish
            self._mqtt_publish(status=self.thread_value.STATUS_START)
            input_movie = cv2.VideoCapture(self.input_video)
            # demo
            known_faces, known_names = self._encoding()
            # known_faces = self._encoding()
            # known_faces = self._new_encoding() #TODO(jwkim): manual worker registration
            # Initialize some variables
            # face_locations = []
            # face_encodings = []
            # while self.mqtt_status != self.thread_value.STATUS_COMPLETE and self.stop_sign != True:
            # while True:
            #     # TODO(JWKIM): ageing test 23-02-28
            #     # self._mqtt_publish(status="AGEING_TEST")
            #     # loop stop
            #     if self.stop_event.is_set():
            #         if self.mqtt_status == self.thread_value.STATUS_COMPLETE:
            #             pass
            #         else:
            #             self.thread_value.current_status = self.thread_value.STATUS_STOP
            #         # TODO(JWKIM): ageing test 23-02-28
            #         break
            #     elif self.thread_value.timeout_status:
            #         self.thread_value.current_status = self.thread_value.STATUS_TIMEOUT
            #         # TODO(JWKIM): ageing test 23-02-28
            #         break
            # Grab a single frame of video
            ret, frame = input_movie.read()
            cv2.imwrite(AI_CONST.FTP_FR_RESULT,frame)
            self._sftp_upload()
            # Hard-coded demo result.
            self.result = ["no000010"]
            self._mqtt_publish(status = self.thread_value.STATUS_COMPLETE)
            # Quit when the input video file ends
            # Restart playback when the input is a video file that ended.
            # if not ret and os.path.isfile(self.input_video):
            #     input_movie = cv2.VideoCapture(self.input_video)
            #     continue
            # frame_count = int(input_movie.get(cv2.CV_CAP_PROP_FPS))
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            # rgb_frame = frame[:, :, ::-1]
            # face_locations = face_recognition.face_locations(rgb_frame)
            # face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)
            # for face_encoding in face_encodings:
            #     matched_name = []  # names already detected
            #     match = face_recognition.compare_faces(known_faces, face_encoding, tolerance=AI_CONST.FACE_EVOLUTION_DISTANCE)
            #     #TODO(jwkim): input image changed
            #     if self.fr_manager.fr_id_info != self.id_info:
            #         raise Exception(AI_CONST.IMG_CHANGED_MSG)
            #     face_distances = face_recognition.face_distance(known_faces, face_encoding)
            #     best_match_index = np.argmin(face_distances)
            #     if face_distances[best_match_index] < AI_CONST.FACE_EVOLUTION_DISTANCE:
            #         # demo
            #         print(match)
            #         if match[best_match_index]:
            #             # self.result[0] = known_names[best_match_index]
            #             self.result[0] = "no000010"
            #             self.mqtt_status = self.thread_value.STATUS_COMPLETE
            #             print(known_names[best_match_index])
            #             self.stop_event.set()
            #             break
            #         if match[best_match_index]:
            #             matched_name.append(self.worker_names[best_match_index])
            #             print(self.worker_names[best_match_index])
            # TODO(jwkim): demo only
            # self._result_update(matched_name)
            # ri
            # self._ai_rbi(matched_name)
            # if self.mqtt_status == self.thread_value.STATUS_NEW:
            #     self._mqtt_publish(self.thread_value.STATUS_NEW)
            # # input source check
            # self._source_check()
            # # loop stop
            # if self.stop_event.is_set():
            #     if self.mqtt_status == self.thread_value.STATUS_COMPLETE:
            #         self.result_frame = copy.deepcopy(frame)
            #     else:
            #         self.thread_value.current_status = self.thread_value.STATUS_STOP
            #     # TODO(JWKIM): ageing test 23-02-28
            #     break
            # elif self.thread_value.timeout_status:
            #     self.thread_value.current_status = self.thread_value.STATUS_TIMEOUT
            #     # TODO(JWKIM): ageing test 23-02-28
            #     break
            # if self.thread_value.timeout_status:
            #     self._mqtt_publish(self.thread_value.STATUS_TIMEOUT)
            #     self.thread_value.timeout_status = False
            # elif self.stop_event.is_set():
            #     if self.mqtt_status == self.thread_value.STATUS_COMPLETE:
            #         # sftp
            #         cv2.imwrite(AI_CONST.FTP_FR_RESULT, self.result_frame)
            #         if project_config.SFTP_UPLOAD and project_config.FR_UPLOAD:
            #             self.snapshot_path = self._sftp_upload()
            #         else:
            #             self.snapshot_path = None
            #         self._mqtt_publish(self.thread_value.STATUS_COMPLETE)
            #     else:
            #         self._mqtt_publish(self.thread_value.STATUS_STOP)
            # if not self.timeout_event.is_set():
            #     self.timeout_event.set()
            #     self.timeout_event.clear()
            # if self.mqtt_status == self.thread_value.STATUS_COMPLETE:
            #     pass
        except Exception as e:
            print(e)
            # publish error
            self._mqtt_publish(status=self.thread_value.STATUS_ERROR + str(e))
            self._queue_empty()
        finally:
            # Pulse timeout_event so any waiter wakes up, then hand control
            # back to the worker.
            if not self.timeout_event.is_set():
                self.timeout_event.set()
                self.timeout_event.clear()
            # All done!
            # output_movie.release()
            # input_movie.release()
            cv2.destroyAllWindows()
            self.stop_event.clear()
            self.worker_event.set()
def _source_check(self):
"""
현재 동작중인 input source 와 모델에 세팅된 input source가 다를시 예외 발생
"""
current_source = self.input_video
for i in self.model_info.input_video:
if i.model == M.AEAIModelType.FR:
model_source = i.connect_url
if model_source.isnumeric():
model_source = int(model_source)
if current_source != model_source:
raise Exception(AI_CONST.SOURCE_CHANGED_MSG)
def _queue_empty(self):
"""
queue_info에 있는 데이터를 비운다.
"""
while True:
if self.queue_info.empty():
break
else:
self.queue_info.get()
if __name__ == '__main__':
    # Module is driven by the engine runner; no standalone entry point.
    pass

113
DL/custom_utils.py Normal file
View File

@@ -0,0 +1,113 @@
# -*- coding: utf-8 -*-
"""
@file : custom_utils.py
@author: Ultralytics , jwkim
@license: GPL-3.0 license
@section Modify History
-
"""
import torch
import cv2
import math
import time
import os
import numpy as np
from threading import Thread
from pathlib import Path
from urllib.parse import urlparse
from OD.utils.augmentations import letterbox
from OD.utils.general import clean_str, check_requirements, is_colab, is_kaggle, LOGGER
class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`
    def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1,
                 event=None):
        """
        Open every listed stream and start one daemon reader thread per stream.

        :param sources: path to a *.streams text file (one source per line) or a single source string
        :param img_size: target inference image size used for letterboxing
        :param stride: model stride used when padding letterboxed shapes
        :param auto: allow minimal rectangular padding when all stream shapes match
        :param transforms: optional per-frame transform callable
        :param vid_stride: keep every Nth frame from each stream
        :param event: threading.Event used to stop reader threads  # TODO(jwkim) thread shutdown
        """
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.event = event  # TODO(jwkim) thread shutdown control
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride
        self.vid_stride = vid_stride  # video frame-rate stride
        sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
        n = len(sources)
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            st = f'{i + 1}/{n}: {s}... '
            if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc'
                check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                import pafy
                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            if s == 0:
                assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
                assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'{st}Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
            LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)")
            self.threads[i].start()
        LOGGER.info('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        self.auto = auto and self.rect
        self.transforms = transforms  # optional
        if not self.rect:
            LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')
    def update(self, i, cap, stream):
        # Read stream `i` frames in daemon thread
        n, f = 0, self.frames[i]  # frame number, frame array
        while cap.isOpened() and n < f:
            n += 1
            cap.grab()  # .read() = .grab() followed by .retrieve()
            if n % self.vid_stride == 0:
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                else:
                    # Keep the pipeline alive on dropped signal: blank frame + reconnect.
                    LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
                    self.imgs[i] = np.zeros_like(self.imgs[i])
                    cap.open(stream)  # re-open stream if signal was lost
            time.sleep(0.0)  # wait time
            # NOTE(review): assumes a threading.Event was passed as `event`;
            # the default event=None would raise AttributeError here — confirm
            # callers always supply one.
            if self.event.is_set():  # TODO(jwkim) thread shutdown
                break
def __iter__(self):
self.count = -1
return self
    def __next__(self):
        """
        Return the next batch: (sources, preprocessed images, raw frames, None, '').

        Raises StopIteration when any reader thread has died or 'q' is pressed.
        """
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        im0 = self.imgs.copy()
        if self.transforms:
            im = np.stack([self.transforms(x) for x in im0])  # transforms
        else:
            im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize
            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
            im = np.ascontiguousarray(im)  # contiguous
        return self.sources, im, im0, None, ''
def __len__(self):
return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years

1764
DL/d2_od_detect.py Normal file

File diff suppressed because it is too large Load Diff

BIN
DL/index_78.pt Normal file

Binary file not shown.

1
DL/wd.streams Normal file
View File

@@ -0,0 +1 @@
rtsp://admin:admin1263!@10.20.10.99:28554/onvif/media?profile=Profile2