feat: store face data in the database

caoqianming 2023-09-11 15:21:45 +08:00
parent 5dded4d235
commit 96ac80361e
8 changed files with 150 additions and 27 deletions

View File

@@ -1,4 +1,5 @@
from django.apps import AppConfig
from django.core.cache import cache
class HrmConfig(AppConfig):
@@ -6,4 +7,8 @@ class HrmConfig(AppConfig):
    verbose_name = '人力资源管理'
    def ready(self):
        import apps.hrm.signals
        if cache.get('update_global_face_pd_task', True):
            from apps.hrm.tasks import update_global_face_pd
            update_global_face_pd.delay()
            cache.set('update_global_face_pd_task', False, timeout=30)
        return super().ready()

View File

@@ -0,0 +1,18 @@
# Generated by Django 3.2.12 on 2023-09-11 07:21

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('hrm', '0011_alter_certificate_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='facenet512_data',
            field=models.JSONField(blank=True, null=True, verbose_name='人脸数据'),
        ),
    ]

View File

@@ -44,7 +44,7 @@ class Employee(CommonBModel):
    not_work_remark = models.CharField('当前未打卡说明', null=True, blank=True, max_length=200)
    third_info = models.JSONField('三方信息', default=dict, null=False, blank=True)  # mainly locator-card info
    post = models.ForeignKey(Post, verbose_name='所属岗位', on_delete=models.SET_NULL, null=True, blank=True)
    facenet512_data = models.JSONField('人脸数据', null=True, blank=True)
    class Meta:
        verbose_name = '员工补充信息'
        verbose_name_plural = verbose_name
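The migration adds the column, but existing employees keep a NULL facenet512_data until their photo is re-uploaded. A minimal backfill sketch follows; it is not part of this commit, and it assumes Employee.photo holds a site-relative path string that can be prefixed with settings.BASE_DIR, mirroring how the serializer below builds the path.

# Hypothetical one-off backfill, e.g. from `python manage.py shell`.
# Assumes Employee.photo stores a site-relative path usable as
# settings.BASE_DIR + photo, the same way the serializer in this commit does.
from django.conf import settings
from apps.hrm.models import Employee
from apps.hrm.services import HrmService

for ep in Employee.objects.filter(facenet512_data__isnull=True).exclude(photo=''):
    face_data, msg = HrmService.get_facedata_from_img(settings.BASE_DIR + ep.photo)
    if face_data:
        ep.facenet512_data = face_data
        ep.save(update_fields=['facenet512_data'])
    else:
        print(f'employee {ep.id}: {msg}')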

View File

@@ -13,6 +13,8 @@ from apps.system.serializers import DeptSimpleSerializer, UserSimpleSerializer
from django.db import transaction
from django.core.cache import cache
from apps.utils.tools import check_id_number_e, get_info_from_id
from rest_framework.exceptions import ParseError
from django.conf import settings
class EmployeeSimpleSerializer(CustomModelSerializer):
@@ -69,8 +71,14 @@ class EmployeeCreateUpdateSerializer(CustomModelSerializer):
        old_name = instance.name
        instance = super().update(instance, validated_data)
        if old_photo != instance.photo:  # if the photo changed, the face data must be refreshed
            from apps.hrm.tasks import delete_face_pkl
            delete_face_pkl.delay(instance.id)
            face_data, msg = HrmService.get_facedata_from_img(settings.BASE_DIR + instance.photo)
            if face_data:
                instance.facenet512_data = face_data
                instance.save()
            else:
                raise ParseError(msg)
            from apps.hrm.tasks import update_global_face_pd
            update_global_face_pd.delay()
        if instance.user and instance.name != old_name:
            instance.user.name = instance.name
            instance.user.save()

View File

@@ -15,9 +15,11 @@ from apps.third.dahua import dhClient
from apps.third.models import TDevice
from apps.third.tapis import dhapis
from apps.utils.tools import rannum, ranstr
from apps.utils.face import face_find
myLogger = logging.getLogger('log')
global_face_df = None  # global face-database DataFrame
class HrmService:
@@ -404,33 +406,25 @@ class HrmService:
    @classmethod
    def face_find_from_base64(cls, base64_data):
        from deepface import DeepFace
        img_name = str(uuid.uuid4())
        img_path = settings.BASE_DIR + '/temp/face_' + img_name + '.jpg'
        with open(img_path, 'wb') as f:
            f.write(base64_data)
        db_path = os.path.join(settings.BASE_DIR, 'media/face')
        dfs = DeepFace.find(img_path=img_path, db_path=db_path, model_name='Facenet512')
        # db_path = os.path.join(settings.BASE_DIR, 'media/face')
        # cache_face_db = cache
        dfs = face_find(img_path=img_path, global_df=global_face_df, model_name='Facenet512')
        df = dfs[0]
        if not df.empty:
            matched = df.iloc[0].identity
            epId = matched.split('/')[-1].split('.')[0]
            return Employee.objects.get(id=epId), ''
            return Employee.objects.get(id=matched), ''
        else:
            return None, '人脸未匹配,请调整位置'

    # def VGGFaceloadModel(weight_path):
    #     from deepface.basemodels import VGGFace
    #     from keras.src.engine.training import Model
    #     model = VGGFace.baseModel()
    #     model.load_weights(weight_path)
    #     vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
    #     return vgg_face_descriptor

    # def Facenet512loadModel(weight_path):
    #     from deepface.basemodels import Facenet
    #     model = Facenet.InceptionResNetV2(dimension=512)
    #     model.load_weights(weight_path)
    #     return model

    @classmethod
    def get_facedata_from_img(cls, img_path):
        try:
            from deepface import DeepFace
            embedding_objs = DeepFace.represent(img_path=img_path, model_name='Facenet512')
            return embedding_objs[0]["embedding"], ''
        except Exception as e:
            return None, '人脸数据获取失败请重新上传图片'
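With the embeddings stored on the model, df.iloc[0].identity is now the employee primary key itself rather than a file path to split. A hedged sketch of a caller follows; the view name and request field are illustrative and not part of this commit, and since the service writes the payload straight to a temp .jpg, the caller is assumed to base64-decode it first.

# Illustrative DRF view; endpoint name and request field are assumptions.
import base64

from rest_framework.response import Response
from rest_framework.views import APIView

from apps.hrm.services import HrmService


class FaceSearchView(APIView):
    def post(self, request):
        # face_find_from_base64 writes raw bytes to a temp .jpg, so decode first
        image_bytes = base64.b64decode(request.data['image'])
        employee, msg = HrmService.face_find_from_base64(image_bytes)
        if employee is None:
            return Response({'detail': msg}, status=400)
        return Response({'employee_id': employee.id, 'name': employee.name})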

View File

@@ -8,7 +8,7 @@ from dateutil import tz
from django.core.cache import cache
from apps.hrm.models import Employee
from apps.hrm.services import HrmService
from apps.hrm.services import HrmService, global_face_df
from apps.third.dahua import dhClient
from apps.third.tapis import dhapis
from apps.utils.tasks import CustomTask
@@ -125,3 +125,11 @@ def delete_face_pkl(epId):
    except Exception as e:
        delete_face_pkl.apply_async((epId,), countdown=5)
@shared_task(base=CustomTask)
def update_global_face_pd():
    import pandas as pd
    from apps.hrm import services
    # flat=True only works with a single field, so fetch (id, facenet512_data) pairs
    facedata = list(Employee.objects.filter(facenet512_data__isnull=False,
                                            user__is_active=True).values_list('id', 'facenet512_data'))
    cache.set('global_face_data', facedata, timeout=None)
    # rebind the module-level DataFrame in apps.hrm.services so face_find_from_base64
    # sees it; a bare assignment here would only create a local variable
    services.global_face_df = pd.DataFrame(facedata, columns=["identity", "Facenet512_representation"])
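Note that this task runs in the Celery worker, so services.global_face_df is only populated in that process; the web process would still see None unless it rebuilds the DataFrame, for example from the cached global_face_data entry written above. A hypothetical loader (not part of this commit) could look like this.

# Hypothetical helper for the web process: rebuild the module-level DataFrame
# from the cache entry written by update_global_face_pd.
import pandas as pd
from django.core.cache import cache

from apps.hrm import services


def load_global_face_df():
    facedata = cache.get('global_face_data')
    if facedata:
        services.global_face_df = pd.DataFrame(
            list(facedata), columns=["identity", "Facenet512_representation"]
        )
    return services.global_face_df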

apps/utils/face.py (new file, 90 lines)
View File

@@ -0,0 +1,90 @@
from deepface import DeepFace
from deepface.commons import functions, distance as dst
import pandas as pd
import time


def face_find(
    img_path,
    global_df,
    model_name="Facenet512",
    distance_metric="cosine",
    enforce_detection=True,
    detector_backend="opencv",
    align=True,
    normalization="base",
    silent=False,
):
    tic = time.time()
    target_size = functions.find_target_size(model_name=model_name)
    # the representations for the facial database come in via global_df
    df = global_df
    # img_path might contain more than one face
    target_objs = functions.extract_faces(
        img=img_path,
        target_size=target_size,
        detector_backend=detector_backend,
        grayscale=False,
        enforce_detection=enforce_detection,
        align=align,
    )
    resp_obj = []
    for target_img, target_region, _ in target_objs:
        target_embedding_obj = DeepFace.represent(
            img_path=target_img,
            model_name=model_name,
            enforce_detection=enforce_detection,
            detector_backend="skip",
            align=align,
            normalization=normalization,
        )
        target_representation = target_embedding_obj[0]["embedding"]
        result_df = df.copy()  # df will be filtered for each detected face
        result_df["source_x"] = target_region["x"]
        result_df["source_y"] = target_region["y"]
        result_df["source_w"] = target_region["w"]
        result_df["source_h"] = target_region["h"]
        distances = []
        for index, instance in df.iterrows():
            source_representation = instance[f"{model_name}_representation"]
            if distance_metric == "cosine":
                distance = dst.findCosineDistance(source_representation, target_representation)
            elif distance_metric == "euclidean":
                distance = dst.findEuclideanDistance(source_representation, target_representation)
            elif distance_metric == "euclidean_l2":
                distance = dst.findEuclideanDistance(
                    dst.l2_normalize(source_representation),
                    dst.l2_normalize(target_representation),
                )
            else:
                raise ValueError(f"invalid distance metric passed - {distance_metric}")
            distances.append(distance)
        # ---------------------------
        result_df[f"{model_name}_{distance_metric}"] = distances
        threshold = dst.findThreshold(model_name, distance_metric)
        result_df = result_df.drop(columns=[f"{model_name}_representation"])
        result_df = result_df[result_df[f"{model_name}_{distance_metric}"] <= threshold]
        result_df = result_df.sort_values(
            by=[f"{model_name}_{distance_metric}"], ascending=True
        ).reset_index(drop=True)
        resp_obj.append(result_df)
    # -----------------------------------
    toc = time.time()
    if not silent:
        print("find function lasts ", toc - tic, " seconds")
    return resp_obj
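A minimal usage sketch of face_find with a tiny in-memory database; the image paths and the single enrolled employee are placeholders, and in the app the DataFrame is built by update_global_face_pd from Employee.facenet512_data.

# Standalone sketch; 'emp_42.jpg' and 'probe.jpg' are placeholder images.
import pandas as pd
from deepface import DeepFace

from apps.utils.face import face_find

# Enroll one face: identity -> Facenet512 embedding (512-dim list of floats).
embedding = DeepFace.represent(img_path='emp_42.jpg', model_name='Facenet512')[0]['embedding']
global_df = pd.DataFrame([(42, embedding)], columns=['identity', 'Facenet512_representation'])

dfs = face_find(img_path='probe.jpg', global_df=global_df, model_name='Facenet512')
if not dfs[0].empty:
    print('best match:', dfs[0].iloc[0].identity)  # employee id, e.g. 42
else:
    print('no match under the Facenet512 cosine threshold')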

View File

@@ -202,7 +202,7 @@ MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# face database configuration
# create the directory automatically if it does not exist
# create the directory automatically if it does not exist / no longer required now that face data is stored directly in the database
FACE_PATH = os.path.join(BASE_DIR, 'media/face')
if not os.path.exists(FACE_PATH):
    os.makedirs(FACE_PATH)