
One-Click Blinking Photos: A Face-Recognition Blink Generator for Still Images, Built with Python

Original
Author: 用户11719788
Published: 2025-07-02 11:38:42

This post implements a complete blink-animation pipeline: face detection, facial-landmark localization, eye-region extraction, and animation generation. The code uses dlib for face detection and landmark localization, OpenCV for image processing, and PIL to write the GIF animation. To run it, you need to download the shape_predictor_68_face_landmarks.dat model file.

Download: http://m.pan38.com/download.php?code=TRYVIF  Access code (optional): 6666
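Before running the class below, it is worth checking that the landmark model file actually sits next to the script. A minimal sanity-check sketch (the filename matches the one hard-coded in the class; the script name blink_generator.py is taken from the usage example further down):

import os

MODEL_PATH = "shape_predictor_68_face_landmarks.dat"

if not os.path.exists(MODEL_PATH):
    raise SystemExit(
        f"Missing {MODEL_PATH} - download it and place it next to blink_generator.py"
    )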

blink_generator.py:

import cv2
import numpy as np
import dlib
from PIL import Image

class BlinkGenerator:
    def __init__(self):
        # dlib frontal face detector and 68-point landmark predictor
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        # In dlib's 68-point scheme, indices 36-41 cover the left eye and 42-47 the right eye
        self.left_eye_landmarks = list(range(36, 42))
        self.right_eye_landmarks = list(range(42, 48))
        
    def detect_faces(self, image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.detector(gray)
        return faces
    
    def get_landmarks(self, image, face):
        landmarks = self.predictor(image, face)
        return [(landmarks.part(i).x, landmarks.part(i).y) for i in range(68)]
    
    def extract_eye_region(self, image, landmarks, eye_indices):
        # Crop a padded bounding box around the requested eye landmark indices
        eye_points = [landmarks[i] for i in eye_indices]
        x_coords = [p[0] for p in eye_points]
        y_coords = [p[1] for p in eye_points]
        
        x_min, x_max = min(x_coords), max(x_coords)
        y_min, y_max = min(y_coords), max(y_coords)
        
        padding = 10
        x_min = max(0, x_min - padding)
        y_min = max(0, y_min - padding)
        x_max = min(image.shape[1], x_max + padding)
        y_max = min(image.shape[0], y_max + padding)
        
        eye_region = image[y_min:y_max, x_min:x_max]
        return eye_region, (x_min, y_min, x_max, y_max)
    
    def generate_blink_sequence(self, image_path, output_path, blink_frames=5):
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"Could not read image: {image_path}")
        faces = self.detect_faces(image)
        
        if len(faces) == 0:
            raise ValueError("No faces detected in the image")
            
        # Note: the GIF is rewritten once per detected face, so with multiple
        # faces only the last face's animation survives in output_path.
        for face in faces:
            landmarks = self.get_landmarks(image, face)
            left_eye, left_coords = self.extract_eye_region(image, landmarks, self.left_eye_landmarks)
            right_eye, right_coords = self.extract_eye_region(image, landmarks, self.right_eye_landmarks)
            
            # Create blink animation
            frames = []
            for i in range(blink_frames):
                progress = i / (blink_frames - 1)
                blink_factor = np.sin(progress * np.pi)
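                # sin() ramps the factor 0 -> 1 -> 0 across the sequence,
                # so the eyes close and then reopen within one GIF loop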
                
                # Apply blink transformation
                left_blink = self.apply_blink(left_eye, blink_factor)
                right_blink = self.apply_blink(right_eye, blink_factor)
                
                # Create composite image
                frame = image.copy()
                frame[left_coords[1]:left_coords[3], left_coords[0]:left_coords[2]] = left_blink
                frame[right_coords[1]:right_coords[3], right_coords[0]:right_coords[2]] = right_blink
                frames.append(frame)
            
            # Save as GIF
            pil_frames = [Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in frames]
            pil_frames[0].save(output_path, 
                              save_all=True, 
                              append_images=pil_frames[1:], 
                              duration=100, 
                              loop=0)
    
    def apply_blink(self, eye_image, blink_factor):
        height, width = eye_image.shape[:2]
        # Shrink the eye patch toward its centre; 0.7 caps the maximum closure
        scale = 1.0 - (blink_factor * 0.7)
        
        # Create transformation matrix
        center = (width // 2, height // 2)
        M = cv2.getRotationMatrix2D(center, 0, scale)
        
        # Apply transformation
        transformed = cv2.warpAffine(eye_image, M, (width, height))
        
        # Blend with original to maintain details
        alpha = 1.0 - blink_factor * 0.5
        result = cv2.addWeighted(transformed, alpha, eye_image, 1 - alpha, 0)
        
        return result
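apply_blink above shrinks the eye patch uniformly (a zero-angle rotation matrix with a scale term), which narrows the eye horizontally as well as vertically. A variant that compresses only the vertical axis tends to look closer to an eyelid closing. The sketch below is my own illustration, not part of the original code; it reuses the same 0.7 closure cap:

import cv2
import numpy as np

def apply_blink_vertical(eye_image, blink_factor):
    """Squash the eye patch vertically about its centre row.
    blink_factor runs from 0.0 (open) to 1.0 (maximum closure)."""
    height, width = eye_image.shape[:2]
    scale_y = 1.0 - blink_factor * 0.7
    # Affine map: keep x unchanged, scale y and recentre on the middle row
    M = np.float32([
        [1, 0, 0],
        [0, scale_y, (1.0 - scale_y) * height / 2.0],
    ])
    return cv2.warpAffine(
        eye_image, M, (width, height),
        borderMode=cv2.BORDER_REPLICATE,  # fill revealed rows with edge pixels, not black
    )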
requirements.txt:
numpy==1.21.0
opencv-python==4.5.3.56
dlib==19.22.0
pillow==8.3.1
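Assuming the list above is saved as requirements.txt (my naming; the post does not give a filename), a plain pip install -r requirements.txt is enough on most systems. One caveat: building dlib from source needs CMake and a C++ compiler, so a prebuilt dlib wheel or a conda package is the usual workaround when the pinned version will not compile.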
Usage example:
from blink_generator import BlinkGenerator
import os

def main():
    generator = BlinkGenerator()
    
    input_image = "test_face.jpg"
    output_gif = "blink_animation.gif"
    
    if not os.path.exists(input_image):
        print(f"Error: Input image {input_image} not found")
        return
    
    try:
        generator.generate_blink_sequence(input_image, output_gif)
        print(f"Successfully generated blink animation: {output_gif}")
    except Exception as e:
        print(f"Error generating animation: {str(e)}")

if __name__ == "__main__":
    main()
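One practical note: because generate_blink_sequence saves the GIF inside its per-face loop, running this on a group photo leaves only the last detected face's animation in blink_animation.gif; crop to a single face first, or call the method once per face with distinct output paths.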

Original statement: this article is published on the Tencent Cloud Developer Community with the author's authorization and may not be reproduced without permission.

For infringement concerns, please contact cloudcommunity@tencent.com to request removal.
