|
BTD /
Lab7BTD / Modul 7: Übungen — Aufgabe 1: Projektplanung
Aufgabe 2: Tool- oder Pipeline-Entwicklung
Aufgabe 3: Präsentation & Review
Beispielprojekt: 3D-Modell des eigenen Gesichts an Blender streamen. Pip installieren (am Mac liegt Blenders Python unter /Applications/Blender.app/Contents/Resources/4.5/python; unter Windows im Blender-Installationsordner). Im Python-Verzeichnis von Blender ausführen:
.\python.exe -m ensurepip --upgrade
.\python.exe -m pip install --upgrade pip
.\python.exe -m pip install websocket-server
HTML/JavaScript-Code:
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>MediaPipe FaceMesh Live</title>
    <script src="https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/face_mesh.js"></script>
    <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js"></script>
    <style>
      body { margin: 0; overflow: hidden; }
      video, canvas { position: absolute; top: 0; left: 0; }
      canvas { pointer-events: none; z-index: 1; }
    </style>
  </head>
  <body>
    <video id="webcam" autoplay playsinline width="640" height="480"></video>
    <canvas id="overlay" width="640" height="480"></canvas>
    <script>
      // Grab the DOM elements once up front.
      const videoEl = document.getElementById("webcam");
      const overlayEl = document.getElementById("overlay");
      const draw = overlayEl.getContext("2d");

      // Open the WebSocket connection to the Blender-side server.
      const socket = new WebSocket("ws://localhost:8765/");
      socket.onopen = () => console.log("WebSocket connected!");
      socket.onerror = (err) => console.error("WebSocket error:", err);

      // Configure MediaPipe FaceMesh: single face, refined landmarks.
      const mesh = new FaceMesh({
        locateFile: (file) =>
          `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`,
      });
      mesh.setOptions({
        maxNumFaces: 1,
        refineLandmarks: true,
        minDetectionConfidence: 0.5,
        minTrackingConfidence: 0.5,
      });

      // Per-frame result callback: draw landmarks onto the overlay canvas
      // and stream the normalized coordinates to Blender.
      mesh.onResults((results) => {
        draw.clearRect(0, 0, overlayEl.width, overlayEl.height);
        const faces = results.multiFaceLandmarks;
        if (!faces || faces.length === 0) return;

        const landmarks = faces[0];
        draw.fillStyle = "red";
        for (const pt of landmarks) {
          draw.beginPath();
          draw.arc(pt.x * overlayEl.width, pt.y * overlayEl.height, 1.5, 0, 2 * Math.PI);
          draw.fill();
        }

        // Forward the normalized [x, y, z] triples to Blender.
        if (socket.readyState === WebSocket.OPEN) {
          socket.send(JSON.stringify(landmarks.map((pt) => [pt.x, pt.y, pt.z])));
        }
      });

      // Pump webcam frames into FaceMesh.
      const cam = new Camera(videoEl, {
        onFrame: async () => await mesh.send({ image: videoEl }),
        width: 640,
        height: 480,
      });
      cam.start();
    </script>
  </body>
</html>
HTTP-Server starten: .\python.exe -m http.server
Blender-Code:
import bpy
from websocket_server import WebsocketServer
import json
import threading
# Prepare the point cloud: remove any stale object from a previous run,
# then create a fresh mesh/object pair and link it into the active collection.
mesh_name = "FacePointCloud"
if mesh_name in bpy.data.objects:
    bpy.data.objects.remove(bpy.data.objects[mesh_name], do_unlink=True)
mesh = bpy.data.meshes.new(mesh_name)
obj = bpy.data.objects.new(mesh_name, mesh)
bpy.context.collection.objects.link(obj)
# Base MediaPipe FaceMesh landmark count. NOTE(review): the HTML client sets
# refineLandmarks: true, which presumably sends more points per frame --
# confirm the count the browser actually delivers matches this seed value.
num_points = 468
# Seed the mesh with placeholder vertices at the origin (no edges, no faces).
mesh.from_pydata([(0,0,0)]*num_points, [], [])
# Callback for received messages
def new_message(client, server, message):
    """Apply one landmark frame from the browser to the point-cloud mesh.

    `message` is a JSON array of normalized [x, y, z] triples; coordinates
    are remapped from [0, 1] to roughly [-1, 1] with the Y axis flipped so
    the face is upright in Blender.

    NOTE(review): this runs on the websocket-server worker thread, but
    Blender's data API is only safe from the main thread -- this is the
    likely cause of the crashes that "Variante 2" below avoids by using
    bpy.app.timers.
    """
    try:
        points = json.loads(message)
        verts = [((p[0]-0.5)*2, (p[1]-0.5)*-2, p[2]) for p in points]
        if len(verts) != len(mesh.vertices):
            # Bug fix: MeshVertices.add() cannot shrink a mesh, so a frame
            # with fewer points than currently present would raise. Rebuild
            # the geometry instead (same approach as Variante 2).
            mesh.clear_geometry()
            mesh.from_pydata(verts, [], [])
        else:
            # Fast path: same vertex count, overwrite coordinates in place.
            mesh.vertices.foreach_set("co", [c for v in verts for c in v])
        mesh.update()
        print(f"Empfangen: {len(verts)} Punkte")
    except Exception as e:
        # Best-effort: log and keep the server alive on malformed frames.
        print("Fehler beim Verarbeiten der Punkte:", e)
# Start the server
def start_ws_server():
    """Spin up the websocket server and block forever (runs in a daemon thread)."""
    ws = WebsocketServer(host='127.0.0.1', port=8765, loglevel=1)
    ws.set_fn_message_received(new_message)
    print("WebSocket Server läuft auf ws://127.0.0.1:8765")
    ws.run_forever()

# Daemon thread so Blender can exit without waiting for the server.
threading.Thread(target=start_ws_server, daemon=True).start()
print("Face point cloud server Thread gestartet...")
Variante 2 (crasht Blender nicht)
import bpy
import json
import threading
from collections import deque
import sys
# WebSocket library (make sure installed in Blender Python)
sys.path.insert(0, "/Users/akp/blender_py_libs")
from websocket_server import WebsocketServer
# Server endpoint the browser page connects to (must match the ws:// URL
# in the HTML client).
HOST = "127.0.0.1"
PORT = 8765
# Base MediaPipe FaceMesh landmark count used to seed the placeholder mesh.
NUM_POINTS = 468
MESH_NAME = "FacePointCloud"
# Single-slot queue: only the newest landmark frame is kept, older frames
# are silently dropped (maxlen=1).
point_queue = deque(maxlen=1)
# Guards point_queue, shared between the WebSocket thread (producer) and
# Blender's timer callback on the main thread (consumer).
queue_lock = threading.Lock()
# Mesh setup
def setup_mesh():
    """Create (or recreate) the placeholder point-cloud mesh object.

    Removes any leftover object with the same name from a previous run,
    then links a fresh mesh seeded with NUM_POINTS origin vertices into
    the active collection.
    """
    stale = bpy.data.objects.get(MESH_NAME)
    if stale is not None:
        bpy.data.objects.remove(stale, do_unlink=True)
    point_cloud = bpy.data.meshes.new(MESH_NAME)
    holder = bpy.data.objects.new(MESH_NAME, point_cloud)
    bpy.context.collection.objects.link(holder)
    # Placeholder geometry: vertices only, no edges or faces.
    point_cloud.from_pydata([(0, 0, 0) for _ in range(NUM_POINTS)], [], [])
    point_cloud.update()
    print("Mesh initialized")

setup_mesh()
# WebSocket callback
def on_message(client, server, message):
    """Parse a landmark frame (JSON array of [x, y, z]) and queue it.

    Runs on the websocket-server thread; only touches the lock-protected
    queue, never Blender data (the timer on the main thread consumes it).
    """
    frame = json.loads(message)
    with queue_lock:
        point_queue.append(frame)
# Start WebSocket server thread
def start_ws():
    """Run the websocket server forever (intended for a daemon thread)."""
    ws = WebsocketServer(host=HOST, port=PORT, loglevel=0)
    ws.set_fn_message_received(on_message)
    print(f"WebSocket running on ws://{HOST}:{PORT}")
    ws.run_forever()

# Daemon thread so Blender can quit without joining the server.
threading.Thread(target=start_ws, daemon=True).start()
# Timer function
def update_mesh():
    """Blender timer callback: apply the newest landmark frame to the mesh.

    Registered via bpy.app.timers, so it runs on Blender's main thread --
    the only thread that may safely touch bpy data. Returns the delay in
    seconds until the next invocation (10 ms).
    """
    # Take the pending frame while holding the lock, but release it before
    # touching Blender data: the original held the lock across the whole
    # mesh rebuild, needlessly blocking the WebSocket producer thread.
    with queue_lock:
        if not point_queue:
            return 0.01
        points = point_queue.pop()
        point_queue.clear()
    # Map normalized [0, 1] coordinates to roughly [-1, 1], flipping Y so
    # the face is upright in Blender.
    coords = [((p[0] - 0.5) * 2, (p[1] - 0.5) * -2, p[2]) for p in points]
    obj = bpy.data.objects.get(MESH_NAME)
    if obj is None:
        # Object was deleted (e.g. by the user). The original indexed
        # bpy.data.objects[MESH_NAME], and a raised KeyError would silently
        # unregister the timer; skip the frame and keep ticking instead.
        return 0.01
    mesh = obj.data
    mesh.clear_geometry()
    mesh.from_pydata(coords, [], [])
    mesh.update()
    bpy.context.view_layer.update()
    return 0.01

bpy.app.timers.register(update_mesh, persistent=True)
print("Face point cloud server started successfully")
|