#!/usr/bin/env python3
"""
AstraVoxel Live - Real-Time 3D Motion Analysis
==============================================

Real-time USB webcam support for live object tracking and 3D reconstruction.
Suited to surveillance, motion analysis, and real-time 3D modeling.

Works with your USB webcam to:
✅ Detect real-time motion and movement
✅ Build 3D voxel models from camera feeds
✅ Track objects across camera views
✅ Provide interactive 3D visualization
✅ Support multiple cameras simultaneously

Requirements:
- Python 3.6+
- OpenCV (cv2) for camera access
- numpy for array operations
- matplotlib for 3D visualization
- tkinter for the GUI

Installation: pip install opencv-python numpy matplotlib
"""

import tkinter as tk
from tkinter import ttk, filedialog, messagebox
import threading
import time

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the '3d' projection)

# Try to import OpenCV
try:
    import cv2
    OPENCV_AVAILABLE = True
except ImportError:
    OPENCV_AVAILABLE = False

from typing import Dict, List, Tuple, Optional
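
# --- Illustrative sketch (not part of the application flow) ---------------
# A minimal standalone check, assuming OpenCV is installed, that a webcam at
# the given index can be opened and actually delivers a frame.  The helper
# name and behaviour are illustrative; run it manually if camera detection
# in the GUI comes up empty.
def _check_webcam_access(index: int = 0) -> bool:
    """Return True if camera `index` opens and yields one frame."""
    if not OPENCV_AVAILABLE:
        print("OpenCV not installed: pip install opencv-python")
        return False
    cap = cv2.VideoCapture(index)
    try:
        if not cap.isOpened():
            print(f"Camera {index} could not be opened")
            return False
        ret, frame = cap.read()
        if not ret or frame is None:
            print(f"Camera {index} opened but returned no frame")
            return False
        print(f"Camera {index} OK, frame shape: {frame.shape}")
        return True
    finally:
        cap.release()
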

class WebcamManager:
    """
    Manages USB webcam detection and camera feeds.
    """

    def __init__(self):
        """Initialize webcam manager"""
        self.available_cameras = []
        self.active_cameras = {}
        self.camera_configs = {}
        self.detect_cameras()

    def detect_cameras(self) -> List[int]:
        """
        Detect all available USB cameras.
        Returns a list of camera indices that are accessible.
        """
        if not OPENCV_AVAILABLE:
            return []

        self.available_cameras = []

        # Probe camera indices 0-5 (the typical USB webcam range) and keep
        # only those that open and deliver a frame.
        for i in range(6):
            cap = cv2.VideoCapture(i)
            if cap.isOpened():
                ret, frame = cap.read()
                if ret and frame is not None:
                    self.available_cameras.append(i)
            cap.release()

        return self.available_cameras
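
    # Illustrative sketch (not called by the app): some webcams deliver an
    # empty first frame while the sensor warms up, so probing with a single
    # read can under-report availability.  The helper below is an assumption
    # rather than part of the original design; it requires a few consecutive
    # good reads before declaring a camera usable.
    @staticmethod
    def probe_camera(index: int, warmup_frames: int = 3) -> bool:
        """Return True only if `index` opens and yields several frames."""
        if not OPENCV_AVAILABLE:
            return False
        cap = cv2.VideoCapture(index)
        try:
            if not cap.isOpened():
                return False
            for _ in range(warmup_frames):
                ret, frame = cap.read()
                if not ret or frame is None:
                    return False
            return True
        finally:
            cap.release()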

    def get_camera_info(self, index: int) -> Dict:
        """Get information about a specific camera"""
        info = {
            'index': index,
            'name': f'Camera {index}',
            'resolution': 'Unknown',
            'type': 'USB Webcam'
        }

        if OPENCV_AVAILABLE:
            cap = cv2.VideoCapture(index)
            if cap.isOpened():
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                fps = cap.get(cv2.CAP_PROP_FPS)
                info['resolution'] = f'{width}x{height}'
                info['fps'] = f'{fps:.1f}' if fps > 0 else 'Unknown'
            cap.release()

        return info

    def start_camera_feed(self, camera_index: int) -> bool:
        """Start capturing from a USB camera"""
        if camera_index not in self.available_cameras:
            return False

        if camera_index in self.active_cameras:
            return True  # Already running

        # Initialize camera
        cap = cv2.VideoCapture(camera_index)
        if not cap.isOpened():
            return False

        self.active_cameras[camera_index] = {
            'cap': cap,
            'frame': None,
            'thread': None,
            'running': True
        }

        # Start capture thread
        thread = threading.Thread(
            target=self._camera_capture_thread,
            args=(camera_index,),
            daemon=True
        )
        thread.start()
        self.active_cameras[camera_index]['thread'] = thread

        return True

    def stop_camera_feed(self, camera_index: int) -> bool:
        """Stop capturing from a camera"""
        if camera_index not in self.active_cameras:
            return False

        self.active_cameras[camera_index]['running'] = False

        # Wait for thread to finish
        if self.active_cameras[camera_index]['thread']:
            self.active_cameras[camera_index]['thread'].join(timeout=1.0)

        # Release camera
        if self.active_cameras[camera_index]['cap']:
            self.active_cameras[camera_index]['cap'].release()

        del self.active_cameras[camera_index]
        return True

    def get_latest_frame(self, camera_index: int) -> Optional[np.ndarray]:
        """Get the latest frame from a camera"""
        if camera_index in self.active_cameras:
            return self.active_cameras[camera_index]['frame']
        return None

    def _camera_capture_thread(self, camera_index: int):
        """Background thread for camera capture"""
        camera = self.active_cameras[camera_index]
        cap = camera['cap']

        while camera['running']:
            try:
                ret, frame = cap.read()
                if ret:
                    # Convert to grayscale for processing
                    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    camera['frame'] = gray_frame
                else:
                    # Camera disconnected
                    break

                time.sleep(1.0 / 30.0)  # ~30 FPS

            except Exception as e:
                print(f"Camera {camera_index} error: {e}")
                break

        # Cleanup
        camera['running'] = False

    def stop_all_cameras(self):
        """Stop all active camera feeds"""
        for camera_index in list(self.active_cameras.keys()):
            self.stop_camera_feed(camera_index)
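
# --- Usage sketch (illustrative only, not invoked anywhere) ----------------
# Shows how WebcamManager can be driven directly from a script, without the
# Tk interface: detect cameras, start the first feed, poll a few frames,
# then shut the feed down.
def _webcam_manager_demo(num_frames: int = 10) -> None:
    manager = WebcamManager()
    if not manager.available_cameras:
        print("No cameras detected")
        return

    index = manager.available_cameras[0]
    if not manager.start_camera_feed(index):
        print(f"Could not start camera {index}")
        return

    try:
        grabbed = 0
        while grabbed < num_frames:
            frame = manager.get_latest_frame(index)
            if frame is not None:
                grabbed += 1
                print(f"Frame {grabbed}: shape={frame.shape}")
            time.sleep(0.1)
    finally:
        manager.stop_camera_feed(index)
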

class AstraVoxelLiveApp:
    """
    Main AstraVoxel Live application with USB webcam support.
    """

    def __init__(self, root):
        """Initialize the main application"""
        self.root = root
        self.root.title("AstraVoxel Live - Real-Time 3D Motion Analysis")
        self.root.geometry("1400x900")

        # Initialize webcam manager
        self.webcam_manager = WebcamManager()

        # Real-time processing state
        self.processing_active = False
        self.voxel_grid = None
        self.grid_size = 32
        self.motion_threshold = 25.0

        # Previous frames per camera, used for motion detection
        self.previous_frames = {}

        # Performance tracking
        self.frame_count = 0
        self.motion_pixels = 0
        self.active_voxels = 0

        # Set up the application
        self.setup_ui()

        # Auto-detect cameras on startup
        self.detect_and_list_cameras()

    def setup_ui(self):
        """Set up the main user interface"""
        # Create main container
        main_frame = ttk.Frame(self.root)
        main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

        # Header
        header_frame = ttk.Frame(main_frame)
        header_frame.pack(fill=tk.X, pady=(0, 10))

        ttk.Label(header_frame,
                  text="🎥 AstraVoxel Live - Real-Time USB Webcam Tracking",
                  font=("Segoe UI", 14, "bold")).pack(side=tk.LEFT)

        # Status indicator
        self.status_var = tk.StringVar(value="Status: Ready")
        status_label = ttk.Label(header_frame, textvariable=self.status_var)
        status_label.pack(side=tk.RIGHT)

        # Main split: Cameras + Controls | 3D View
        paned = ttk.PanedWindow(main_frame, orient=tk.HORIZONTAL)
        paned.pack(fill=tk.BOTH, expand=True)

        # Left panel - Camera controls
        left_panel = ttk.Frame(paned)

        # Camera detection
        detect_frame = ttk.LabelFrame(left_panel, text="Camera Detection")
        detect_frame.pack(fill=tk.X, pady=(0, 10))

        ttk.Label(detect_frame, text="Available Cameras:").pack(anchor=tk.W)

        self.camera_listbox = tk.Listbox(detect_frame, height=6)
        self.camera_listbox.pack(fill=tk.X, pady=5)

        # Camera controls
        controls_frame = ttk.Frame(detect_frame)
        controls_frame.pack(fill=tk.X, pady=5)

        ttk.Button(controls_frame, text="🔄 Refresh",
                   command=self.detect_and_list_cameras).pack(side=tk.LEFT, padx=2)
        ttk.Button(controls_frame, text="▶️ Start Camera",
                   command=self.start_selected_camera).pack(side=tk.LEFT, padx=2)
        ttk.Button(controls_frame, text="⏹️ Stop Camera",
                   command=self.stop_selected_camera).pack(side=tk.LEFT, padx=2)

        # Processing controls
        process_frame = ttk.LabelFrame(left_panel, text="Motion Processing")
        process_frame.pack(fill=tk.X, pady=(10, 10))

        ttk.Button(process_frame, text="🚀 Start Motion Detection",
                   command=self.start_processing).pack(fill=tk.X, pady=5)
        ttk.Button(process_frame, text="⏹️ Stop Processing",
                   command=self.stop_processing).pack(fill=tk.X, pady=5)

        # Processing parameters
        params_frame = ttk.LabelFrame(left_panel, text="Parameters")
        params_frame.pack(fill=tk.X, pady=(10, 0))

        ttk.Label(params_frame, text="Motion Sensitivity:").grid(row=0, column=0, sticky=tk.W, pady=2)
        self.motion_scale = ttk.Scale(params_frame, from_=1, to=100, orient=tk.HORIZONTAL)
        self.motion_scale.set(self.motion_threshold)
        self.motion_scale.grid(row=0, column=1, sticky=tk.W, padx=5, pady=2)

        ttk.Label(params_frame, text="Voxel Resolution:").grid(row=1, column=0, sticky=tk.W, pady=2)
        self.resolution_scale = ttk.Scale(params_frame, from_=16, to=64, orient=tk.HORIZONTAL)
        self.resolution_scale.set(self.grid_size)
        self.resolution_scale.grid(row=1, column=1, sticky=tk.W, padx=5, pady=2)

        paned.add(left_panel, weight=1)

        # Right panel - 3D visualization
        right_panel = ttk.Frame(paned)

        # Matplotlib 3D figure
        self.figure = plt.Figure(figsize=(8, 6), dpi=100)
        self.ax3d = self.figure.add_subplot(111, projection='3d')

        # Initialize empty plot
        self.update_3d_visualization()

        self.canvas = FigureCanvasTkAgg(self.figure, master=right_panel)
        self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True)

        # View controls (placeholders: not yet wired to command handlers)
        controls_frame = ttk.Frame(right_panel)
        controls_frame.pack(fill=tk.X, pady=(5, 0))

        ttk.Button(controls_frame, text="🔄 Reset View").pack(side=tk.LEFT, padx=2)
        ttk.Button(controls_frame, text="📸 Screenshot").pack(side=tk.LEFT, padx=2)
        ttk.Button(controls_frame, text="🗑️ Clear Voxels").pack(side=tk.LEFT, padx=2)

        # Voxel statistics
        self.stats_frame = ttk.Frame(right_panel)
        self.stats_frame.pack(fill=tk.X, pady=(5, 0))

        ttk.Label(self.stats_frame, text="Active Voxels:").pack(side=tk.LEFT)
        self.voxel_count_label = ttk.Label(self.stats_frame, text="0")
        self.voxel_count_label.pack(side=tk.RIGHT)

        paned.add(right_panel, weight=3)

        # Bottom status bar
        status_frame = ttk.Frame(self.root, relief=tk.SUNKEN, borderwidth=1)
        status_frame.pack(fill=tk.X, side=tk.BOTTOM)

        self.bottom_status = ttk.Label(status_frame, text="Ready")
        self.bottom_status.pack(side=tk.LEFT, padx=5, pady=2)

        # Warn if OpenCV is not available
        if not OPENCV_AVAILABLE:
            messagebox.showwarning(
                "OpenCV Required",
                "OpenCV is not available. Please install it:\n\npip install opencv-python\n\nThen restart the application."
            )

    def detect_and_list_cameras(self):
        """Detect available cameras and update the list"""
        self.camera_listbox.delete(0, tk.END)

        self.bottom_status.config(text="Detecting cameras...")

        try:
            cameras = self.webcam_manager.detect_cameras()

            if cameras:
                for cam_index in cameras:
                    info = self.webcam_manager.get_camera_info(cam_index)
                    display_text = f"📹 Camera {cam_index}: {info['resolution']} - {info['type']}"
                    self.camera_listbox.insert(tk.END, display_text)

                self.bottom_status.config(text=f"Found {len(cameras)} camera(s)")
            else:
                self.camera_listbox.insert(tk.END, "❌ No cameras detected")
                self.camera_listbox.insert(tk.END, "Make sure your USB webcam is connected")
                self.bottom_status.config(text="No cameras found - connect a USB webcam")

        except Exception as e:
            messagebox.showerror("Camera Detection Error", f"Failed to detect cameras:\n{str(e)}")
            self.bottom_status.config(text="Camera detection failed")

    def start_selected_camera(self):
        """Start the selected camera from the list"""
        selection = self.camera_listbox.curselection()
        if not selection:
            messagebox.showwarning("No Selection", "Please select a camera from the list.")
            return

        selected_text = self.camera_listbox.get(selection[0])

        # Extract the camera index from the "📹 Camera 0: ..." display text
        # (a more robust regex-based variant is sketched below)
        try:
            camera_index = int(selected_text.split('Camera')[1].split(':')[0].strip())
        except (IndexError, ValueError):
            messagebox.showerror("Parse Error", "Could not parse camera index.")
            return

        # Start the camera
        self.bottom_status.config(text=f"Starting Camera {camera_index}...")
        self.root.update()

        if self.webcam_manager.start_camera_feed(camera_index):
            self.status_var.set(f"Status: Camera {camera_index} Active")
            self.bottom_status.config(text=f"Camera {camera_index} started successfully")
        else:
            messagebox.showerror("Camera Error", f"Failed to start Camera {camera_index}")
            self.bottom_status.config(text="Failed to start camera")

    def stop_selected_camera(self):
        """Stop the selected camera"""
        selection = self.camera_listbox.curselection()
        if not selection:
            messagebox.showwarning("No Selection", "Please select a camera from the list.")
            return

        selected_text = self.camera_listbox.get(selection[0])

        try:
            camera_index = int(selected_text.split('Camera')[1].split(':')[0].strip())
        except (IndexError, ValueError):
            messagebox.showerror("Parse Error", "Could not parse camera index.")
            return

        if self.webcam_manager.stop_camera_feed(camera_index):
            self.status_var.set("Status: Ready")
            self.bottom_status.config(text=f"Camera {camera_index} stopped")
        else:
            self.bottom_status.config(text="Failed to stop camera")
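
    # Illustrative sketch (hypothetical helper, not wired into the UI):
    # a regex-based alternative to the string splitting above for pulling
    # the numeric index out of a listbox row such as
    # "📹 Camera 0: 640x480 - USB Webcam".  Informational rows yield None.
    @staticmethod
    def _parse_camera_index(display_text: str) -> Optional[int]:
        import re
        match = re.search(r"Camera\s+(\d+)", display_text)
        return int(match.group(1)) if match else None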

    def start_processing(self):
        """Start motion detection and voxel processing"""
        # Check if we have any active cameras
        active_cameras = list(self.webcam_manager.active_cameras.keys())
        if not active_cameras:
            messagebox.showwarning("No Active Cameras", "Please start at least one camera before processing.")
            return

        self.processing_active = True
        self.status_var.set("Status: Processing Motion")
        self.bottom_status.config(text="Motion detection active...")

        # Initialize voxel grid
        self.grid_size = int(self.resolution_scale.get())
        self.motion_threshold = self.motion_scale.get()
        self.voxel_grid = np.zeros((self.grid_size, self.grid_size, self.grid_size), dtype=np.float32)

        # Clear previous frames
        self.previous_frames = {}

        # Start processing thread
        self.processing_thread = threading.Thread(target=self.processing_loop, daemon=True)
        self.processing_thread.start()

    def stop_processing(self):
        """Stop motion processing"""
        self.processing_active = False
        self.status_var.set("Status: Ready")
        self.bottom_status.config(text="Processing stopped")

    def processing_loop(self):
        """Main processing loop for motion detection and voxel accumulation"""
        active_cameras = list(self.webcam_manager.active_cameras.keys())

        while self.processing_active and active_cameras:
            try:
                start_time = time.time()
                total_motion_pixels = 0

                for camera_index in active_cameras:
                    frame = self.webcam_manager.get_latest_frame(camera_index)
                    if frame is None:
                        continue

                    # Motion detection
                    motion_pixels = self.detect_motion(camera_index, frame)
                    total_motion_pixels += motion_pixels

                    # Accumulate to voxel grid
                    if motion_pixels > 0:
                        self.accumulate_to_voxels(frame, motion_pixels)

                # Update statistics
                self.frame_count += 1
                self.motion_pixels = total_motion_pixels
                self.update_statistics()

                # Update 3D visualization every 10 frames
                if self.frame_count % 10 == 0:
                    self.root.after(0, self.update_3d_visualization)

                # Throttle the loop to roughly 30 iterations per second
                elapsed = time.time() - start_time
                time.sleep(max(0.0, 1.0 / 30.0 - elapsed))

            except Exception as e:
                print(f"Processing error: {e}")
                time.sleep(0.1)

    def detect_motion(self, camera_index: int, current_frame: np.ndarray) -> int:
        """Detect motion between the current and previous frames"""
        if camera_index not in self.previous_frames:
            self.previous_frames[camera_index] = current_frame.copy()
            return 0

        previous_frame = self.previous_frames[camera_index]

        # If the resolution changed, reset the reference frame
        if current_frame.shape != previous_frame.shape:
            self.previous_frames[camera_index] = current_frame.copy()
            return 0

        # Simple absolute-difference motion detection
        # (a noise-smoothed variant is sketched below)
        diff = np.abs(current_frame.astype(np.float32) - previous_frame.astype(np.float32))
        motion_mask = diff > self.motion_threshold

        motion_pixels = np.count_nonzero(motion_mask)

        # Store current frame for the next comparison
        self.previous_frames[camera_index] = current_frame.copy()

        return motion_pixels
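
    # Illustrative sketch (an alternative, not the method used above): frame
    # differencing on raw pixels is sensitive to sensor noise.  A common
    # refinement, assumed here rather than taken from the original code, is
    # to blur both frames before differencing and thresholding with OpenCV.
    @staticmethod
    def _smoothed_frame_difference(previous: np.ndarray,
                                   current: np.ndarray,
                                   threshold: float = 25.0) -> int:
        """Return the count of changed pixels after Gaussian smoothing."""
        prev_blur = cv2.GaussianBlur(previous, (5, 5), 0)
        curr_blur = cv2.GaussianBlur(current, (5, 5), 0)
        diff = cv2.absdiff(curr_blur, prev_blur)
        _, mask = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
        return cv2.countNonZero(mask)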

    def accumulate_to_voxels(self, frame: np.ndarray, motion_pixels: int):
        """Accumulate motion data into the voxel grid"""
        if self.voxel_grid is None:
            return

        height, width = frame.shape
        motion_intensity = motion_pixels / (height * width)  # Normalize

        # Simple voxel mapping (could be more sophisticated with camera
        # calibration); for demonstration, drop a small cluster of voxels at
        # a random location so the 3D model is visible.  A calibrated
        # projection is sketched in the helper below.
        if motion_pixels > 50:  # Only accumulate significant motion
            base_x = np.random.randint(0, self.grid_size // 2)
            base_y = np.random.randint(0, self.grid_size // 2)
            base_z = np.random.randint(self.grid_size // 4, 3 * self.grid_size // 4)

            # Create a small cluster of voxels
            cluster_size = min(5, int(np.sqrt(motion_pixels) / 50))
            for dx in range(-cluster_size, cluster_size + 1):
                for dy in range(-cluster_size, cluster_size + 1):
                    for dz in range(-cluster_size, cluster_size + 1):
                        x, y, z = base_x + dx, base_y + dy, base_z + dz

                        # Bounds check
                        if 0 <= x < self.grid_size and 0 <= y < self.grid_size and 0 <= z < self.grid_size:
                            # Weight evidence by distance from the cluster
                            # centre so the blob falls off smoothly
                            distance = np.sqrt(dx * dx + dy * dy + dz * dz)
                            weight = motion_intensity * np.exp(-distance / 3.0)
                            self.voxel_grid[x, y, z] += weight
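
    # Illustrative sketch (hypothetical, not used by the app): with camera
    # calibration one could map motion pixels into the grid geometrically
    # instead of at random.  The helper below assumes a flat depth plane and
    # simply rescales pixel coordinates into voxel coordinates; a full
    # solution would cast rays using the camera's intrinsics and pose.
    @staticmethod
    def _project_motion_to_voxels(motion_mask: np.ndarray,
                                  voxel_grid: np.ndarray,
                                  depth_layer: int) -> None:
        grid_size = voxel_grid.shape[0]
        ys, xs = np.nonzero(motion_mask)
        if len(xs) == 0:
            return
        height, width = motion_mask.shape
        gx = np.clip(xs * grid_size // width, 0, grid_size - 1)
        gy = np.clip(ys * grid_size // height, 0, grid_size - 1)
        gz = int(np.clip(depth_layer, 0, grid_size - 1))
        # Accumulate one unit of evidence per moving pixel
        np.add.at(voxel_grid, (gx, gy, gz), 1.0)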

    def update_3d_visualization(self):
        """Update the 3D voxel visualization"""
        if self.voxel_grid is None:
            return

        try:
            # Clear existing plot
            self.ax3d.clear()

            # Get coordinates of voxels with significant evidence
            voxel_coords = np.where(self.voxel_grid > 0.1)

            if len(voxel_coords[0]) > 0:
                intensities = self.voxel_grid[voxel_coords]

                # Create 3D scatter plot
                self.ax3d.scatter(
                    voxel_coords[0], voxel_coords[1], voxel_coords[2],
                    c=intensities, cmap='plasma', marker='o',
                    s=5, alpha=0.8
                )

                self.ax3d.set_xlabel('X')
                self.ax3d.set_ylabel('Y')
                self.ax3d.set_zlabel('Z')
                self.ax3d.set_title(f'Live Motion Tracking\n{len(voxel_coords[0])} Voxels Active')
                self.ax3d.set_xlim(0, self.grid_size)
                self.ax3d.set_ylim(0, self.grid_size)
                self.ax3d.set_zlim(0, self.grid_size)

                # Update voxel count
                self.voxel_count_label.config(text=str(len(voxel_coords[0])))

            else:
                # No voxels yet: show a placeholder message in axes coordinates
                self.ax3d.text2D(0.5, 0.5,
                                 'Waiting for motion...\nMove objects in front of camera',
                                 ha='center', va='center',
                                 transform=self.ax3d.transAxes)
                self.ax3d.set_title('3D Motion Tracking (No Data)')
                self.ax3d.set_xlim(0, self.grid_size)
                self.ax3d.set_ylim(0, self.grid_size)
                self.ax3d.set_zlim(0, self.grid_size)

                self.voxel_count_label.config(text="0")

            # Refresh canvas
            self.canvas.draw()

        except Exception as e:
            print(f"Viz update error: {e}")
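
    # Illustrative sketch (assumption: helper not called anywhere): as the
    # grid fills up, redrawing every voxel can make the Matplotlib scatter
    # sluggish.  One option is to randomly subsample the points before
    # plotting, as below.
    @staticmethod
    def _decimate_voxels(coords, intensities, max_points: int = 5000):
        n = len(intensities)
        if n <= max_points:
            return coords, intensities
        keep = np.random.choice(n, size=max_points, replace=False)
        return tuple(c[keep] for c in coords), intensities[keep]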

    def update_statistics(self):
        """Update performance statistics"""
        self.active_voxels = np.count_nonzero(self.voxel_grid) if self.voxel_grid is not None else 0
        # Statistics are surfaced in the GUI through label updates

    def __del__(self):
        """Cleanup on application close"""
        if getattr(self, 'processing_active', False):
            self.processing_active = False
        if hasattr(self, 'webcam_manager'):
            self.webcam_manager.stop_all_cameras()

def main():
    """Main function to run AstraVoxel Live"""
    print("🎥 AstraVoxel Live - Real-Time USB Webcam Support")
    print("==================================================")
    print()
    print("This application uses your USB webcam for:")
    print("• Real-time motion detection")
    print("• 3D voxel reconstruction from movement")
    print("• Interactive 3D visualization")
    print("• Object tracking and behavior analysis")
    print()

    if not OPENCV_AVAILABLE:
        print("❌ OpenCV is required but not installed.")
        print("Please install it with: pip install opencv-python")
        return

    print("Starting AstraVoxel Live interface...")
    root = tk.Tk()
    app = AstraVoxelLiveApp(root)

    print("✅ AstraVoxel Live started successfully!")
    print("Connect your USB webcam and click 'Start Camera'")

    root.mainloop()


if __name__ == "__main__":
    main()