This repository was archived on 2026-02-15. You can view and clone its files, but you cannot open issues, create pull requests, or push commits.
Files
breakpilot-pwa/backend/mac_mini_api.py
Benjamin Admin 21a844cb8a fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

575 lines
19 KiB
Python

"""
Mac Mini Remote Control API.
Provides endpoints for:
- Power control (shutdown, restart, wake-on-LAN)
- Status monitoring (ping, SSH, services)
- Docker container management
- Ollama model management
This API can run in two modes:
1. Remote mode: Running on MacBook, controlling Mac Mini via SSH
2. Local mode: Running on Mac Mini (in Docker), using direct commands
"""
import asyncio
import subprocess
import os
import httpx
from typing import Optional, List, Dict, Any
from fastapi import APIRouter, HTTPException, BackgroundTasks
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
import json
import socket
# Router mounted under /api/mac-mini; every endpoint in this module hangs off it.
router = APIRouter(prefix="/api/mac-mini", tags=["Mac Mini Control"])
# Configuration (overridable via environment variables)
MAC_MINI_IP = os.getenv("MAC_MINI_IP", "192.168.178.100")  # LAN address of the Mac Mini
MAC_MINI_USER = os.getenv("MAC_MINI_USER", "benjaminadmin")  # SSH login user for remote mode
MAC_MINI_MAC = os.getenv("MAC_MINI_MAC", "") # MAC address for Wake-on-LAN
# Project checkout on the Mac Mini used by the docker compose up/down endpoints.
PROJECT_PATH = "/Users/benjaminadmin/Projekte/breakpilot-pwa"
# Detect if running inside Docker (local mode on Mac Mini)
# In Docker, we use host.docker.internal to access host services
RUNNING_IN_DOCKER = os.path.exists("/.dockerenv")
DOCKER_HOST_IP = "host.docker.internal" if RUNNING_IN_DOCKER else MAC_MINI_IP
# NOTE(review): both branches build the same "http://{host}:11434" pattern;
# the conditional is redundant with DOCKER_HOST_IP above but kept as-is.
OLLAMA_HOST = f"http://{DOCKER_HOST_IP}:11434" if RUNNING_IN_DOCKER else f"http://{MAC_MINI_IP}:11434"
class ModelPullRequest(BaseModel):
    """Request body for POST /ollama/pull."""
    # Ollama model tag to download (passed through to Ollama's /api/pull).
    model: str
class CommandResponse(BaseModel):
    """Uniform response envelope returned by the control endpoints."""
    # True when the underlying command/action succeeded.
    success: bool
    # Human-readable (German) status message shown in the UI.
    message: str
    # Raw command output, when available.
    output: Optional[str] = None
async def run_ssh_command(command: str, timeout: int = 30) -> tuple[bool, str]:
    """Run a command via SSH on Mac Mini.

    Args:
        command: Shell command to execute on the remote host (interpreted
            by the remote user's shell).
        timeout: Seconds to wait for completion before giving up.

    Returns:
        (success, output): success is True iff the remote command exited
        with status 0; output is combined stdout+stderr, stripped.
    """
    try:
        # Use exec (argv list) instead of a local shell string: the previous
        # double-quoted f-string let the *local* shell expand `$2`, `$3` etc.
        # inside callers' awk scripts before ssh ever saw them, and was also
        # an injection hazard. The command is still run by the remote shell.
        process = await asyncio.create_subprocess_exec(
            "ssh",
            "-o", "ConnectTimeout=5",
            "-o", "StrictHostKeyChecking=no",
            f"{MAC_MINI_USER}@{MAC_MINI_IP}",
            command,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        try:
            stdout, stderr = await asyncio.wait_for(
                process.communicate(),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            # Don't leave an orphaned ssh process behind on timeout.
            process.kill()
            await process.wait()
            return False, "Command timed out"
        output = stdout.decode() + stderr.decode()
        return process.returncode == 0, output.strip()
    except Exception as e:
        return False, str(e)
async def check_ping() -> bool:
    """Return True if the Mac Mini answers a single ICMP ping within 2 seconds."""
    try:
        # exec form: no shell needed for a fixed argv.
        process = await asyncio.create_subprocess_exec(
            "ping", "-c", "1", "-W", "2", MAC_MINI_IP,
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.DEVNULL,
        )
        await process.wait()
        return process.returncode == 0
    except Exception:
        # Narrowed from a bare `except:` so task cancellation
        # (CancelledError, a BaseException) is no longer swallowed.
        return False
async def check_ssh() -> bool:
    """Probe SSH availability by running a trivial remote command."""
    ok, _output = await run_ssh_command("echo ok", timeout=10)
    return ok
async def check_service_http(url: str, timeout: int = 5) -> bool:
    """Check if an HTTP service is responding.

    Args:
        url: Endpoint to probe with a GET request.
        timeout: Request timeout in seconds.

    Returns:
        True only for an HTTP 200 response; any other status or error is False.
    """
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.get(url)
        return response.status_code == 200
    except Exception:
        # Narrowed from a bare `except:` so task cancellation still propagates.
        return False
async def check_internet() -> bool:
    """Check if the host has internet access.

    Tries three independent probes in order and returns True on the first
    success: an HTTP 204 check, a DNS lookup, and an ICMP ping to 8.8.8.8.
    """
    # Method 1: HTTP check to a reliable no-content endpoint.
    try:
        async with httpx.AsyncClient(timeout=5) as client:
            response = await client.get("https://www.google.com/generate_204")
        if response.status_code == 204:
            return True
    except Exception:
        pass
    # Method 2: DNS resolution check. Done via the event loop's resolver:
    # the previous direct socket.getaddrinfo() call is blocking and would
    # stall every other task for the duration of a slow lookup.
    try:
        await asyncio.get_running_loop().getaddrinfo(
            "google.com", 80, family=socket.AF_INET
        )
        return True
    except OSError:
        pass
    # Method 3: Ping to 8.8.8.8 (Google DNS).
    try:
        process = await asyncio.create_subprocess_exec(
            "ping", "-c", "1", "-W", "2", "8.8.8.8",
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.DEVNULL,
        )
        await asyncio.wait_for(process.wait(), timeout=5)
        if process.returncode == 0:
            return True
    except Exception:
        pass
    return False
async def get_local_system_info():
    """Get system info when running locally (in Docker on Mac Mini).

    Host-level metrics are not reachable from inside the container without
    the Docker socket or direct host access, so every value is reported as
    unknown.

    Returns:
        A (uptime, cpu_load, memory) tuple with each element set to None.
    """
    return None, None, None
async def get_docker_containers_local():
    """Get Docker container status when running inside Docker via the Docker socket.

    Returns:
        (ok, containers): ok is True if the Docker API was reachable;
        containers is a list of {"name": str, "status": str} dicts.
    """
    containers = []
    docker_socket = "/var/run/docker.sock"
    if not os.path.exists(docker_socket):
        return False, containers

    def _parse(data):
        # Shared extraction of name/status from the /containers/json payload
        # (previously duplicated in both the urllib and httpx branches).
        result = []
        for c in data:
            name = c.get("Names", ["/unknown"])[0].lstrip("/")
            state = c.get("State", "unknown")
            status = c.get("Status", state)
            result.append({"name": name, "status": status})
        return result

    try:
        # Use Python to query the Docker API via Unix socket (no curl needed).
        import urllib.request

        class UnixSocketHandler(urllib.request.AbstractHTTPHandler):
            # build_opener dispatches "unix://..." URLs to this method by scheme.
            def unix_open(self, req):
                import http.client
                import socket as sock

                class UnixHTTPConnection(http.client.HTTPConnection):
                    def __init__(self, socket_path):
                        super().__init__("localhost")
                        self.socket_path = socket_path

                    def connect(self):
                        self.sock = sock.socket(sock.AF_UNIX, sock.SOCK_STREAM)
                        self.sock.connect(self.socket_path)

                conn = UnixHTTPConnection(docker_socket)
                conn.request(req.get_method(), req.selector)
                return conn.getresponse()

        # Query Docker API
        opener = urllib.request.build_opener(UnixSocketHandler())
        req = urllib.request.Request("unix:///containers/json?all=true")
        response = opener.open(req, timeout=5)
        data = json.loads(response.read().decode())
        containers = _parse(data)
        return True, containers
    except Exception:
        # Fallback: query via httpx over the Unix domain socket. Must use the
        # *async* transport here — the sync httpx.HTTPTransport previously
        # passed is not compatible with AsyncClient and always failed.
        try:
            transport = httpx.AsyncHTTPTransport(uds=docker_socket)
            async with httpx.AsyncClient(transport=transport, timeout=5) as client:
                response = await client.get("http://localhost/containers/json?all=true")
            if response.status_code == 200:
                containers = _parse(response.json())
                return True, containers
        except Exception:
            pass
    return False, containers
async def _fetch_ollama_models(base_url: str) -> List[Dict[str, Any]]:
    """Return the installed-model list from Ollama's /api/tags, or [] on any failure."""
    try:
        async with httpx.AsyncClient(timeout=10) as client:
            response = await client.get(f"{base_url}/api/tags")
        if response.status_code == 200:
            return response.json().get("models", [])
    except Exception:
        # Best-effort: status reporting must not fail because Ollama is down.
        pass
    return []


@router.get("/status")
async def get_status():
    """Get comprehensive Mac Mini status.

    Local mode (running inside Docker on the Mac Mini): probes Ollama,
    internet access and the Docker socket directly. Remote mode (running on
    the MacBook): pings the Mac Mini first, then gathers service and system
    information over SSH.
    """
    if RUNNING_IN_DOCKER:
        # Local mode - running on Mac Mini itself; check services in parallel.
        ollama_task = asyncio.create_task(check_service_http(f"{OLLAMA_HOST}/api/tags"))
        internet_task = asyncio.create_task(check_internet())
        docker_task = asyncio.create_task(get_docker_containers_local())
        ollama_ok = await ollama_task
        internet_ok = await internet_task
        docker_ok, containers = await docker_task
        # Ollama model list (only worth fetching if the service answered).
        models = await _fetch_ollama_models(OLLAMA_HOST) if ollama_ok else []
        return {
            "online": True,
            "ip": MAC_MINI_IP,
            "ping": True,
            "ssh": True,  # We're running locally, SSH not needed
            "docker": docker_ok,
            "backend": True,  # We're the backend
            "ollama": ollama_ok,
            "internet": internet_ok,
            "uptime": "Lokal",  # Can't get this from inside Docker easily
            "cpu_load": None,
            "memory": None,
            "containers": containers,
            "models": models
        }
    # Remote mode - running on MacBook, checking Mac Mini via SSH.
    # Ping gates everything else: if the host is unreachable, skip SSH probes.
    ping_ok = await check_ping()
    if not ping_ok:
        return {
            "online": False,
            "ip": MAC_MINI_IP,
            "ping": False,
            "ssh": False,
            "docker": False,
            "backend": False,
            "ollama": False,
            "internet": False,
            "uptime": None,
            "cpu_load": None,
            "memory": None,
            "containers": [],
            "models": []
        }
    # Ping succeeded: check the remaining services in parallel.
    ssh_task = asyncio.create_task(check_ssh())
    backend_task = asyncio.create_task(check_service_http(f"http://{MAC_MINI_IP}:8000/health"))
    ollama_task = asyncio.create_task(check_service_http(f"http://{MAC_MINI_IP}:11434/api/tags"))
    ssh_ok = await ssh_task
    backend_ok = await backend_task
    ollama_ok = await ollama_task
    internet_ok = False
    uptime = None
    cpu_load = None
    memory = None
    docker_ok = False
    containers = []
    if ssh_ok:
        # Human-readable uptime ("3 days", "2:17", ...).
        success, output = await run_ssh_command("uptime | awk -F'up ' '{print $2}' | awk -F',' '{print $1}'")
        if success:
            uptime = output.strip()
        # 1-minute load average from the kernel's loadavg sysctl.
        success, output = await run_ssh_command("sysctl -n vm.loadavg | awk '{print $2}'")
        if success:
            cpu_load = output.strip()
        # Approximate used memory (active + wired pages * 4 KiB page size).
        success, output = await run_ssh_command("vm_stat | awk '/Pages active/ {active=$3} /Pages wired/ {wired=$3} END {print int((active+wired)*4096/1024/1024/1024*10)/10 \" GB\"}'")
        if success:
            memory = output.strip()
        # Docker reachability + container list ("name|status" per line).
        success, output = await run_ssh_command("/usr/local/bin/docker ps --format '{{.Names}}|{{.Status}}'")
        if success:
            docker_ok = True
            for line in output.strip().split('\n'):
                if '|' in line:
                    name, status = line.split('|', 1)
                    containers.append({"name": name, "status": status})
        # Internet access as seen *from the Mac Mini*, not from this machine.
        success, _ = await run_ssh_command("ping -c 1 -W 2 8.8.8.8", timeout=10)
        internet_ok = success
    # Ollama model list (only worth fetching if the service answered).
    models = await _fetch_ollama_models(f"http://{MAC_MINI_IP}:11434") if ollama_ok else []
    return {
        "online": ping_ok and ssh_ok,
        "ip": MAC_MINI_IP,
        "ping": ping_ok,
        "ssh": ssh_ok,
        "docker": docker_ok,
        "backend": backend_ok,
        "ollama": ollama_ok,
        "internet": internet_ok,
        "uptime": uptime,
        "cpu_load": cpu_load,
        "memory": memory,
        "containers": containers,
        "models": models
    }
@router.post("/wake")
async def wake_on_lan():
    """Send Wake-on-LAN magic packet to Mac Mini.

    Uses MAC_MINI_MAC when configured; otherwise tries to resolve the MAC
    address from the local ARP cache. Prefers the `wakeonlan` CLI and falls
    back to crafting the magic packet in Python.
    """
    if not MAC_MINI_MAC:
        # Try to get the MAC address from the ARP cache.
        try:
            process = await asyncio.create_subprocess_shell(
                f"arp -n {MAC_MINI_IP} | awk '{{print $3}}'",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            stdout, _ = await process.communicate()
            mac = stdout.decode().strip()
            if not mac or mac == "(incomplete)":
                return CommandResponse(
                    success=False,
                    message="MAC-Adresse nicht gefunden. Bitte MAC_MINI_MAC setzen.",
                    output="Wake-on-LAN benötigt die MAC-Adresse des Mac Mini"
                )
        except Exception:
            # Narrowed from a bare `except:` so cancellation propagates.
            return CommandResponse(
                success=False,
                message="Fehler beim Ermitteln der MAC-Adresse"
            )
    else:
        mac = MAC_MINI_MAC
    # Send WOL packet using wakeonlan if available, otherwise use Python.
    try:
        # Try the wakeonlan command first.
        process = await asyncio.create_subprocess_shell(
            f"wakeonlan {mac}",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            return CommandResponse(
                success=True,
                message=f"Wake-on-LAN Paket an {mac} gesendet. Der Mac Mini sollte in 30-60 Sekunden starten.",
                output=stdout.decode()
            )
        # Fall back to Python WOL. Magic packet = 6x 0xFF + MAC repeated 16x.
        # (Removed the redundant local `import socket`/`import struct`:
        # socket is already imported at module level and struct was unused.)
        mac_bytes = bytes.fromhex(mac.replace(':', '').replace('-', ''))
        magic = b'\xff' * 6 + mac_bytes * 16
        # Context manager guarantees the socket is closed even if sendto fails.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            sock.sendto(magic, ('255.255.255.255', 9))
        return CommandResponse(
            success=True,
            message=f"Wake-on-LAN Paket an {mac} gesendet (Python fallback).",
            output="Magic packet sent via broadcast"
        )
    except Exception as e:
        return CommandResponse(
            success=False,
            message=f"Fehler beim Senden des WOL-Pakets: {str(e)}"
        )
@router.post("/restart")
async def restart_mac_mini():
    """Restart Mac Mini via SSH."""
    # Prefer a graceful restart through System Events.
    ok, output = await run_ssh_command(
        "osascript -e 'tell application \"System Events\" to restart'",
        timeout=15
    )
    if ok:
        return CommandResponse(
            success=True,
            message="Neustart wurde ausgelöst. Der Mac Mini startet in wenigen Sekunden neu.",
            output=output
        )
    # Fallback: non-interactive sudo reboot.
    ok, output = await run_ssh_command("sudo -n reboot", timeout=15)
    if ok:
        return CommandResponse(
            success=True,
            message="Neustart wurde ausgelöst (sudo).",
            output=output
        )
    return CommandResponse(
        success=False,
        message="Neustart fehlgeschlagen. SSH-Verbindung oder Berechtigung fehlt.",
        output=output
    )
@router.post("/shutdown")
async def shutdown_mac_mini():
    """Shutdown Mac Mini via SSH."""
    # Graceful shutdown through System Events.
    ok, output = await run_ssh_command(
        "osascript -e 'tell application \"System Events\" to shut down'",
        timeout=15
    )
    if not ok:
        return CommandResponse(
            success=False,
            message="Shutdown fehlgeschlagen. SSH-Verbindung oder Berechtigung fehlt.",
            output=output
        )
    return CommandResponse(
        success=True,
        message="Shutdown wurde ausgelöst. Der Mac Mini fährt in wenigen Sekunden herunter.",
        output=output
    )
@router.post("/docker/up")
async def docker_up():
    """Start Docker containers on Mac Mini."""
    ok, output = await run_ssh_command(
        f"cd {PROJECT_PATH} && /usr/local/bin/docker compose up -d",
        timeout=120
    )
    if ok:
        msg = "Container werden gestartet..."
    else:
        msg = "Fehler beim Starten der Container"
    return CommandResponse(success=ok, message=msg, output=output)
@router.post("/docker/down")
async def docker_down():
    """Stop Docker containers on Mac Mini."""
    ok, output = await run_ssh_command(
        f"cd {PROJECT_PATH} && /usr/local/bin/docker compose down",
        timeout=60
    )
    if ok:
        msg = "Container werden gestoppt..."
    else:
        msg = "Fehler beim Stoppen der Container"
    return CommandResponse(success=ok, message=msg, output=output)
@router.post("/ollama/pull")
async def pull_ollama_model(request: ModelPullRequest):
    """Pull an Ollama model with streaming progress."""
    model_name = request.model

    async def stream_progress():
        """Relay Ollama's pull progress to the client as NDJSON lines."""
        try:
            # No timeout: model downloads can legitimately run for a long time.
            async with httpx.AsyncClient(timeout=None) as client:
                pull_stream = client.stream(
                    "POST",
                    f"{OLLAMA_HOST}/api/pull",
                    json={"name": model_name, "stream": True}
                )
                async with pull_stream as response:
                    async for line in response.aiter_lines():
                        if not line:
                            continue
                        yield f"{line}\n"
        except Exception as exc:
            # Surface failures to the client in the same NDJSON format.
            yield json.dumps({"status": f"error: {str(exc)}"}) + "\n"

    return StreamingResponse(stream_progress(), media_type="application/x-ndjson")
@router.get("/ollama/models")
async def get_ollama_models():
    """Get list of installed Ollama models.

    Returns:
        Ollama's /api/tags payload on success, otherwise {"models": []}
        (service unreachable or non-200 status).
    """
    try:
        async with httpx.AsyncClient(timeout=10) as client:
            response = await client.get(f"{OLLAMA_HOST}/api/tags")
        if response.status_code == 200:
            return response.json()
        return {"models": []}
    except Exception:
        # Narrowed from a bare `except:` so task cancellation propagates.
        return {"models": []}
@router.delete("/ollama/models/{model_name}")
async def delete_ollama_model(model_name: str):
    """Delete an Ollama model.

    Args:
        model_name: Tag of the installed model to remove.
    """
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            # httpx's .delete() helper does not accept a request body
            # (no `json=` parameter), so the original call raised a
            # TypeError; issue the DELETE via .request() instead.
            response = await client.request(
                "DELETE",
                f"{OLLAMA_HOST}/api/delete",
                json={"name": model_name}
            )
        if response.status_code == 200:
            return CommandResponse(
                success=True,
                message=f"Modell '{model_name}' wurde gelöscht"
            )
        return CommandResponse(
            success=False,
            message=f"Fehler beim Löschen: {response.text}"
        )
    except Exception as e:
        return CommandResponse(
            success=False,
            message=f"Fehler: {str(e)}"
        )