Initial commit

This commit is contained in:
2026-04-24 19:18:15 +08:00
commit fbcbe08696
555 changed files with 96692 additions and 0 deletions

41
scripts/build-server.sh Normal file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Build the Python server binary and stage it where Tauri expects sidecar
# binaries (tauri/src-tauri/binaries/), suffixed with the host target triple.
#
# Usage: ./scripts/build-server.sh
set -e

# Resolve the repository root from this script's own location so the script
# works from any working directory. (Previously PATH was resolved from
# "$0" but `cd backend` still assumed CWD was the repo root.)
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"

# Host target triple used as the binary suffix; "unknown" if rustc is absent.
PLATFORM=$(rustc --print host-tuple 2>/dev/null || echo "unknown")
echo "Building voicebox-server for platform: $PLATFORM"

# Prefer the backend virtualenv's interpreter for all `python` calls below.
export PATH="$REPO_ROOT/backend/venv/bin:$PATH"
cd "$REPO_ROOT/backend"

# Check if PyInstaller is installed; install on demand.
if ! python -c "import PyInstaller" 2>/dev/null; then
    echo "Installing PyInstaller..."
    python -m pip install pyinstaller
fi

# Build binary
python build_binary.py

# Create binaries directory if it doesn't exist
mkdir -p ../tauri/src-tauri/binaries

# Copy binary with platform suffix (PyInstaller emits .exe on Windows).
if [ -f dist/voicebox-server ]; then
    cp dist/voicebox-server "../tauri/src-tauri/binaries/voicebox-server-${PLATFORM}"
    chmod +x "../tauri/src-tauri/binaries/voicebox-server-${PLATFORM}"
    echo "Built voicebox-server-${PLATFORM}"
elif [ -f dist/voicebox-server.exe ]; then
    cp dist/voicebox-server.exe "../tauri/src-tauri/binaries/voicebox-server-${PLATFORM}.exe"
    echo "Built voicebox-server-${PLATFORM}.exe"
else
    echo "Error: Binary not found in dist/" >&2
    exit 1
fi
echo "Build complete!"

154
scripts/convert-assets.sh Normal file
View File

@@ -0,0 +1,154 @@
#!/bin/bash
set -e
# Asset Conversion Script
# Converts PNG → WebP and MOV → WebM in public folders
# Deletes original files after successful conversion
# Always operate from the repository root (parent of scripts/), so the
# script may be invoked from any working directory.
cd "$(dirname "$0")/.."
# Directories to process (relative to the repo root; directories that do
# not exist are skipped later with a warning).
DIRS=(
  "landing/public"
  "docs/public"
)
# Track counts — summarized at the end of the run.
png_converted=0
mov_converted=0
png_failed=0
mov_failed=0
echo "🔄 Converting assets to web-optimized formats..."
echo ""
# Verify the required external tools are available; on any missing tool,
# print what is needed (with a brew install hint) and exit 1.
# PNG conversion can use either cwebp or ffmpeg; MOV conversion needs ffmpeg.
check_dependencies() {
  local absent=()
  if ! command -v cwebp >/dev/null 2>&1 && ! command -v ffmpeg >/dev/null 2>&1; then
    absent+=("cwebp or ffmpeg (for PNG→WebP)")
  fi
  if ! command -v ffmpeg >/dev/null 2>&1; then
    absent+=("ffmpeg (for MOV→WebM)")
  fi
  # Nothing missing: proceed silently.
  [ ${#absent[@]} -eq 0 ] && return 0
  echo "❌ Missing required tools:"
  local tool
  for tool in "${absent[@]}"; do
    echo " - $tool"
  done
  echo ""
  echo "Install with: brew install webp ffmpeg"
  exit 1
}
# Convert PNG to WebP (quality 90) and delete the source on success.
# Prefers cwebp (better quality/size), falls back to ffmpeg.
# Returns non-zero when no converter succeeds.
convert_png() {
    local input="$1"
    # Strip either case of the extension: the caller also feeds us *.PNG
    # files, and the old case-sensitive "${input%.png}" produced names
    # like "IMG.PNG.webp" for those.
    local output="${input%.png}"
    output="${output%.PNG}.webp"
    echo " Converting: $input"
    if command -v cwebp &> /dev/null; then
        # Use cwebp for best quality/size ratio
        if cwebp -q 90 "$input" -o "$output" 2>/dev/null; then
            rm "$input"
            echo " ✓ → $output"
            return 0
        fi
    elif command -v ffmpeg &> /dev/null; then
        # Fallback to ffmpeg
        if ffmpeg -i "$input" -c:v libwebp -quality 90 "$output" -y 2>/dev/null; then
            rm "$input"
            echo " ✓ → $output"
            return 0
        fi
    fi
    echo " ✗ Failed to convert"
    return 1
}
# Convert MOV to WebM (VP9 video + Opus audio) and delete the source on
# success. Returns non-zero on failure.
convert_mov() {
    local input="$1"
    # Strip either case of the extension: the caller also feeds us *.MOV
    # files, and the old case-sensitive "${input%.mov}" produced names
    # like "CLIP.MOV.webm" for those.
    local output="${input%.mov}"
    output="${output%.MOV}.webm"
    echo " Converting: $input"
    if ffmpeg -i "$input" -c:v libvpx-vp9 -crf 30 -b:v 0 -c:a libopus "$output" -y 2>/dev/null; then
        rm "$input"
        echo " ✓ → $output"
        return 0
    fi
    echo " ✗ Failed to convert"
    return 1
}
# Main execution: verify tools, then walk each configured directory and
# convert every PNG/MOV (either letter case), tallying successes/failures.
check_dependencies
for dir in "${DIRS[@]}"; do
    if [ ! -d "$dir" ]; then
        echo "⚠ Directory not found: $dir (skipping)"
        continue
    fi
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "📁 Processing: $dir"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    # Find and convert PNGs recursively (both .png and .PNG), NUL-delimited
    # so paths containing spaces or newlines are handled safely.
    # NOTE: counters use var=$((var + 1)) instead of ((var++)):
    # ((var++)) returns exit status 1 when the counter is 0, which would
    # abort the whole script under `set -e` on the first conversion.
    while IFS= read -r -d '' file; do
        if convert_png "$file"; then
            png_converted=$((png_converted + 1))
        else
            png_failed=$((png_failed + 1))
        fi
    done < <(find "$dir" -type f \( -name "*.png" -o -name "*.PNG" \) -print0 2>/dev/null)
    # Find and convert MOVs recursively (both .mov and .MOV)
    while IFS= read -r -d '' file; do
        if convert_mov "$file"; then
            mov_converted=$((mov_converted + 1))
        else
            mov_failed=$((mov_failed + 1))
        fi
    done < <(find "$dir" -type f \( -name "*.mov" -o -name "*.MOV" \) -print0 2>/dev/null)
    echo ""
done
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Conversion complete!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Results:"
echo " PNG → WebP: $png_converted converted, $png_failed failed"
echo " MOV → WebM: $mov_converted converted, $mov_failed failed"
if [ "$png_converted" -eq 0 ] && [ "$mov_converted" -eq 0 ]; then
    echo ""
    echo "No PNG or MOV files found to convert."
fi

81
scripts/generate-api.sh Normal file
View File

@@ -0,0 +1,81 @@
#!/bin/bash
# Generate the OpenAPI TypeScript client from the FastAPI schema.
#
# If the backend is not already serving on port 17493 it is started in the
# background (creating the venv / installing deps as needed) and stopped
# again when the script exits — including on failure, via the EXIT trap.
set -e

# The app's documented local backend port — keep the generator on it.
OPENAPI_URL="http://localhost:17493/openapi.json"
BACKEND_PID=""
STARTED_BACKEND=false

# Always stop a backend WE started, even if the script fails part-way.
cleanup() {
    if [ "$STARTED_BACKEND" = true ] && [ -n "$BACKEND_PID" ]; then
        echo "Stopping backend server..."
        kill "$BACKEND_PID" 2>/dev/null || true
    fi
}
trap cleanup EXIT

echo "Generating OpenAPI client..."

# Check if backend is running
if ! curl -s "$OPENAPI_URL" > /dev/null 2>&1; then
    echo "Backend not running. Starting backend..."
    cd backend
    # Check if virtual environment exists
    if [ ! -d "venv" ]; then
        echo "Creating virtual environment..."
        python -m venv venv
    fi
    # Activate the venv (POSIX layout first, then Windows layout)
    source venv/bin/activate 2>/dev/null || source venv/Scripts/activate 2>/dev/null
    # Install dependencies if needed
    if ! python -c "import fastapi" 2>/dev/null; then
        echo "Installing backend dependencies..."
        pip install -r requirements.txt
    fi
    # Start backend in background
    echo "Starting backend server..."
    uvicorn main:app --port 17493 &
    BACKEND_PID=$!
    STARTED_BACKEND=true
    # Return to the repo root: every path below (app/openapi.json, cd app)
    # is relative to it. Previously the script stayed inside backend/ and
    # wrote the schema to backend/app/openapi.json by mistake.
    cd ..
    # Wait for server to be ready (up to 30s)
    echo "Waiting for server to start..."
    for _ in {1..30}; do
        if curl -s "$OPENAPI_URL" > /dev/null 2>&1; then
            break
        fi
        sleep 1
    done
    if ! curl -s "$OPENAPI_URL" > /dev/null 2>&1; then
        echo "Error: Backend failed to start" >&2
        exit 1
    fi
    echo "Backend started (PID: $BACKEND_PID)"
fi

# Download OpenAPI schema
echo "Downloading OpenAPI schema..."
curl -s "$OPENAPI_URL" > app/openapi.json

# Check if openapi-typescript-codegen is installed
if ! bunx --bun openapi-typescript-codegen --version > /dev/null 2>&1; then
    echo "Installing openapi-typescript-codegen..."
    bun add -d openapi-typescript-codegen
fi

# Generate TypeScript client
echo "Generating TypeScript client..."
cd app
bunx --bun openapi-typescript-codegen \
    --input openapi.json \
    --output src/lib/api \
    --client fetch \
    --useOptions \
    --exportSchemas true
echo "API client generated in app/src/lib/api"
echo "Done!"

232
scripts/package_cuda.py Normal file
View File

@@ -0,0 +1,232 @@
"""
Package the PyInstaller --onedir CUDA build into two archives.
Takes the PyInstaller --onedir output directory and splits it into:
1. voicebox-server-cuda.tar.gz — server core (exe + non-NVIDIA deps)
2. cuda-libs-cu128.tar.gz — NVIDIA runtime libraries only
3. cuda-libs.json — version manifest for the CUDA libs
Usage:
python scripts/package_cuda.py backend/dist/voicebox-server-cuda/
python scripts/package_cuda.py backend/dist/voicebox-server-cuda/ --output release-assets/
python scripts/package_cuda.py backend/dist/voicebox-server-cuda/ --cuda-libs-version cu128-v1
"""
import argparse
import hashlib
import json
import sys
import tarfile
from pathlib import Path
# DLL name prefixes that identify NVIDIA CUDA runtime libraries.
# These DLLs may appear in different locations depending on the torch
# and PyInstaller version:
# - nvidia/ subdirectories (older torch with separate nvidia-* packages)
# - _internal/torch/lib/ (torch 2.10+ bundles NVIDIA DLLs directly)
# - Top-level directory (some PyInstaller versions)
NVIDIA_DLL_PREFIXES = (
    "cublas",
    "cublaslt",
    "cudart",
    "cudnn",
    "cufft",
    "cufftw",
    "curand",
    "cusolver",
    "cusolvermg",
    "cusparse",
    "nvjitlink",
    "nvrtc",
    "nccl",
    "caffe2_nvrtc",
)

# Files to keep in the server core even if they match NVIDIA prefixes.
# These are small Python modules or stubs, not the large runtime DLLs.
NVIDIA_KEEP_IN_CORE = {
    "torch/cuda/nccl.py",
    "torch/_inductor/codegen/cuda/cutlass_lib_extensions/cutlass_mock_imports/cuda/cudart.py",
}


def is_nvidia_file(rel_path: str) -> bool:
    """Check if a relative path belongs to the NVIDIA CUDA libs archive.

    Matches:
      - anything located under a nvidia/ directory tree (older torch ships
        the whole nvidia namespace package there), and
      - NVIDIA runtime libraries anywhere in the tree, identified by name
        prefix. Handles Windows DLLs (cublas64_12.dll) and Linux shared
        objects including versioned ELF names such as libcublas.so.12 and
        the Unix "lib" prefix, which a plain ``.endswith(".so")`` /
        prefix check used to miss.

    Small Python stubs listed in NVIDIA_KEEP_IN_CORE always stay in the
    server core.
    """
    rel_lower = rel_path.lower().replace("\\", "/")
    # Never split out Python source files or small stubs
    if rel_lower in NVIDIA_KEEP_IN_CORE:
        return False
    parts = rel_lower.split("/")
    # Include the entire nvidia/ namespace package tree (older torch layout).
    if "nvidia" in parts[:-1]:
        return True
    # NVIDIA libraries anywhere in the tree
    # (e.g. _internal/torch/lib/cublas64_12.dll or .../libcublas.so.12)
    filename = parts[-1]
    if filename.endswith((".dll", ".so")) or ".so." in filename:
        stem = filename.split(".", 1)[0]
        if stem.startswith("lib"):  # Unix shared-object naming convention
            stem = stem[3:]
        return stem.startswith(NVIDIA_DLL_PREFIXES)
    return False
def sha256_file(path: Path) -> str:
    """Compute the SHA-256 hex digest of a file, reading in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        # iter() with a sentinel yields chunks until read() returns b"".
        for chunk in iter(lambda: handle.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()
def package(
    onedir_path: Path,
    output_dir: Path,
    cuda_libs_version: str,
    torch_compat: str,
):
    """Split a PyInstaller --onedir CUDA build into two archives.

    Writes into ``output_dir``:
      - voicebox-server-cuda.tar.gz (+ .sha256): exe and non-NVIDIA deps
      - cuda-libs-<version>.tar.gz (+ .sha256): NVIDIA runtime libraries
      - cuda-libs.json: manifest (version, torch compat range, sha256)

    Exits with status 1 if no NVIDIA files are found, to avoid shipping an
    empty CUDA libs archive from a CPU-only build.
    """
    output_dir.mkdir(parents=True, exist_ok=True)
    # Collect all files in the onedir output, split into core vs nvidia
    core_files = []
    nvidia_files = []
    # sorted() keeps archive member order deterministic across runs.
    for item in sorted(onedir_path.rglob("*")):
        if item.is_dir():
            continue
        rel = item.relative_to(onedir_path)
        rel_str = str(rel)
        if is_nvidia_file(rel_str):
            nvidia_files.append((rel_str, item))
        else:
            core_files.append((rel_str, item))
    core_size = sum(f.stat().st_size for _, f in core_files)
    nvidia_size = sum(f.stat().st_size for _, f in nvidia_files)
    print(f"Input directory: {onedir_path}")
    print(f"Core files: {len(core_files)} ({core_size / (1024**2):.1f} MB)")
    print(f"NVIDIA files: {len(nvidia_files)} ({nvidia_size / (1024**2):.1f} MB)")
    # Guard: an empty CUDA archive means the build wasn't a CUDA build.
    if not nvidia_files:
        print(
            f"ERROR: No NVIDIA files found in {onedir_path}. "
            "Refusing to create an empty CUDA libs archive.",
            file=sys.stderr,
        )
        print(
            "Make sure you built with --cuda and the NVIDIA packages are present.",
            file=sys.stderr,
        )
        sys.exit(1)
    # Create server core archive
    # Files are stored relative to the archive root (no parent directory prefix)
    # so extracting to backends/cuda/ puts everything at the right level.
    server_archive = output_dir / "voicebox-server-cuda.tar.gz"
    print(f"\nCreating server core archive: {server_archive.name}")
    with tarfile.open(server_archive, "w:gz") as tar:
        for rel_str, full_path in core_files:
            tar.add(full_path, arcname=rel_str)
    server_sha = sha256_file(server_archive)
    # Sidecar checksum file next to the archive.
    (output_dir / "voicebox-server-cuda.tar.gz.sha256").write_text(
        f"{server_sha} voicebox-server-cuda.tar.gz\n"
    )
    print(f" Size: {server_archive.stat().st_size / (1024**2):.1f} MB")
    print(f" SHA-256: {server_sha[:16]}...")
    # Create CUDA libs archive (version string in the file name so it can be
    # cached independently of app updates).
    cuda_libs_archive = output_dir / f"cuda-libs-{cuda_libs_version}.tar.gz"
    print(f"\nCreating CUDA libs archive: {cuda_libs_archive.name}")
    with tarfile.open(cuda_libs_archive, "w:gz") as tar:
        for rel_str, full_path in nvidia_files:
            tar.add(full_path, arcname=rel_str)
    cuda_sha = sha256_file(cuda_libs_archive)
    (output_dir / f"cuda-libs-{cuda_libs_version}.tar.gz.sha256").write_text(
        f"{cuda_sha} cuda-libs-{cuda_libs_version}.tar.gz\n"
    )
    print(f" Size: {cuda_libs_archive.stat().st_size / (1024**2):.1f} MB")
    print(f" SHA-256: {cuda_sha[:16]}...")
    # Write cuda-libs.json manifest (consumed by the updater to decide when
    # the cached CUDA libs need a re-download).
    manifest = {
        "version": cuda_libs_version,
        "torch_compat": torch_compat,
        "archive": cuda_libs_archive.name,
        "sha256": cuda_sha,
    }
    manifest_path = output_dir / "cuda-libs.json"
    manifest_path.write_text(json.dumps(manifest, indent=2) + "\n")
    print(f"\nManifest: {manifest_path.name}")
    print(json.dumps(manifest, indent=2))
    # Summary
    total_input = core_size + nvidia_size
    total_output = server_archive.stat().st_size + cuda_libs_archive.stat().st_size
    print(f"\nTotal input: {total_input / (1024**3):.2f} GB")
    print(f"Total output: {total_output / (1024**3):.2f} GB (compressed)")
    print(
        f"Server core: {server_archive.stat().st_size / (1024**2):.1f} MB (redownloaded on app update)"
    )
    print(
        f"CUDA libs: {cuda_libs_archive.stat().st_size / (1024**2):.1f} MB (cached until CUDA toolkit bump)"
    )
def main():
    """Parse CLI arguments and run the packaging step.

    Exits with status 1 if the input path is not a directory.
    """
    parser = argparse.ArgumentParser(
        description="Package PyInstaller --onedir CUDA build into server + CUDA libs archives"
    )
    parser.add_argument(
        "input",
        type=Path,
        help="Path to PyInstaller --onedir output directory (e.g. backend/dist/voicebox-server-cuda/)",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=None,
        help="Output directory for archives (default: same as input parent)",
    )
    parser.add_argument(
        "--cuda-libs-version",
        type=str,
        default="cu128-v1",
        help="Version string for the CUDA libs archive (default: cu128-v1)",
    )
    parser.add_argument(
        "--torch-compat",
        type=str,
        default=">=2.7.0,<2.11.0",
        # FIX: the help text previously claimed ">=2.6.0,<2.11.0" which did
        # not match the actual default above; keep the two in sync.
        help="Torch version compatibility range (default: >=2.7.0,<2.11.0)",
    )
    args = parser.parse_args()
    if not args.input.is_dir():
        print(f"Error: {args.input} is not a directory", file=sys.stderr)
        print("Expected a PyInstaller --onedir output directory.", file=sys.stderr)
        sys.exit(1)
    output_dir = args.output or args.input.parent
    package(args.input, output_dir, args.cuda_libs_version, args.torch_compat)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Script to prepare a signed release build
# Usage: ./scripts/prepare-release.sh
set -e

# Resolve relative paths (tauri/...) against the repository root so the
# script works from any working directory — previously the grep below
# failed unless invoked from the repo root, unlike the other scripts/ helpers.
cd "$(dirname "$0")/.."

echo "🔑 Checking for signing keys..."
if [ ! -f ~/.tauri/voicebox.key ]; then
    echo "❌ Private key not found at ~/.tauri/voicebox.key"
    echo "Run: cd tauri && bun tauri signer generate -w ~/.tauri/voicebox.key"
    exit 1
fi
if [ ! -f ~/.tauri/voicebox.key.pub ]; then
    echo "❌ Public key not found at ~/.tauri/voicebox.key.pub"
    exit 1
fi
echo "✅ Signing keys found"
echo ""

# Check if public key is in tauri.conf.json
if grep -q "REPLACE_WITH_YOUR_PUBLIC_KEY" tauri/src-tauri/tauri.conf.json; then
    echo "⚠️ Public key not configured in tauri.conf.json"
    echo ""
    echo "Add this to tauri/src-tauri/tauri.conf.json:"
    echo ""
    cat ~/.tauri/voicebox.key.pub
    echo ""
    exit 1
fi

echo "🔧 Setting up environment..."
# The Tauri bundler reads the signing key and its password from these vars.
export TAURI_SIGNING_PRIVATE_KEY="$(cat ~/.tauri/voicebox.key)"
export TAURI_SIGNING_PRIVATE_KEY_PASSWORD=""
echo "✅ Environment configured"
echo ""
echo "📦 Building release..."
echo ""
bun run build
echo ""
echo "✅ Release build complete!"
echo ""
echo "📂 Bundles created in: tauri/src-tauri/target/release/bundle/"
echo ""
echo "Next steps:"
echo "1. Create a GitHub release"
echo "2. Upload all files from the bundle directory"
echo "3. Create latest.json with update metadata"
echo ""
View File

@@ -0,0 +1,371 @@
#!/usr/bin/env node
/**
* Creates placeholder sidecar binaries for development mode.
*
* In dev mode, Tauri requires the sidecar binary files to exist at compile time,
* even though developers typically run the Python server manually.
*
* This script creates minimal placeholder binaries that allow Tauri to compile.
* The actual server should be started separately with `bun run dev:server`.
*/
import { execSync } from 'child_process';
import { existsSync, mkdirSync, statSync, writeFileSync } from 'fs';
import { dirname, join } from 'path';
import { fileURLToPath } from 'url';
// Recreate CommonJS-style __filename/__dirname inside this ES module.
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
// Where Tauri expects sidecar binaries, relative to this script's directory.
const BINARIES_DIR = join(__dirname, '..', 'tauri', 'src-tauri', 'binaries');
// Minimum size to consider a binary "real" (placeholder is ~256 bytes, real is MBs)
const MIN_REAL_BINARY_SIZE = 10000;
// Get the current platform's target triple.
// Asks rustc first (authoritative). When rustc is unavailable, falls back
// to an explicit process.platform/process.arch mapping and throws for
// unsupported combinations. The previous fallback mis-mapped any non-x64
// Windows arch (incl. arm64) to i686 and any non-x64 Linux arch to aarch64.
function getTargetTriple() {
  try {
    return execSync('rustc --print host-tuple', { encoding: 'utf-8' }).trim();
  } catch {
    // Fallback detection without rustc.
    const platform = process.platform;
    const arch = process.arch;
    const triples = {
      win32: {
        x64: 'x86_64-pc-windows-msvc',
        arm64: 'aarch64-pc-windows-msvc',
        ia32: 'i686-pc-windows-msvc',
      },
      darwin: {
        x64: 'x86_64-apple-darwin',
        arm64: 'aarch64-apple-darwin',
      },
      linux: {
        x64: 'x86_64-unknown-linux-gnu',
        arm64: 'aarch64-unknown-linux-gnu',
      },
    };
    const triple = triples[platform] && triples[platform][arch];
    if (!triple) {
      throw new Error(`Unsupported platform: ${platform}/${arch}`);
    }
    return triple;
  }
}
// Create a minimal placeholder "binary" for the given target triple so
// Tauri's sidecar resolution succeeds at compile time in dev mode.
//
// Behavior:
//   - If a real binary (> MIN_REAL_BINARY_SIZE bytes) already exists at the
//     target path, it is left untouched.
//   - Windows targets get a minimal-but-valid PE32+ console executable.
//   - Unix targets get a tiny shell script that prints a hint and exits 1.
function createPlaceholderBinary(targetTriple) {
  const isWindows = targetTriple.includes('windows');
  const binaryName = `voicebox-server-${targetTriple}${isWindows ? '.exe' : ''}`;
  const binaryPath = join(BINARIES_DIR, binaryName);

  // Check if real binary already exists (larger than our placeholder)
  if (existsSync(binaryPath)) {
    try {
      const stats = statSync(binaryPath);
      if (stats.size > MIN_REAL_BINARY_SIZE) {
        console.log(
          `Real binary already exists: ${binaryName} (${(stats.size / 1024 / 1024).toFixed(1)} MB)`,
        );
        return;
      }
    } catch {
      // File exists but can't stat - try to replace it
    }
  }

  // Ensure binaries directory exists
  if (!existsSync(BINARIES_DIR)) {
    mkdirSync(BINARIES_DIR, { recursive: true });
  }

  if (isWindows) {
    // Minimal valid PE32+ image that Windows will accept, expressed as hex
    // rows of 16 bytes (identical bytes to the previous one-byte-per-line
    // array, just readable). Layout:
    //   - 64-byte MS-DOS header (e_lfanew = 0x80)
    //   - classic DOS stub ("This program cannot be run in DOS mode.")
    //   - "PE\0\0" signature
    //   - COFF header: AMD64, 1 section, EXECUTABLE_IMAGE | LARGE_ADDRESS_AWARE
    //   - PE32+ optional header: console subsystem, ImageBase 0x140000000
    const minimalPE = Buffer.from(
      [
        // MS-DOS header (e_lfanew -> 0x80)
        '4d5a90000300000004000000ffff0000',
        'b8000000000000004000000000000000',
        '00000000000000000000000000000000',
        '00000000000000000000000080000000',
        // DOS stub
        '0e1fba0e00b409cd21b8014ccd215468',
        '69732070726f6772616d2063616e6e6f',
        '742062652072756e20696e20444f5320',
        '6d6f64652e0d0d0a2400000000000000',
        // PE signature + COFF header (machine AMD64, 1 section)
        '50450000648601000000000000000000',
        // NumberOfSymbols, SizeOfOptionalHeader=0xf0, Characteristics=0x22,
        // then PE32+ optional header (magic 0x020b), SizeOfCode
        '00000000f00022000b02000000000000',
        // SizeOfInitializedData, SizeOfUninitializedData,
        // AddressOfEntryPoint=0x1000, BaseOfCode
        '00000000000000000010000000000000',
        // ImageBase=0x140000000, SectionAlignment=0x1000, FileAlignment=0x200
        '00000040010000000010000000020000',
        // OS version 6.0, image version 0.0, subsystem version 6.0, Win32VersionValue
        '06000000000000000600000000000000',
        // SizeOfImage=0x2000, SizeOfHeaders=0x200, CheckSum,
        // Subsystem=CONSOLE (3), DllCharacteristics=0x160
        '00200000000200000000000003006001',
        // SizeOfStackReserve=0x100000, SizeOfStackCommit (8 bytes each, PE32+)
        '00001000000000000000000000000000',
        // SizeOfHeapReserve=0x100000, SizeOfHeapCommit
        '00001000000000000000000000000000',
        // LoaderFlags, NumberOfRvaAndSizes=16
        '0000000010000000',
      ].join(''),
      'hex',
    );
    // Pad to 512 bytes minimum for valid PE
    const paddedPE = Buffer.alloc(512);
    minimalPE.copy(paddedPE);
    writeFileSync(binaryPath, paddedPE);
  } else {
    // Create a minimal shell script for Unix-like systems
    const script = `#!/bin/sh
echo "[voicebox-server] Dev mode placeholder - start the real server with: bun run dev:server"
exit 1
`;
    writeFileSync(binaryPath, script, { mode: 0o755 });
  }

  console.log(`Created dev placeholder: ${binaryName}`);
}
// Entry point: resolve the host's target triple, then make sure a
// placeholder (or real) sidecar binary exists for it.
function main() {
  const triple = getTargetTriple();
  createPlaceholderBinary(triple);
}
main();

View File

@@ -0,0 +1,382 @@
#!/usr/bin/env python3
"""
Test script to observe exactly how HuggingFace reports download progress
for each TTS model. Doesn't load models — just downloads and tracks tqdm.
Usage:
backend/venv/bin/python scripts/test_download_progress.py qwen
backend/venv/bin/python scripts/test_download_progress.py luxtts
backend/venv/bin/python scripts/test_download_progress.py chatterbox
Add --delete to clear cache first and force a real download:
backend/venv/bin/python scripts/test_download_progress.py chatterbox --delete
"""
import os
import shutil
import sys
import time
import threading
from pathlib import Path
from contextlib import contextmanager
# ─── Configuration ────────────────────────────────────────────────────────────
# Model registry: key → download recipe. "method"/"description" are
# informational; the actual download call lives in the matching
# download_<key>() function further below.
MODELS = {
    "qwen": {
        "repo_id": "Qwen/Qwen3-TTS-12Hz-1.7B-Base",
        "method": "from_pretrained",
        "description": "Qwen TTS 1.7B (uses transformers from_pretrained)",
    },
    "luxtts": {
        "repo_id": "YatharthS/LuxTTS",
        "method": "snapshot_download",
        "description": "LuxTTS (uses snapshot_download)",
    },
    "chatterbox": {
        "repo_id": "ResembleAI/chatterbox",
        "method": "snapshot_download",
        # Only the files the chatterbox backend actually loads.
        "allow_patterns": [
            "ve.pt",
            "t3_mtl23ls_v2.safetensors",
            "s3gen.pt",
            "grapheme_mtl_merged_expanded_v1.json",
            "conds.pt",
            "Cangjie5_TC.json",
        ],
        "description": "Chatterbox Multilingual (uses snapshot_download with allow_patterns)",
    },
}
# ─── Progress tracking (mirrors our HFProgressTracker) ────────────────────────
class ProgressSpy:
    """Intercepts tqdm to see exactly what HF reports.

    patch() swaps every reachable tqdm reference (tqdm.tqdm, tqdm.auto.tqdm,
    re-exports in already-imported huggingface/tqdm modules, and
    huggingface_hub's own tqdm subclass) for instrumented versions that log
    every bar INIT / UPDATE / CLOSE into ``self.events`` with elapsed
    timestamps. summary() aggregates the recorded events per file.
    """

    def __init__(self):
        # tqdm.update() may be invoked from download worker threads, so all
        # appends to self.events go through this lock (see _log).
        self._lock = threading.Lock()
        self.events = []  # List of dicts: {time, type, ...}
        self._original_tqdm_class = None
        self._original_tqdm_auto = None
        self._patched_modules = {}
        self._hf_tqdm_original_update = None
        self._start_time = None

    def _elapsed(self):
        # Seconds since patch() was entered; 0 before patching starts.
        return time.time() - self._start_time if self._start_time else 0

    def _log(self, event_type, **kwargs):
        """Record one event (thread-safe) and echo it to stdout."""
        entry = {"time": f"{self._elapsed():.1f}s", "type": event_type, **kwargs}
        # FIX: self._lock existed but was never used, even though updates
        # can arrive from multiple download threads — serialize appends.
        with self._lock:
            self.events.append(entry)
        # Live print
        parts = [f"[{entry['time']:>7s}] {event_type:>10s}"]
        for k, v in kwargs.items():
            # Print byte counts in MB for readability.
            if k in ("current", "total") and isinstance(v, (int, float)) and v > 1_000_000:
                parts.append(f"{k}={v / 1_000_000:.1f}MB")
            else:
                parts.append(f"{k}={v}")
        print(" ".join(parts), flush=True)

    def _create_tracked_tqdm_class(self):
        """Build a tqdm subclass that reports INIT/UPDATE/CLOSE to this spy."""
        spy = self
        original_tqdm = self._original_tqdm_class

        class SpyTqdm(original_tqdm):
            def __init__(self, *args, **kwargs):
                # HF usually encodes the file name in desc ("file.bin: ...").
                desc = kwargs.get("desc", "")
                if not desc and args:
                    first_arg = args[0]
                    if isinstance(first_arg, str):
                        desc = first_arg
                filename = ""
                if desc:
                    if ":" in desc:
                        filename = desc.split(":")[0].strip()
                    else:
                        filename = desc.strip()
                # Filter out non-standard kwargs that a plain tqdm __init__
                # would reject (HF subclasses sometimes add their own).
                tqdm_kwargs = {
                    'iterable', 'desc', 'total', 'leave', 'file', 'ncols',
                    'mininterval', 'maxinterval', 'miniters', 'ascii', 'disable',
                    'unit', 'unit_scale', 'dynamic_ncols', 'smoothing',
                    'bar_format', 'initial', 'position', 'postfix',
                    'unit_divisor', 'write_bytes', 'lock_args', 'nrows',
                    'colour', 'color', 'delay', 'gui', 'disable_default', 'pos',
                }
                filtered_kwargs = {k: v for k, v in kwargs.items() if k in tqdm_kwargs}
                try:
                    super().__init__(*args, **filtered_kwargs)
                except TypeError:
                    # Fall back to the raw kwargs if filtering broke something.
                    super().__init__(*args, **kwargs)
                self._spy_filename = filename or "unknown"
                total = getattr(self, "total", None)
                spy._log(
                    "INIT",
                    filename=self._spy_filename,
                    total=total or 0,
                    unit=kwargs.get("unit", "?"),
                    unit_scale=kwargs.get("unit_scale", False),
                    disable=kwargs.get("disable", False),
                )

            def update(self, n=1):
                result = super().update(n)
                current = getattr(self, "n", 0)
                total = getattr(self, "total", 0)
                filename = self._spy_filename
                spy._log(
                    "UPDATE",
                    filename=filename,
                    n=n,
                    current=current,
                    total=total or 0,
                    pct=f"{100 * current / total:.1f}%" if total else "?",
                )
                return result

            def close(self):
                spy._log("CLOSE", filename=self._spy_filename)
                return super().close()

        return SpyTqdm

    @contextmanager
    def patch(self):
        """Context manager that patches tqdm globally — same as HFProgressTracker."""
        self._start_time = time.time()
        try:
            import tqdm as tqdm_module
            self._original_tqdm_class = tqdm_module.tqdm
        except ImportError:
            # No tqdm available: run the body unobserved.
            yield
            return
        tracked_tqdm = self._create_tracked_tqdm_class()
        # Patch tqdm.tqdm
        tqdm_module.tqdm = tracked_tqdm
        # Patch tqdm.auto.tqdm
        self._original_tqdm_auto = None
        if hasattr(tqdm_module, "auto") and hasattr(tqdm_module.auto, "tqdm"):
            self._original_tqdm_auto = tqdm_module.auto.tqdm
            tqdm_module.auto.tqdm = tracked_tqdm
        # Patch re-exports in already-imported modules (same as HFProgressTracker);
        # originals are remembered in _patched_modules for restoration.
        tqdm_attr_names = ['tqdm', 'base_tqdm', 'old_tqdm']
        patched_count = 0
        for module_name in list(sys.modules.keys()):
            if "huggingface" in module_name or module_name.startswith("tqdm"):
                try:
                    module = sys.modules[module_name]
                    for attr_name in tqdm_attr_names:
                        if hasattr(module, attr_name):
                            attr = getattr(module, attr_name)
                            # Heuristic: the attribute is either one of the
                            # originals or duck-types as a tqdm class.
                            is_tqdm_class = (
                                attr is self._original_tqdm_class
                                or (self._original_tqdm_auto and attr is self._original_tqdm_auto)
                                or (
                                    hasattr(attr, "__name__")
                                    and attr.__name__ == "tqdm"
                                    and hasattr(attr, "update")
                                )
                            )
                            if is_tqdm_class:
                                key = f"{module_name}.{attr_name}"
                                self._patched_modules[key] = (module, attr_name, attr)
                                setattr(module, attr_name, tracked_tqdm)
                                patched_count += 1
                except (AttributeError, TypeError):
                    pass
        # Monkey-patch HF's tqdm.update (same as HFProgressTracker); this
        # catches bars created by HF's own subclass rather than tqdm.tqdm.
        try:
            from huggingface_hub.utils import tqdm as hf_tqdm_module
            if hasattr(hf_tqdm_module, 'tqdm'):
                hf_tqdm_class = hf_tqdm_module.tqdm
                self._hf_tqdm_original_update = hf_tqdm_class.update
                spy = self

                def patched_update(tqdm_self, n=1):
                    result = spy._hf_tqdm_original_update(tqdm_self, n)
                    desc = getattr(tqdm_self, 'desc', '') or ''
                    current = getattr(tqdm_self, 'n', 0)
                    total = getattr(tqdm_self, 'total', 0) or 0
                    spy._log(
                        "HF_UPDATE",
                        desc=desc,
                        current=current,
                        total=total,
                        pct=f"{100 * current / total:.1f}%" if total else "?",
                    )
                    return result

                hf_tqdm_class.update = patched_update
                patched_count += 1
        except (ImportError, AttributeError):
            pass
        print(f"\n=== Patched {patched_count} tqdm references ===\n", flush=True)
        try:
            yield
        finally:
            # Restore everything
            import tqdm as tqdm_module
            tqdm_module.tqdm = self._original_tqdm_class
            if self._original_tqdm_auto:
                tqdm_module.auto.tqdm = self._original_tqdm_auto
            for key, (module, attr_name, original) in self._patched_modules.items():
                try:
                    setattr(module, attr_name, original)
                except (AttributeError, TypeError):
                    pass
            if self._hf_tqdm_original_update:
                try:
                    from huggingface_hub.utils import tqdm as hf_tqdm_module
                    if hasattr(hf_tqdm_module, 'tqdm'):
                        hf_tqdm_module.tqdm.update = self._hf_tqdm_original_update
                except (ImportError, AttributeError):
                    pass

    def summary(self):
        """Print an aggregate report of all recorded events, grouped by file."""
        print("\n" + "=" * 70)
        print("SUMMARY")
        print("=" * 70)
        inits = [e for e in self.events if e["type"] == "INIT"]
        updates = [e for e in self.events if e["type"] in ("UPDATE", "HF_UPDATE")]
        print(f"\ntqdm bars created: {len(inits)}")
        for e in inits:
            print(f" - {e.get('filename', '?'):40s} total={e.get('total', '?')}")
        print(f"\nTotal update calls: {len(updates)}")
        # Group updates by filename (HF_UPDATE events carry "desc" instead).
        by_file = {}
        for e in updates:
            fn = e.get("filename") or e.get("desc", "unknown")
            if fn not in by_file:
                by_file[fn] = []
            by_file[fn].append(e)
        for fn, evts in by_file.items():
            max_current = max(e.get("current", 0) for e in evts)
            max_total = max(e.get("total", 0) for e in evts)
            print(f"\n {fn}:")
            print(f" updates: {len(evts)}")
            print(f" max current: {max_current:,}")
            print(f" max total: {max_total:,}")
            if max_total > 0 and max_current > 0:
                print(f" final pct: {100 * max_current / max_total:.1f}%")
            else:
                print(f" final pct: NO PROGRESS REPORTED")
# ─── Delete cache ─────────────────────────────────────────────────────────────
def delete_cache(repo_id: str):
    """Remove the local HF hub cache directory for *repo_id*, if present."""
    from huggingface_hub import constants as hf_constants

    # HF stores repos as <cache>/models--<org>--<name>.
    repo_dir = Path(hf_constants.HF_HUB_CACHE) / ("models--" + repo_id.replace("/", "--"))
    if not repo_dir.exists():
        print(f"No cache found at {repo_dir}")
        return
    print(f"Deleting cache: {repo_dir}")
    shutil.rmtree(repo_dir)
    print("Deleted.")
# ─── Download functions ───────────────────────────────────────────────────────
def download_qwen(spy: ProgressSpy):
    """Mirrors how pytorch_backend.py downloads Qwen."""
    # Imported for parity with the real backend's import path.
    from transformers import AutoModel

    target_repo = MODELS["qwen"]["repo_id"]
    print(f"Downloading {target_repo} via AutoModel.from_pretrained...")
    with spy.patch():
        # Qwen3TTSModel.from_pretrained boils down to a snapshot_download.
        from huggingface_hub import snapshot_download

        snapshot_download(target_repo)
def download_luxtts(spy: ProgressSpy):
    """Mirrors how luxtts_backend.py downloads LuxTTS."""
    from huggingface_hub import snapshot_download

    target_repo = MODELS["luxtts"]["repo_id"]
    print(f"Downloading {target_repo} via snapshot_download...")
    with spy.patch():
        snapshot_download(target_repo)
def download_chatterbox(spy: ProgressSpy):
    """Mirrors how chatterbox_backend.py downloads Chatterbox."""
    from huggingface_hub import snapshot_download

    config = MODELS["chatterbox"]
    print(f"Downloading {config['repo_id']} via snapshot_download with allow_patterns...")
    with spy.patch():
        # Restrict the snapshot to the exact files the backend loads.
        snapshot_download(
            repo_id=config["repo_id"],
            repo_type="model",
            revision="main",
            allow_patterns=config["allow_patterns"],
            token=os.getenv("HF_TOKEN"),
        )
# ─── Main ─────────────────────────────────────────────────────────────────────
def main():
    """CLI entry point: validate the model key, optionally clear its cache,
    run the matching download, and always print the event summary."""
    argv = sys.argv
    if len(argv) < 2 or argv[1] not in MODELS:
        print(f"Usage: {argv[0]} <{'|'.join(MODELS.keys())}> [--delete]")
        sys.exit(1)

    model_key = argv[1]
    should_delete = "--delete" in argv
    cfg = MODELS[model_key]

    banner = "=" * 70
    print(f"\n{banner}")
    print(f"Testing download progress for: {cfg['description']}")
    print(f"Repo: {cfg['repo_id']}")
    print(f"Method: {cfg['method']}")
    print(f"{banner}\n")

    if should_delete:
        delete_cache(cfg["repo_id"])
        print()

    spy = ProgressSpy()
    runner = {
        "qwen": download_qwen,
        "luxtts": download_luxtts,
        "chatterbox": download_chatterbox,
    }[model_key]
    try:
        runner(spy)
    except Exception as e:
        # Still show the summary so partial progress data isn't lost.
        print(f"\n!!! Download failed: {e}")
    spy.summary()


if __name__ == "__main__":
    main()

216
scripts/update-icons.sh Normal file
View File

@@ -0,0 +1,216 @@
#!/bin/bash
set -e

# Complete Icon Update Script
# Updates both Liquid Glass icon bundle AND all platform fallback icons from
# exports. Can be invoked from any directory; all paths below are relative to
# the repo root.
cd "$(dirname "$0")/.."

# Input/output locations — constants, never reassigned.
readonly EXPORTS_DIR="tauri/assets/voicebox_exports"
readonly ICON_BUNDLE="tauri/assets/voicebox.icon"
readonly ASSETS_DIR="$ICON_BUNDLE/Assets"
readonly ICONS_DIR="tauri/src-tauri/icons"
readonly LANDING_LOGO="landing/public/voicebox-logo.png"
readonly LANDING_PUBLIC="landing/public"
# 1024px master image; every other size is downscaled from this one file.
readonly SOURCE_ICON="$EXPORTS_DIR/voicebox-iOS-Dark-1024x1024@1x.png"

echo "🎨 Updating all Voicebox icons from exports..."
echo ""

# Fail fast if the master export is missing; diagnostics go to stderr.
if [[ ! -f "$SOURCE_ICON" ]]; then
    echo "Error: Source icon not found at $SOURCE_ICON" >&2
    exit 1
fi
# ============================================
# PART 1: Compile Liquid Glass Icon Bundle
# ============================================
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📦 Part 1: Compiling Liquid Glass Icon Bundle"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Compiling voicebox.icon with actool..."

# Drop previously generated artifacts so the cargo build regenerates them.
rm -rf tauri/src-tauri/gen/*.icns tauri/src-tauri/gen/Assets.car 2>/dev/null

# Build in a subshell so the working directory is restored automatically.
(
    cd tauri/src-tauri
    cargo build 2>/dev/null || echo " ⚠ Cargo build had warnings (this is normal)"
)

if [ -f "tauri/src-tauri/gen/voicebox.icns" ]; then
    echo " ✓ voicebox.icns generated"
else
    echo " ⚠ Warning: voicebox.icns not generated (will use fallback)"
fi
echo ""
# ============================================
# PART 2: Generate Platform Fallback Icons
# ============================================
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🖼️ Part 2: Generating Platform Fallback Icons"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

mkdir -p "$ICONS_DIR"

# macOS & Desktop Icons
echo "Generating macOS/Desktop icons..."
sips -s format png -z 32 32 "$SOURCE_ICON" --out "$ICONS_DIR/32x32.png" 2>/dev/null
sips -s format png -z 64 64 "$SOURCE_ICON" --out "$ICONS_DIR/64x64.png" 2>/dev/null
sips -s format png -z 128 128 "$SOURCE_ICON" --out "$ICONS_DIR/128x128.png" 2>/dev/null
sips -s format png -z 256 256 "$SOURCE_ICON" --out "$ICONS_DIR/128x128@2x.png" 2>/dev/null
sips -s format png -z 512 512 "$SOURCE_ICON" --out "$ICONS_DIR/icon.png" 2>/dev/null

# Copy Liquid Glass compiled ICNS or generate fallback
echo "Copying icon.icns..."
if [ -f "tauri/src-tauri/gen/voicebox.icns" ]; then
    cp tauri/src-tauri/gen/voicebox.icns "$ICONS_DIR/icon.icns"
    echo " ✓ Copied Liquid Glass compiled icon.icns"
else
    echo " ⚠ Liquid Glass icon not found, generating fallback icon.icns..."
    # Fix: use a private mktemp dir instead of the fixed /tmp path. A
    # predictable name can collide between users/runs (symlink-attack prone),
    # and stale leftovers in the iconset would get baked into the ICNS.
    iconset_root=$(mktemp -d)
    # iconutil requires the input directory to be named *.iconset.
    iconset_dir="$iconset_root/voicebox.iconset"
    mkdir -p "$iconset_dir"
    # "name:pixels" pairs for every slot Apple's iconset format expects.
    for spec in icon_16x16:16 icon_16x16@2x:32 icon_32x32:32 icon_32x32@2x:64 \
                icon_128x128:128 icon_128x128@2x:256 icon_256x256:256 \
                icon_256x256@2x:512 icon_512x512:512 icon_512x512@2x:1024; do
        sips -s format png -z "${spec##*:}" "${spec##*:}" "$SOURCE_ICON" \
            --out "$iconset_dir/${spec%%:*}.png" 2>/dev/null
    done
    iconutil -c icns "$iconset_dir" -o "$ICONS_DIR/icon.icns"
    rm -rf -- "$iconset_root"
    echo " ✓ Generated fallback icon.icns"
fi
# Windows Square Logos
echo "Generating Windows icons..."
for size in 30 44 71 89 107 142 150 284 310; do
    sips -s format png -z "$size" "$size" "$SOURCE_ICON" --out "$ICONS_DIR/Square${size}x${size}Logo.png" 2>/dev/null
done
sips -s format png -z 50 50 "$SOURCE_ICON" --out "$ICONS_DIR/StoreLogo.png" 2>/dev/null

# Windows icon.ico (multi-size ICO file)
echo "Generating Windows icon.ico..."
if command -v convert &> /dev/null; then
    # Fix: render intermediate PNGs into a private mktemp dir rather than
    # fixed /tmp names (clobber-prone across users/runs, symlink-attack risk).
    # Windows typically uses: 16x16, 32x32, 48x48, 256x256.
    ico_tmp=$(mktemp -d)
    for px in 16 32 48 256; do
        sips -s format png -z "$px" "$px" "$SOURCE_ICON" --out "$ico_tmp/icon-$px.png" 2>/dev/null
    done
    # Combine into proper multi-size ICO file
    convert "$ico_tmp/icon-16.png" "$ico_tmp/icon-32.png" "$ico_tmp/icon-48.png" "$ico_tmp/icon-256.png" "$ICONS_DIR/icon.ico" 2>/dev/null
    rm -rf -- "$ico_tmp"
    echo " ✓ Generated Windows icon.ico"
else
    # Fallback: use sips to create a basic ICO (single size)
    echo " ⚠ ImageMagick not found - generating basic icon.ico (single size)"
    sips -s format ico -z 256 256 "$SOURCE_ICON" --out "$ICONS_DIR/icon.ico" 2>/dev/null || echo " ⚠ Failed to generate icon.ico (sips may not support ICO format)"
fi
# iOS Icons
echo "Generating iOS icons..."
mkdir -p "$ICONS_DIR/ios"

# Fix: 'declare -A' (associative arrays) needs bash >= 4, but this script is
# macOS-only (sips/iconutil/actool) and macOS ships /bin/bash 3.2, where the
# original would die with "declare: -A: invalid option". Use a plain
# "filename:pixels" pair list instead, which works on the stock shell.
ios_icons="
AppIcon-20x20@1x.png:20
AppIcon-20x20@2x.png:40
AppIcon-20x20@2x-1.png:40
AppIcon-20x20@3x.png:60
AppIcon-29x29@1x.png:29
AppIcon-29x29@2x.png:58
AppIcon-29x29@2x-1.png:58
AppIcon-29x29@3x.png:87
AppIcon-40x40@1x.png:40
AppIcon-40x40@2x.png:80
AppIcon-40x40@2x-1.png:80
AppIcon-40x40@3x.png:120
AppIcon-60x60@2x.png:120
AppIcon-60x60@3x.png:180
AppIcon-76x76@1x.png:76
AppIcon-76x76@2x.png:152
AppIcon-83.5x83.5@2x.png:167
AppIcon-512@2x.png:1024
"

# Filenames contain no whitespace, so unquoted word-splitting here is safe.
for entry in $ios_icons; do
    filename="${entry%%:*}"
    size="${entry##*:}"
    sips -s format png -z "$size" "$size" "$SOURCE_ICON" --out "$ICONS_DIR/ios/$filename" 2>/dev/null
done
# Android Icons
echo "Generating Android icons..."
# One mipmap directory per density; each holds the same three launcher
# variants at that density's pixel size.
for density_spec in mdpi:48 hdpi:72 xhdpi:96 xxhdpi:144 xxxhdpi:192; do
    density="${density_spec%%:*}"
    px="${density_spec##*:}"
    mipmap_dir="$ICONS_DIR/android/mipmap-$density"
    mkdir -p "$mipmap_dir"
    for launcher in ic_launcher ic_launcher_round ic_launcher_foreground; do
        sips -s format png -z "$px" "$px" "$SOURCE_ICON" --out "$mipmap_dir/$launcher.png" 2>/dev/null
    done
done
# Landing Page Logo & Favicon
echo "Generating landing page logo..."
mkdir -p "$LANDING_PUBLIC"
sips -s format png -z 1024 1024 "$SOURCE_ICON" --out "$LANDING_LOGO" 2>/dev/null

echo "Generating landing page favicon..."
# Generate favicon.png (32x32 is standard for favicons)
sips -s format png -z 32 32 "$SOURCE_ICON" --out "$LANDING_PUBLIC/favicon.png" 2>/dev/null

# Generate proper multi-size favicon.ico using ImageMagick if available
if command -v convert &> /dev/null; then
    # Fix: intermediate PNGs go into a private mktemp dir rather than fixed
    # /tmp names (clobber-prone across users/runs, symlink-attack risk).
    fav_tmp=$(mktemp -d)
    sips -s format png -z 16 16 "$SOURCE_ICON" --out "$fav_tmp/favicon-16.png" 2>/dev/null
    sips -s format png -z 32 32 "$SOURCE_ICON" --out "$fav_tmp/favicon-32.png" 2>/dev/null
    # Combine into proper multi-size ICO file
    convert "$fav_tmp/favicon-16.png" "$fav_tmp/favicon-32.png" "$LANDING_PUBLIC/favicon.ico" 2>/dev/null
    rm -rf -- "$fav_tmp"
    echo " ✓ Generated proper multi-size favicon.ico"
else
    # Fallback: skip ICO if ImageMagick not available (PNG will be used)
    echo " ⚠ ImageMagick not found - skipping favicon.ico (using favicon.png instead)"
fi

# Also generate apple-touch-icon (180x180 for iOS)
sips -s format png -z 180 180 "$SOURCE_ICON" --out "$LANDING_PUBLIC/apple-touch-icon.png" 2>/dev/null
# Final report: everything this script regenerated, plus the next step.
printf '%s\n' \
    "" \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "✅ All icons updated successfully!" \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "" \
    "Updated:" \
    " ✓ Liquid Glass icon bundle with all appearance variants" \
    " ✓ macOS/Desktop fallback icons" \
    " ✓ Windows Square logos" \
    " ✓ Windows icon.ico (multi-size)" \
    " ✓ iOS AppIcons (18 sizes)" \
    " ✓ Android mipmap icons (5 densities)" \
    " ✓ Landing page logo" \
    " ✓ Landing page favicon" \
    "" \
    "Next: Rebuild the app with 'cd tauri && bun run tauri build'"