Version control Claude Code configuration including:
- Global instructions (CLAUDE.md)
- User settings (settings.json)
- Custom agents (architect, designer, engineer, etc.)
- Custom skills (create-skill templates and workflows)
Excludes session data, secrets, cache, and temporary files per .gitignore.
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>

243 lines · 7.4 KiB · Python · Executable File
#!/usr/bin/env python3
|
|
"""
|
|
Proxmox VM Operations Examples
|
|
Demonstrates common VM management workflows
|
|
"""
|
|
|
|
import sys
|
|
import os
|
|
|
|
# Add parent directory to path for imports
|
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
|
|
from proxmox_client import ProxmoxClient
|
|
import time
|
|
|
|
|
|
def example_list_all_vms():
    """Print a summary of every VM: totals, running/stopped split, and
    per-VM CPU/memory/uptime for the running ones."""
    print("=== Example: List All VMs ===\n")

    client = ProxmoxClient()
    vms = client.get_all_vms_status()

    print(f"Total VMs: {len(vms)}\n")

    # Partition by status in a single pass; statuses other than
    # 'running'/'stopped' are ignored, matching the summary below.
    by_status = {'running': [], 'stopped': []}
    for vm in vms:
        if vm['status'] in by_status:
            by_status[vm['status']].append(vm)

    print(f"Running: {len(by_status['running'])}")
    print(f"Stopped: {len(by_status['stopped'])}\n")

    print("Running VMs:")
    for vm in by_status['running']:
        # Guard against maxmem == 0 to avoid a ZeroDivisionError.
        mem_pct = (vm['mem'] / vm['maxmem'] * 100) if vm['maxmem'] > 0 else 0
        cpu_pct = vm['cpu'] * 100
        print(f"  VM {vm['vmid']}: {vm['name']}")
        print(f"    CPU: {cpu_pct:.1f}%, Memory: {mem_pct:.1f}%, Uptime: {vm['uptime']/3600:.1f}h")
|
def example_vm_lifecycle():
    """Start a stopped VM, wait for the task to finish, then report its
    status and resource usage."""
    print("\n=== Example: VM Lifecycle Management ===\n")

    client = ProxmoxClient()
    vmid = 100  # Change to your test VM

    # Check the current state before deciding whether to start it.
    status = client.get_vm_status(vmid)
    print(f"VM {vmid} current status: {status['status']}")

    if status['status'] == 'stopped':
        print(f"Starting VM {vmid}...")
        upid = client.start_vm(vmid)
        print(f"Task ID: {upid}")

        print("Waiting for VM to start...")
        started = client.wait_for_task(upid, timeout=60)
        if started:
            print("VM started successfully!")
        else:
            print("Start operation timed out or failed")

    # Monitor resources — brief pause so usage figures have settled.
    time.sleep(5)
    status = client.get_vm_status(vmid)
    print(f"\nVM Status: {status['status']}")
    print(f"CPU Usage: {status['cpu']*100:.1f}%")
    print(f"Memory: {status['mem']/(1024**3):.1f}GB / {status['maxmem']/(1024**3):.1f}GB")
|
def example_snapshot_workflow():
    """Create a snapshot, list all snapshots, then delete the test snapshot.

    Reports success/failure for both the create and the delete task.
    """
    print("\n=== Example: Snapshot Workflow ===\n")

    client = ProxmoxClient()
    vmid = 100  # Change to your test VM

    # Create snapshot (vmstate=False: disk-only, no RAM state).
    snapname = "test-snapshot-example"
    print(f"Creating snapshot '{snapname}' for VM {vmid}...")

    upid = client.create_snapshot(
        vmid=vmid,
        snapname=snapname,
        description="Example snapshot for testing",
        vmstate=False
    )
    print(f"Task ID: {upid}")

    if client.wait_for_task(upid, timeout=120):
        print("Snapshot created successfully!")

        # List all snapshots; 'current' is a pseudo-entry, not a real snapshot.
        print("\nAll snapshots:")
        snapshots = client.list_snapshots(vmid)
        for snap in snapshots:
            if snap.get('name') == 'current':
                continue
            print(f"  - {snap['name']}: {snap.get('description', 'No description')}")

        # Clean up - delete the test snapshot.
        print("\nDeleting test snapshot...")
        upid = client.delete_snapshot(vmid, snapname)
        if client.wait_for_task(upid, timeout=60):
            print("Snapshot deleted successfully!")
        else:
            # Fix: previously a failed/timed-out deletion was silently
            # ignored, leaving the test snapshot behind with no warning.
            print("Snapshot deletion failed or timed out")
    else:
        print("Snapshot creation failed or timed out")
|
def example_clone_vm():
    """Full-clone a VM from a template, inspect it, then delete the clone.

    Reports success/failure for both the clone and the cleanup task.
    """
    print("\n=== Example: Clone VM ===\n")

    client = ProxmoxClient()

    # Clone from template
    template_id = 100  # Your template VM
    new_vmid = 199     # New VM ID (make sure it doesn't exist)
    new_name = "test-clone-example"

    print(f"Cloning VM {template_id} to new VM {new_vmid} ({new_name})...")

    try:
        upid = client.clone_vm(
            vmid=template_id,
            newid=new_vmid,
            name=new_name,
            full=True  # Full clone, not linked
        )
        print(f"Task ID: {upid}")

        print("Waiting for clone operation (this may take a few minutes)...")
        if client.wait_for_task(upid, timeout=300):
            print(f"VM cloned successfully! New VM ID: {new_vmid}")

            # Get details of new VM
            vm_info = client.get_vm(new_vmid)
            print(f"New VM name: {vm_info.get('name')}")
            print(f"Memory: {vm_info.get('memory')}MB")
            print(f"CPUs: {vm_info.get('cores')}")

            # Clean up - delete the test clone.
            print(f"\nDeleting test clone VM {new_vmid}...")
            upid = client.delete_vm(new_vmid)
            if client.wait_for_task(upid, timeout=60):
                print("Test clone deleted successfully!")
            else:
                # Fix: previously a failed/timed-out deletion was silently
                # ignored, leaving an orphaned test VM with no warning.
                print(f"Test clone deletion failed or timed out - VM {new_vmid} may still exist")
        else:
            print("Clone operation timed out or failed")

    except Exception as e:
        print(f"Error: {e}")
|
def example_resource_monitoring():
    """Report node-level CPU/memory/uptime, then flag running VMs that
    use more than half of their allocated memory."""
    print("\n=== Example: Resource Monitoring ===\n")

    client = ProxmoxClient()

    # Node-level figures first.
    node_status = client.get_node_status()
    print("Node Status:")
    print(f"  CPU: {node_status['cpu']*100:.1f}%")
    print(f"  Memory: {node_status['memory']['used']/(1024**3):.1f}GB / {node_status['memory']['total']/(1024**3):.1f}GB")
    print(f"  Uptime: {node_status['uptime']/(3600*24):.1f} days")

    # Then scan for memory-hungry guests; guard clauses skip anything
    # not running or with no memory allocation (avoids divide-by-zero).
    print("\n High Resource Usage VMs (>50% memory):")
    for vm in client.get_all_vms_status():
        if vm['status'] != 'running' or vm['maxmem'] <= 0:
            continue
        mem_pct = (vm['mem'] / vm['maxmem']) * 100
        if mem_pct > 50:
            print(f"  VM {vm['vmid']} ({vm['name']}): {mem_pct:.1f}% memory")
|
def example_find_vm_by_name():
    """Look a VM up by its name rather than its numeric ID and print its
    status and memory usage."""
    print("\n=== Example: Find VM by Name ===\n")

    client = ProxmoxClient()

    vm_name = "docker-tdarr"  # Change to one of your VMs
    print(f"Searching for VM: {vm_name}")

    vm = client.get_vm_by_name(vm_name)
    # Early return on the not-found path keeps the happy path flat.
    if not vm:
        print(f"VM '{vm_name}' not found")
        return

    print(f"Found VM {vm['vmid']}: {vm['name']}")
    print(f"Status: {vm['status']}")
    print(f"Memory: {vm['mem']/(1024**3):.1f}GB / {vm['maxmem']/(1024**3):.1f}GB")
|
def example_batch_operations():
    """List every stopped VM as the starting point for batch operations
    (the actual batch start is left commented out for safety)."""
    print("\n=== Example: Batch Operations ===\n")

    client = ProxmoxClient()

    # Collect only the stopped guests.
    stopped_vms = [
        vm for vm in client.get_all_vms_status()
        if vm['status'] == 'stopped'
    ]

    print(f"Found {len(stopped_vms)} stopped VMs:")
    for vm in stopped_vms:
        print(f"  VM {vm['vmid']}: {vm['name']}")

    # Example: Could start all stopped VMs (commented out for safety)
    # print("\nStarting all stopped VMs...")
    # for vm in stopped_vms:
    #     print(f"Starting VM {vm['vmid']}...")
    #     client.start_vm(vm['vmid'])
|
def main():
    """Run the example workflows in sequence, reporting any failure."""
    print("Proxmox VM Operations Examples")
    print("=" * 50)

    # Demos run in order; comment out any you don't want to run.
    demos = (
        example_list_all_vms,
        example_vm_lifecycle,        # Caution: will start/stop a VM
        example_snapshot_workflow,   # Caution: creates/deletes snapshots
        # example_clone_vm,          # Caution: creates/deletes VMs
        example_resource_monitoring,
        example_find_vm_by_name,
        example_batch_operations,
    )

    try:
        for demo in demos:
            demo()

        print("\n" + "=" * 50)
        print("Examples completed!")

    except Exception as e:
        print(f"\nError running examples: {e}")
        import traceback
        traceback.print_exc()
# Script entry point: run all examples when executed directly.
if __name__ == "__main__":
    main()