Skip to content

Builders API Reference

Auto-generated from source code docstrings.

Oryx Builder

builders.oryx_builder

Oryx Builder - Build, Release, and Ship projects to the Ocean

The complete Drydock shipping system using Microsoft Oryx.

Usage

python oryx_builder.py check                 # Verify system ready
python oryx_builder.py detect                # Detect platforms
python oryx_builder.py build [--output dir]  # Build only
python oryx_builder.py release --version X   # Build + stage to Release/
python oryx_builder.py ship --registry URL   # Ship to Ocean
python oryx_builder.py rollback --to X       # Rollback to version

Example

python oryx_builder.py release ../Projects/dynakernel --version v1.0.0
python oryx_builder.py ship ../Release/dynakernel --registry ghcr.io/myorg
python oryx_builder.py rollback ../Ocean/dynakernel --to v0.9.0

Classes

BuildResult dataclass

Result of an Oryx build.

Source code in Tools/builders/oryx_builder.py
@dataclass
class BuildResult:
    """Result of an Oryx build.

    Returned by build_project(); records the outcome, artifact locations,
    and the parsed oryx-manifest.toml produced by the build.
    """
    success: bool                        # True when the Oryx build exited 0
    project_path: str                    # resolved source project directory
    output_path: str                     # directory the artifacts were written to
    platform: Optional[str] = None       # platform name from the Oryx manifest
    version: Optional[str] = None        # runtime version from the Oryx manifest
    duration_seconds: float = 0.0        # wall-clock build time
    log_path: Optional[str] = None       # path to the captured build log
    error: Optional[str] = None          # short failure description; None on success
    manifest: dict = field(default_factory=dict)  # parsed oryx-manifest.toml contents

ReleaseResult dataclass

Result of a release operation.

Source code in Tools/builders/oryx_builder.py
@dataclass
class ReleaseResult:
    """Result of a release operation.

    Returned by release_project(); wraps the underlying BuildResult plus
    the Release/ staging location.
    """
    success: bool                                  # True when build + staging completed
    project_name: str                              # project directory name
    version: str                                   # normalized version (leading 'v')
    release_path: str                              # Release/<project> staging directory
    build_result: Optional[BuildResult] = None     # underlying Oryx build outcome
    error: Optional[str] = None                    # failure description; None on success

ShipResult dataclass

Result of shipping to Ocean.

Source code in Tools/builders/oryx_builder.py
@dataclass
class ShipResult:
    """Result of shipping to Ocean.

    Returned by ship_to_ocean(); records the image reference that was
    built/pushed and the Ocean directory that was updated.
    """
    success: bool                 # True when the ship pipeline completed
    project_name: str             # project name from the drydock manifest
    version: str                  # shipped version tag
    image: str                    # full image reference (registry/name:version)
    registry: str                 # target registry URL
    ocean_path: str               # Ocean/<project> directory that was updated
    error: Optional[str] = None   # failure description; None on success

Functions

check_docker

check_docker() -> bool

Check if Docker is available.

Source code in Tools/builders/oryx_builder.py
def check_docker() -> bool:
    """Check if Docker is available.

    Returns True when the `docker` binary exists and `docker --version`
    exits 0; False when the binary is missing or the command fails.
    """
    try:
        probe = subprocess.run(
            ["docker", "--version"],
            capture_output=True,
            text=True,
        )
    except FileNotFoundError:
        # docker binary not on PATH at all
        return False
    return probe.returncode == 0

check_oryx_image

check_oryx_image() -> bool

Check if Oryx image is available locally.

Source code in Tools/builders/oryx_builder.py
def check_oryx_image() -> bool:
    """Check if Oryx image is available locally."""
    # `docker images -q <ref>` prints the image ID only when the image
    # exists locally, so non-empty stdout means "present".
    listing = subprocess.run(
        ["docker", "images", "-q", ORYX_BUILD_IMAGE],
        capture_output=True,
        text=True,
    )
    return bool(listing.stdout.strip())

pull_oryx_image

pull_oryx_image() -> bool

Pull the Oryx build image.

Source code in Tools/builders/oryx_builder.py
def pull_oryx_image() -> bool:
    """Pull the Oryx build image."""
    print(f"Pulling Oryx image: {ORYX_BUILD_IMAGE}")
    # Let docker stream its own progress output straight to the terminal.
    pull = subprocess.run(
        ["docker", "pull", ORYX_BUILD_IMAGE],
        capture_output=False
    )
    return pull.returncode == 0

detect_platform

detect_platform(project_path: Path) -> dict

Detect platform using Oryx.

Source code in Tools/builders/oryx_builder.py
def detect_platform(project_path: Path) -> dict:
    """Detect platform using Oryx.

    Mounts the project read-only into the Oryx image and parses the
    `oryx detect` output into {"platforms": [{"platform", "version"}, ...]}.
    Returns {} when nothing was detected.
    """
    resolved = project_path.resolve()

    proc = subprocess.run(
        [
            "docker", "run", "--rm",
            "-v", f"{resolved}:/app:ro",
            ORYX_BUILD_IMAGE,
            "oryx", "detect", "/app"
        ],
        capture_output=True,
        text=True,
    )

    detected = []
    for raw in proc.stdout.split('\n'):
        entry = raw.strip()
        # Only "name: version" lines count; skip banners like "Detecting...".
        if not entry or ':' not in entry or entry.startswith('Detecting'):
            continue
        pieces = entry.split(':')
        if len(pieces) < 2:
            continue
        detected.append({
            "platform": pieces[0].strip().lower(),
            "version": pieces[1].strip() if len(pieces) > 1 else None,
        })

    return {"platforms": detected} if detected else {}

build_project

build_project(project_path: Path, output_path: Optional[Path] = None, platform: Optional[str] = None, platform_version: Optional[str] = None, env_vars: Optional[dict] = None) -> BuildResult

Build a project using Oryx.

Source code in Tools/builders/oryx_builder.py
def build_project(
    project_path: Path,
    output_path: Optional[Path] = None,
    platform: Optional[str] = None,
    platform_version: Optional[str] = None,
    env_vars: Optional[dict] = None
) -> BuildResult:
    """Build a project using Oryx.

    Runs the Oryx build image in a throwaway container, mounting the
    project at /app and the artifact directory at /output, then parses the
    resulting oryx-manifest.toml for platform details.

    Args:
        project_path: Source project directory (resolved to absolute).
        output_path: Destination for build artifacts; defaults to
            <project>/dist.
        platform: Optional platform override (passed as PLATFORM_NAME).
        platform_version: Optional version override (PLATFORM_VERSION).
        env_vars: Extra environment variables forwarded to the container.

    Returns:
        BuildResult describing success/failure, timings, log location,
        and the parsed Oryx manifest.
    """
    import time
    start_time = time.time()

    project_path = project_path.resolve()

    if output_path is None:
        output_path = project_path / "dist"
    else:
        output_path = output_path.resolve()

    output_path.mkdir(parents=True, exist_ok=True)

    # Source is mounted read-write; artifacts land in /output.
    docker_cmd = [
        "docker", "run", "--rm",
        "-v", f"{project_path}:/app",
        "-v", f"{output_path}:/output",
    ]

    # NOTE(review): when env_vars is supplied, the caller's dict is mutated
    # by the platform overrides below — confirm callers don't rely on it
    # staying untouched.
    env_vars = env_vars or {}
    if platform:
        env_vars["PLATFORM_NAME"] = platform
    if platform_version:
        env_vars["PLATFORM_VERSION"] = platform_version

    for key, value in env_vars.items():
        docker_cmd.extend(["-e", f"{key}={value}"])

    docker_cmd.extend([
        ORYX_BUILD_IMAGE,
        "oryx", "build", "/app",
        "--output", "/output"
    ])

    # Mirror build output to a timestamped log under <project>/.drydock/logs.
    log_dir = project_path / ".drydock" / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)
    log_path = log_dir / f"build-{datetime.now().strftime('%Y%m%d-%H%M%S')}.log"

    print(f"Building: {project_path}")
    print(f"Output:   {output_path}")

    with open(log_path, "w") as log_file:
        # stderr is folded into stdout so the log is one interleaved stream.
        result = subprocess.run(
            docker_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True
        )
        log_file.write(result.stdout)
        print(result.stdout)

    duration = time.time() - start_time

    # Oryx drops a manifest next to the artifacts describing what it built.
    manifest = {}
    manifest_path = output_path / "oryx-manifest.toml"
    if manifest_path.exists():
        manifest = parse_toml(manifest_path)

    return BuildResult(
        success=result.returncode == 0,
        project_path=str(project_path),
        output_path=str(output_path),
        platform=manifest.get("PlatformName"),
        version=manifest.get("NodeVersion") or manifest.get("PythonVersion") or manifest.get("DotNetCoreSdkVersion"),
        duration_seconds=duration,
        log_path=str(log_path),
        error=None if result.returncode == 0 else "Build failed",
        manifest=manifest
    )

parse_toml

parse_toml(path: Path) -> dict

Simple TOML parser for manifest files.

Source code in Tools/builders/oryx_builder.py
def parse_toml(path: Path) -> dict:
    """Simple TOML parser for manifest files.

    Minimal line-oriented parser: supports ``[section]`` headers and
    ``key = value`` pairs (surrounding quotes stripped). All values are
    returned as strings; arrays, nested tables, and inline comments are
    not handled.

    Args:
        path: File to read.

    Returns:
        Dict of top-level keys plus one sub-dict per section. Returns
        whatever was parsed so far (usually ``{}``) when the file cannot
        be read — missing manifests are expected, so this is best-effort
        by design.
    """
    data = {}
    current_section = None

    try:
        with open(path) as f:
            for line in f:
                line = line.strip()
                # Skip blanks and full-line comments.
                if not line or line.startswith('#'):
                    continue

                # Section header: [name]
                if line.startswith('[') and line.endswith(']'):
                    current_section = line[1:-1]
                    if current_section not in data:
                        data[current_section] = {}
                    continue

                # Key-value pair; split on the first '=' only so values
                # may themselves contain '='.
                if '=' in line:
                    key, value = line.split('=', 1)
                    key = key.strip()
                    value = value.strip().strip('"').strip("'")

                    if current_section:
                        data[current_section][key] = value
                    else:
                        data[key] = value
    except (OSError, UnicodeDecodeError):
        # Only I/O and decoding failures are expected here; the previous
        # bare `except Exception` silently hid genuine bugs as well.
        pass

    return data

write_toml

write_toml(path: Path, data: dict)

Write data to TOML file.

Source code in Tools/builders/oryx_builder.py
def write_toml(path: Path, data: dict):
    """Write data to TOML file.

    Top-level scalar keys are emitted first, then one ``[section]`` block
    per nested dict. Supports bool, int/float, list-of-str, and str
    values at either level.

    Args:
        path: Destination file (overwritten).
        data: Mapping of scalar values and/or section dicts.
    """

    def _format_value(value) -> str:
        # bool must be tested before int/float: bool is an int subclass.
        if isinstance(value, bool):
            return str(value).lower()
        if isinstance(value, (int, float)):
            return str(value)
        if isinstance(value, list):
            return '[' + ', '.join(f'"{v}"' for v in value) + ']'
        return f'"{value}"'

    lines = []

    # Write top-level keys first. (Lists were previously only handled
    # inside sections; at the top level they were serialized as a quoted
    # Python repr, which is invalid TOML.)
    for key, value in data.items():
        if not isinstance(value, dict):
            lines.append(f'{key} = {_format_value(value)}')

    if lines:
        lines.append('')

    # Write sections
    for section, values in data.items():
        if isinstance(values, dict):
            lines.append(f'[{section}]')
            for key, value in values.items():
                lines.append(f'{key} = {_format_value(value)}')
            lines.append('')

    path.write_text('\n'.join(lines))

release_project

release_project(project_path: Path, version: str, platform: Optional[str] = None) -> ReleaseResult

Build and stage project to Release/.

Source code in Tools/builders/oryx_builder.py
def release_project(
    project_path: Path,
    version: str,
    platform: Optional[str] = None
) -> ReleaseResult:
    """Build and stage project to Release/.

    Pipeline: clean Release/<project>, build with Oryx into its dist/,
    then generate the drydock manifest, a Dockerfile, and a release-notes
    template alongside the artifacts.

    Args:
        project_path: Source project directory.
        version: Release version; a leading 'v' is added if missing.
        platform: Optional platform override forwarded to build_project().

    Returns:
        ReleaseResult; on build failure, success is False and the
        BuildResult carries the details.
    """

    project_path = project_path.resolve()
    project_name = project_path.name

    # Normalize version
    if not version.startswith('v'):
        version = f'v{version}'

    release_path = RELEASE_DIR / project_name
    dist_path = release_path / "dist"

    print(f"\n{'='*60}")
    print(f"RELEASING {project_name} {version}")
    print(f"{'='*60}\n")

    # Clean previous release so stale artifacts never leak into this one.
    if release_path.exists():
        print(f"Cleaning previous release: {release_path}")
        shutil.rmtree(release_path)

    release_path.mkdir(parents=True)
    dist_path.mkdir()

    # Build
    print("\n[1/4] Building with Oryx...")
    build_result = build_project(project_path, dist_path, platform=platform)

    if not build_result.success:
        # Stop early: no manifest/Dockerfile for a failed build.
        return ReleaseResult(
            success=False,
            project_name=project_name,
            version=version,
            release_path=str(release_path),
            build_result=build_result,
            error="Build failed"
        )

    # Generate drydock manifest
    print("\n[2/4] Generating manifest...")
    manifest = generate_drydock_manifest(
        project_path,
        project_name,
        version,
        build_result
    )
    write_toml(release_path / "drydock-manifest.toml", manifest)

    # Generate Dockerfile
    print("\n[3/4] Generating Dockerfile...")
    dockerfile = generate_dockerfile(
        build_result.platform or "unknown",
        build_result.manifest
    )
    (release_path / "Dockerfile").write_text(dockerfile)

    # Generate release notes template
    print("\n[4/4] Generating release notes...")
    release_notes = generate_release_notes(project_name, version, build_result)
    (release_path / "release-notes.md").write_text(release_notes)

    print(f"\n{'='*60}")
    print(f"RELEASE STAGED: {release_path}")
    print(f"{'='*60}")
    print(f"\nNext: Review release, then ship with:")
    print(f"  python oryx_builder.py ship {release_path} --registry <url>")

    return ReleaseResult(
        success=True,
        project_name=project_name,
        version=version,
        release_path=str(release_path),
        build_result=build_result
    )

generate_drydock_manifest

generate_drydock_manifest(project_path: Path, project_name: str, version: str, build_result: BuildResult) -> dict

Generate drydock-manifest.toml content.

Source code in Tools/builders/oryx_builder.py
def generate_drydock_manifest(
    project_path: Path,
    project_name: str,
    version: str,
    build_result: BuildResult
) -> dict:
    """Generate drydock-manifest.toml content.

    Args:
        project_path: Source project directory (recorded as provenance).
        project_name: Project directory name.
        version: Normalized release version.
        build_result: Outcome of the Oryx build; platform/version/duration
            are recorded.

    Returns:
        Nested dict suitable for write_toml(): release, build, artifacts,
        provenance, and (when present) components sections.
    """
    # Local import: the file-level import only brings in `datetime`.
    from datetime import timezone

    # Try to read assembly manifest if it exists
    assembly_manifest_path = project_path / "assembly-manifest.toml"
    components = []
    if assembly_manifest_path.exists():
        assembly = parse_toml(assembly_manifest_path)
        components = assembly.get("components", {}).get("used", [])

    manifest = {
        "release": {
            "project": project_name,
            "version": version,
            # datetime.utcnow() is deprecated (3.12+); build an aware UTC
            # timestamp and keep the original trailing-"Z" format.
            "date": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            "build_id": datetime.now().strftime('%Y%m%d-%H%M%S'),
        },
        "build": {
            "platform": build_result.platform or "unknown",
            "platform_version": build_result.version or "unknown",
            "duration_seconds": round(build_result.duration_seconds, 2),
            "oryx_image": ORYX_BUILD_IMAGE,
        },
        "artifacts": {
            "dist_path": "dist/",
            "dockerfile": "Dockerfile",
        },
        "provenance": {
            "source_project": str(project_path),
            "drydock_root": str(DRYDOCK_ROOT),
        }
    }

    # Previously `components` was collected but never recorded; surface it
    # so assembly provenance is not silently dropped.
    if components:
        manifest["components"] = {"used": components}

    return manifest

generate_dockerfile

generate_dockerfile(platform: str, oryx_manifest: dict) -> str

Generate a Dockerfile based on platform.

Source code in Tools/builders/oryx_builder.py
def generate_dockerfile(platform: str, oryx_manifest: dict) -> str:
    """Generate a Dockerfile based on platform.

    Picks a base image and start command for node/python/dotnet projects;
    anything else gets a bare-alpine placeholder to customize by hand.
    """

    normalized = platform.lower()

    if normalized in ("nodejs", "node"):
        node_version = oryx_manifest.get("NodeVersion", "18")
        return f"""# Generated by Drydock
FROM node:{node_version}-alpine
WORKDIR /app
COPY dist/ .
EXPOSE 3000
CMD ["node", "server.js"]
"""

    if normalized == "python":
        py_version = oryx_manifest.get("PythonVersion", "3.11")
        return f"""# Generated by Drydock
FROM python:{py_version}-slim
WORKDIR /app
COPY dist/ .
RUN pip install --no-cache-dir -r requirements.txt 2>/dev/null || true
EXPOSE 8000
CMD ["python", "-m", "gunicorn", "app:app", "--bind", "0.0.0.0:8000"]
"""

    if normalized in ("dotnet", "dotnetcore"):
        # Collapse a full SDK version like "8.0.100" to the "8.0" runtime tag.
        sdk = oryx_manifest.get("DotNetCoreSdkVersion", "8.0")
        runtime_tag = sdk.split('.')[0] + ".0"
        startup_dll = oryx_manifest.get("StartupDllFileName", "app.dll")
        return f"""# Generated by Drydock
FROM mcr.microsoft.com/dotnet/aspnet:{runtime_tag}
WORKDIR /app
COPY dist/ .
EXPOSE 80
ENTRYPOINT ["dotnet", "{startup_dll}"]
"""

    return """# Generated by Drydock
# Platform not recognized - customize as needed
FROM alpine:latest
WORKDIR /app
COPY dist/ .
CMD ["/bin/sh"]
"""

generate_release_notes

generate_release_notes(project_name: str, version: str, build_result: BuildResult) -> str

Generate release notes template.

Source code in Tools/builders/oryx_builder.py
def generate_release_notes(
    project_name: str,
    version: str,
    build_result: BuildResult
) -> str:
    """Generate release notes template.

    Returns a markdown skeleton with {placeholders} for the release
    engineer to fill in; header fields come from the build result.
    """

    # Pre-compute the dynamic header fields; everything else below is a
    # fill-in-the-blanks markdown template.
    release_date = datetime.now().strftime('%Y-%m-%d')
    platform_label = build_result.platform or 'Unknown'
    platform_ver = build_result.version or 'Unknown'

    return f"""# Release Notes: {project_name} {version}

**Release Date:** {release_date}
**Platform:** {platform_label}
**Platform Version:** {platform_ver}

## What's New

- {{Describe new features}}

## Bug Fixes

- {{List bug fixes}}

## Breaking Changes

- {{List breaking changes, or "None"}}

## Upgrade Instructions

```bash
# Pull new image
docker pull {{registry}}/{project_name}:{version}

# Or update deployment
kubectl set image deployment/{project_name} app={{registry}}/{project_name}:{version}
```

## Known Issues

- {{List known issues, or "None"}}

---

*Built with Drydock + Oryx*
"""

ship_to_ocean

ship_to_ocean(release_path: Path, registry: str, tag: Optional[str] = None) -> ShipResult

Ship a release to Ocean (production registry).

Source code in Tools/builders/oryx_builder.py
def ship_to_ocean(
    release_path: Path,
    registry: str,
    tag: Optional[str] = None
) -> ShipResult:
    """Ship a release to Ocean (production registry).

    Pipeline: read the staged drydock manifest, `docker build` the release
    directory, push the image, update Ocean's current/history state, and
    emit deployment configs.

    Args:
        release_path: Staged Release/<project> directory (must contain
            drydock-manifest.toml and a Dockerfile).
        registry: Registry prefix for the image reference.
        tag: Optional version override; a leading 'v' is added if missing.

    Returns:
        ShipResult; success is False when the manifest is missing or the
        image build fails. A failed push is tolerated (local testing).
    """

    release_path = release_path.resolve()

    # Read manifest to get version
    manifest_path = release_path / "drydock-manifest.toml"
    if not manifest_path.exists():
        return ShipResult(
            success=False,
            project_name="unknown",
            version="unknown",
            image="",
            registry=registry,
            ocean_path="",
            error=f"No drydock-manifest.toml found in {release_path}"
        )

    manifest = parse_toml(manifest_path)
    project_name = manifest.get("release", {}).get("project", release_path.name)
    version = manifest.get("release", {}).get("version", "unknown")

    # An explicit tag wins over the manifest version.
    if tag:
        version = tag if tag.startswith('v') else f'v{tag}'

    image_name = f"{registry}/{project_name}:{version}"

    print(f"\n{'='*60}")
    print(f"SHIPPING {project_name} {version}")
    print(f"{'='*60}\n")

    # Build container image
    print(f"[1/4] Building container image: {image_name}")

    result = subprocess.run(
        ["docker", "build", "-t", image_name, str(release_path)],
        capture_output=True,
        text=True
    )

    if result.returncode != 0:
        print(f"Error: {result.stderr}")
        return ShipResult(
            success=False,
            project_name=project_name,
            version=version,
            image=image_name,
            registry=registry,
            ocean_path="",
            error=f"Docker build failed: {result.stderr}"
        )

    print("  Image built successfully")

    # Push to registry
    print(f"\n[2/4] Pushing to registry: {registry}")

    push_result = subprocess.run(
        ["docker", "push", image_name],
        capture_output=True,
        text=True
    )

    if push_result.returncode != 0:
        print(f"  Warning: Push failed (registry may not be available)")
        print(f"  {push_result.stderr}")
        # Continue anyway - might be local testing
    else:
        print("  Pushed successfully")

    # Update Ocean
    print(f"\n[3/4] Updating Ocean registry...")
    ocean_path = update_ocean(project_name, version, image_name, manifest)
    print(f"  Ocean updated: {ocean_path}")

    # Summary
    print(f"\n[4/4] Generating deployment config...")
    generate_deployment_config(ocean_path, project_name, image_name)

    print(f"\n{'='*60}")
    print(f"SHIPPED TO OCEAN")
    print(f"{'='*60}")
    print(f"\n  Image:   {image_name}")
    print(f"  Ocean:   {ocean_path}")
    print(f"  Version: {version}")

    return ShipResult(
        success=True,
        project_name=project_name,
        version=version,
        image=image_name,
        registry=registry,
        ocean_path=str(ocean_path)
    )

update_ocean

update_ocean(project_name: str, version: str, image: str, manifest: dict) -> Path

Update Ocean with new deployment.

Source code in Tools/builders/oryx_builder.py
def update_ocean(
    project_name: str,
    version: str,
    image: str,
    manifest: dict
) -> Path:
    """Update Ocean with new deployment.

    Archives the previous current/ deployment into history/<old_version>
    (when a version.txt identifies it), then writes the new current state
    (registry.txt, version.txt, deployed.txt).

    Args:
        project_name: Project directory name under OCEAN_DIR.
        version: Version being deployed (e.g. "v1.2.0").
        image: Fully-qualified container image reference.
        manifest: Parsed drydock manifest; build_id/platform are recorded.

    Returns:
        Path to the project's Ocean directory.
    """
    # Local import: the file-level import only brings in `datetime`.
    from datetime import timezone

    ocean_project = OCEAN_DIR / project_name
    current_dir = ocean_project / "current"
    history_dir = ocean_project / "history"

    # Create directories
    ocean_project.mkdir(parents=True, exist_ok=True)
    history_dir.mkdir(exist_ok=True)

    # Archive current to history (if exists); never overwrite an existing
    # history entry for the same version.
    if current_dir.exists():
        current_version_file = current_dir / "version.txt"
        if current_version_file.exists():
            old_version = current_version_file.read_text().strip()
            old_history = history_dir / old_version
            if not old_history.exists():
                print(f"  Archiving {old_version} to history/")
                shutil.copytree(current_dir, old_history)
        shutil.rmtree(current_dir)

    current_dir.mkdir()

    # Write current state. datetime.utcnow() is deprecated (3.12+); use an
    # aware UTC timestamp and keep the original trailing-"Z" format.
    deployed_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    (current_dir / "registry.txt").write_text(image)
    (current_dir / "version.txt").write_text(version)
    (current_dir / "deployed.txt").write_text(
        f"timestamp: {deployed_at}\n"
        f"build_id: {manifest.get('release', {}).get('build_id', 'unknown')}\n"
        f"platform: {manifest.get('build', {}).get('platform', 'unknown')}\n"
    )

    return ocean_project

generate_deployment_config

generate_deployment_config(ocean_path: Path, project_name: str, image: str)

Generate deployment.yaml for the project.

Source code in Tools/builders/oryx_builder.py
def generate_deployment_config(ocean_path: Path, project_name: str, image: str):
    """Generate deployment.yaml for the project.

    Writes a Kubernetes Deployment+Service manifest and a docker-compose
    alternative into <ocean_path>/current/.
    """

    target_dir = ocean_path / "current"

    # Kubernetes Deployment + Service pair.
    k8s_deployment = f"""apiVersion: apps/v1
kind: Deployment
metadata:
  name: {project_name}
  labels:
    app: {project_name}
spec:
  replicas: 2
  selector:
    matchLabels:
      app: {project_name}
  template:
    metadata:
      labels:
        app: {project_name}
    spec:
      containers:
        - name: {project_name}
          image: {image}
          ports:
            - containerPort: 8000
          resources:
            limits:
              memory: "256Mi"
              cpu: "500m"
---
apiVersion: v1
kind: Service
metadata:
  name: {project_name}
spec:
  selector:
    app: {project_name}
  ports:
    - port: 80
      targetPort: 8000
  type: ClusterIP
"""

    # Docker Compose alternative for non-Kubernetes hosts.
    compose = f"""version: '3.8'
services:
  {project_name}:
    image: {image}
    ports:
      - "8000:8000"
    restart: unless-stopped
"""

    (target_dir / "deployment.yaml").write_text(k8s_deployment)
    (target_dir / "docker-compose.yaml").write_text(compose)

rollback

rollback(ocean_path: Path, target_version: str) -> bool

Rollback to a previous version.

Source code in Tools/builders/oryx_builder.py
def rollback(ocean_path: Path, target_version: str) -> bool:
    """Rollback to a previous version.

    Restores history/<version> into current/, archiving the live
    deployment into history/ first so the rollback itself is reversible.

    Args:
        ocean_path: Ocean project directory (contains current/ and history/).
        target_version: Version to restore; a leading 'v' is added if missing.

    Returns:
        True on success, False when the target version is not in history/.
    """
    # Local import: the file-level import only brings in `datetime`.
    from datetime import timezone

    ocean_path = ocean_path.resolve()
    project_name = ocean_path.name

    current_dir = ocean_path / "current"
    history_dir = ocean_path / "history"

    # Normalize version
    if not target_version.startswith('v'):
        target_version = f'v{target_version}'

    target_dir = history_dir / target_version

    if not target_dir.exists():
        print(f"Error: Version {target_version} not found in history/")
        print(f"Available versions:")
        # history/ itself may be missing; iterating it unguarded raised
        # FileNotFoundError here.
        if history_dir.exists():
            for v in sorted(history_dir.iterdir()):
                print(f"  - {v.name}")
        return False

    print(f"\n{'='*60}")
    print(f"ROLLING BACK {project_name} to {target_version}")
    print(f"{'='*60}\n")

    # Get current version
    current_version = "unknown"
    if (current_dir / "version.txt").exists():
        current_version = (current_dir / "version.txt").read_text().strip()

    # Archive current to history, then clear current/ so the restore below
    # can copytree into it.
    if current_dir.exists():
        if current_version != "unknown":
            archive_dir = history_dir / current_version
            if not archive_dir.exists():
                print(f"[1/3] Archiving current ({current_version}) to history/")
                shutil.copytree(current_dir, archive_dir)
        # Previously current/ was left in place when its version was
        # unknown, which made the copytree below fail; always remove it.
        shutil.rmtree(current_dir)

    # Restore target version to current
    print(f"[2/3] Restoring {target_version} to current/")
    shutil.copytree(target_dir, current_dir)

    # Update deployed timestamp. datetime.utcnow() is deprecated (3.12+);
    # use an aware UTC timestamp, keeping the original trailing-"Z" format.
    rolled_back_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    deployed_path = current_dir / "deployed.txt"
    deployed_content = deployed_path.read_text() if deployed_path.exists() else ""
    deployed_content += f"\nrollback_timestamp: {rolled_back_at}"
    deployed_content += f"\nrolled_back_from: {current_version}"
    deployed_path.write_text(deployed_content)

    print(f"[3/3] Rollback complete")

    image = (current_dir / "registry.txt").read_text().strip()
    print(f"\n{'='*60}")
    print(f"ROLLBACK COMPLETE")
    print(f"{'='*60}")
    print(f"\n  From:  {current_version}")
    print(f"  To:    {target_version}")
    print(f"  Image: {image}")
    print(f"\nTo deploy, apply the deployment config:")
    print(f"  kubectl apply -f {current_dir}/deployment.yaml")

    return True

Containerd Builder

builders.containerd_builder

Containerd Builder - Docker-free container management for Drydock

Uses containerd directly (via ctr CLI) instead of Docker daemon. Provides OCI-native operations, namespace isolation, and air-gapped workflows via OCI archive import/export.

Requirements
  • containerd running (systemctl status containerd)
  • ctr CLI available
  • Root/sudo access for containerd socket
Usage

python containerd_builder.py check              # Verify system
python containerd_builder.py pull               # Pull image
python containerd_builder.py push --registry    # Push image
python containerd_builder.py build --version    # Build OCI image
python containerd_builder.py release --version  # Build + stage
python containerd_builder.py ship --registry    # Ship to Ocean
python containerd_builder.py export --output    # Export OCI archive
python containerd_builder.py import             # Import OCI archive
python containerd_builder.py images             # List images
python containerd_builder.py rollback --to      # Rollback
python containerd_builder.py status [project]   # Ocean status

Examples:

python containerd_builder.py check
python containerd_builder.py build ../Projects/my-app --version v1.0.0
python containerd_builder.py export myregistry/app:v1.0 --output app-v1.0.tar
python containerd_builder.py import app-v1.0.tar
python containerd_builder.py ship ../Release/my-app --registry ghcr.io/myorg

Classes

BuildResult dataclass

Result of a containerd build.

Source code in Tools/builders/containerd_builder.py
@dataclass
class BuildResult:
    """Result of a containerd build.

    Same shape as the Oryx builder's BuildResult, but artifacts are an
    OCI image reference rather than a filesystem output directory.
    """
    success: bool                        # True when the build succeeded
    project_path: str                    # source project directory
    image_ref: str                       # OCI image reference that was produced
    platform: Optional[str] = None       # detected platform name, when known
    version: Optional[str] = None        # platform/runtime version, when known
    duration_seconds: float = 0.0        # wall-clock build time
    log_path: Optional[str] = None       # path to the captured build log
    error: Optional[str] = None          # failure description; None on success
    manifest: dict = field(default_factory=dict)  # parsed build manifest data

ReleaseResult dataclass

Result of a release operation.

Source code in Tools/builders/containerd_builder.py
@dataclass
class ReleaseResult:
    """Result of a release operation.

    Wraps the underlying BuildResult plus the Release/ staging location.
    """
    success: bool                                  # True when build + staging completed
    project_name: str                              # project directory name
    version: str                                   # normalized version string
    release_path: str                              # Release/<project> staging directory
    build_result: Optional[BuildResult] = None     # underlying containerd build outcome
    error: Optional[str] = None                    # failure description; None on success

ShipResult dataclass

Result of shipping to Ocean.

Source code in Tools/builders/containerd_builder.py
@dataclass
class ShipResult:
    """Result of shipping to Ocean.

    Records the OCI image reference that was pushed and the Ocean
    directory that was updated.
    """
    success: bool                 # True when the ship pipeline completed
    project_name: str             # project name
    version: str                  # shipped version tag
    image_ref: str                # full OCI image reference
    registry: str                 # target registry URL
    ocean_path: str               # Ocean/<project> directory that was updated
    error: Optional[str] = None   # failure description; None on success

Functions

needs_sudo

needs_sudo() -> bool

Check if sudo is required for containerd socket access.

Source code in Tools/builders/containerd_builder.py
def needs_sudo() -> bool:
    """Check if sudo is required for containerd socket access.

    The answer is computed once and memoized in the module-level
    _NEEDS_SUDO; a missing socket conservatively counts as "sudo needed".
    """
    global _NEEDS_SUDO
    if _NEEDS_SUDO is not None:
        return _NEEDS_SUDO
    socket_path = Path(CONTAINERD_SOCKET)
    if socket_path.exists():
        _NEEDS_SUDO = not os.access(str(socket_path), os.R_OK | os.W_OK)
    else:
        _NEEDS_SUDO = True
    return _NEEDS_SUDO

ctr_cmd

ctr_cmd(args: list[str], namespace: str = CONTAINERD_NAMESPACE) -> list[str]

Build a ctr command with namespace and optional sudo.

Source code in Tools/builders/containerd_builder.py
def ctr_cmd(args: list[str], namespace: str = CONTAINERD_NAMESPACE) -> list[str]:
    """Build a ctr command with namespace and optional sudo."""
    # Prepend sudo only when the containerd socket isn't directly writable.
    prefix = ["sudo"] if needs_sudo() else []
    return prefix + ["ctr", "-n", namespace] + list(args)

run_ctr

run_ctr(args: list[str], namespace: str = CONTAINERD_NAMESPACE, capture: bool = True, timeout: int = 300) -> subprocess.CompletedProcess

Run a ctr command and return result.

Source code in Tools/builders/containerd_builder.py
def run_ctr(
    args: list[str],
    namespace: str = CONTAINERD_NAMESPACE,
    capture: bool = True,
    timeout: int = 300
) -> subprocess.CompletedProcess:
    """Run a ctr command and return result.

    Timeouts and a missing ctr binary are converted into a synthetic
    failed CompletedProcess instead of raising, so callers only ever
    inspect returncode/stdout/stderr.
    """
    full_cmd = ctr_cmd(args, namespace)
    try:
        return subprocess.run(
            full_cmd,
            capture_output=capture,
            text=True,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        return subprocess.CompletedProcess(
            full_cmd, returncode=1, stdout="", stderr="Command timed out"
        )
    except FileNotFoundError:
        return subprocess.CompletedProcess(
            full_cmd, returncode=1, stdout="", stderr="ctr not found"
        )

check_containerd

check_containerd() -> bool

Check if containerd is running.

Source code in Tools/builders/containerd_builder.py
def check_containerd() -> bool:
    """Check if containerd is running.

    Returns True only when systemd reports the containerd unit as
    "active". Hosts without systemctl (non-systemd, containers, macOS)
    return False instead of raising.
    """
    try:
        result = subprocess.run(
            ["systemctl", "is-active", "containerd"],
            capture_output=True, text=True
        )
    except FileNotFoundError:
        # No systemctl on PATH — previously this raised; mirror
        # check_docker()'s behavior and report "not available".
        return False
    return result.stdout.strip() == "active"

check_ctr

check_ctr() -> bool

Check if ctr CLI is available.

Source code in Tools/builders/containerd_builder.py
def check_ctr() -> bool:
    """Check if ctr CLI is available.

    Uses shutil.which instead of spawning a `which` subprocess: no child
    process, and portable to hosts without a `which` binary.
    """
    from shutil import which  # local import: keeps this file's imports untouched
    return which("ctr") is not None

check_buildkit

check_buildkit() -> bool

Check if BuildKit is available.

Source code in Tools/builders/containerd_builder.py
def check_buildkit() -> bool:
    """Check if BuildKit is available.

    Uses shutil.which instead of spawning a `which` subprocess: no child
    process, and portable to hosts without a `which` binary.
    """
    from shutil import which  # local import: keeps this file's imports untouched
    return which("buildctl") is not None

check_namespace

check_namespace() -> bool

Ensure the drydock namespace exists.

Source code in Tools/builders/containerd_builder.py
def check_namespace() -> bool:
    """Ensure the drydock namespace exists.

    Lists namespaces (via the default namespace) and creates the drydock
    one when absent. Returns False when ctr itself is unavailable.
    """
    listing = run_ctr(["namespaces", "list"], namespace="default")
    if listing.returncode != 0:
        return False

    if CONTAINERD_NAMESPACE in listing.stdout:
        return True

    created = run_ctr(
        ["namespaces", "create", CONTAINERD_NAMESPACE],
        namespace="default"
    )
    return created.returncode == 0

system_check

system_check() -> dict

Full system readiness check.

Source code in Tools/builders/containerd_builder.py
def system_check() -> dict:
    """Full system readiness check.

    Returns a dict of individual probe results plus an aggregate "ready"
    flag (containerd active, ctr present, socket visible, namespace ok).
    """
    status = {
        "containerd": check_containerd(),
        "ctr": check_ctr(),
        "buildkit": check_buildkit(),
        "namespace": False,
        "sudo_required": needs_sudo(),
        "socket": Path(CONTAINERD_SOCKET).exists(),
    }

    # Only probe the namespace when the core pieces are in place.
    core_ok = status["containerd"] and status["ctr"] and status["socket"]
    if core_ok:
        status["namespace"] = check_namespace()

    status["ready"] = core_ok and status["namespace"]

    return status

pull_image

pull_image(image_ref: str) -> bool

Pull an image from a registry into the drydock namespace.

Source code in Tools/builders/containerd_builder.py
def pull_image(image_ref: str) -> bool:
    """Pull an image from a registry into the drydock namespace."""
    print(f"Pulling: {image_ref}")
    # Progress streams to the terminal; pulls get a generous 10-minute cap.
    outcome = run_ctr(["images", "pull", image_ref], capture=False, timeout=600)
    return outcome.returncode == 0

push_image

push_image(image_ref: str) -> bool

Push an image to a registry.

Source code in Tools/builders/containerd_builder.py
def push_image(image_ref: str) -> bool:
    """Push an image to a registry."""
    print(f"Pushing: {image_ref}")
    # Progress streams to the terminal; pushes get a generous 10-minute cap.
    outcome = run_ctr(["images", "push", image_ref], capture=False, timeout=600)
    return outcome.returncode == 0

tag_image

tag_image(source_ref: str, target_ref: str) -> bool

Tag (re-reference) an image.

Source code in Tools/builders/containerd_builder.py
def tag_image(source_ref: str, target_ref: str) -> bool:
    """Tag (re-reference) an image."""
    outcome = run_ctr(["images", "tag", source_ref, target_ref])
    return outcome.returncode == 0

list_images

list_images() -> list[dict]

List images in the drydock namespace.

Source code in Tools/builders/containerd_builder.py
def list_images() -> list[dict]:
    """List images in the drydock namespace.

    Parses ``ctr images list`` output into dicts with "ref", "type",
    and "size" keys. Returns an empty list if the command fails.
    """
    result = run_ctr(["images", "list"])
    if result.returncode != 0:
        return []

    images = []
    # First line is the column header; remaining rows are whitespace-separated.
    for line in result.stdout.strip().split("\n")[1:]:
        parts = line.split()
        if len(parts) >= 3:
            # The >= 3 guard already guarantees a third column, so the old
            # `parts[2] if len(parts) > 2 else "unknown"` fallback was dead code.
            images.append({
                "ref": parts[0],
                "type": parts[1],
                "size": parts[2],
            })

    return images

remove_image

remove_image(image_ref: str) -> bool

Remove an image from the drydock namespace.

Source code in Tools/builders/containerd_builder.py
def remove_image(image_ref: str) -> bool:
    """Remove an image from the drydock namespace."""
    return run_ctr(["images", "remove", image_ref]).returncode == 0

export_image

export_image(image_ref: str, output_path: Path) -> bool

Export an image as an OCI archive (tar).

Source code in Tools/builders/containerd_builder.py
def export_image(image_ref: str, output_path: Path) -> bool:
    """Export an image as an OCI archive (tar) at *output_path*."""
    dest = Path(output_path)
    dest.parent.mkdir(parents=True, exist_ok=True)

    print(f"Exporting: {image_ref}")
    print(f"      To:  {dest}")

    result = run_ctr(
        ["images", "export", str(dest), image_ref],
        timeout=600
    )

    # Treat a zero exit without a file on disk as a failure too.
    if result.returncode != 0 or not dest.exists():
        print(f"  Export failed: {result.stderr}")
        return False

    size_mb = dest.stat().st_size / (1024 * 1024)
    print(f"  Exported: {size_mb:.1f} MB")
    return True

import_image

import_image(archive_path: Path) -> bool

Import an OCI archive into the drydock namespace.

Source code in Tools/builders/containerd_builder.py
def import_image(archive_path: Path) -> bool:
    """Import an OCI archive into the drydock namespace."""
    archive = Path(archive_path).resolve()

    if not archive.exists():
        print(f"Error: Archive not found: {archive}")
        return False

    size_mb = archive.stat().st_size / (1024 * 1024)
    print(f"Importing: {archive} ({size_mb:.1f} MB)")

    result = run_ctr(
        ["images", "import", str(archive)],
        timeout=600
    )

    if result.returncode != 0:
        print(f"  Import failed: {result.stderr}")
        return False

    print("  Imported successfully")
    # Echo ctr's report of the imported references, if any.
    for line in (result.stdout or "").strip().splitlines():
        if line:
            print(f"  {line}")
    return True

detect_platform

detect_platform(project_path: Path) -> dict

Detect project platform (mirrors platform_detector.py logic).

Source code in Tools/builders/containerd_builder.py
def detect_platform(project_path: Path) -> dict:
    """Detect project platform (mirrors platform_detector.py logic).

    Returns {"platforms": [{"platform": ..., "marker": ...}, ...]} with at
    most one entry per platform, in the fixed order below.
    """
    root = Path(project_path).resolve()

    # Marker files per platform; glob patterns are allowed (contain "*").
    markers_by_platform = {
        "nodejs": ["package.json"],
        "python": ["requirements.txt", "setup.py", "pyproject.toml", "Pipfile"],
        "dotnet": ["*.csproj", "*.sln", "*.fsproj"],
        "go": ["go.mod"],
        "rust": ["Cargo.toml"],
        "java": ["pom.xml", "build.gradle", "build.gradle.kts"],
        "ruby": ["Gemfile"],
        "php": ["composer.json"],
    }

    detected = []
    for name, markers in markers_by_platform.items():
        for marker in markers:
            if "*" in marker:
                hit = any(root.glob(marker))
            else:
                hit = (root / marker).exists()
            if hit:
                detected.append({"platform": name, "marker": marker})
                break  # first matching marker wins for this platform

    return {"platforms": detected}

get_base_image

get_base_image(platform: str, version: Optional[str] = None) -> str

Get the appropriate base image for a platform.

Source code in Tools/builders/containerd_builder.py
def get_base_image(platform: str, version: Optional[str] = None) -> str:
    """Return the base image reference for *platform*.

    An explicit *version* overrides the default tag; unknown platforms
    fall back to ``alpine:latest``.
    """
    defaults = {
        "nodejs": ("node", "22-alpine"),
        "python": ("python", "3.12-slim"),
        "dotnet": ("mcr.microsoft.com/dotnet/aspnet", "8.0"),
        "go": ("golang", "1.22-alpine"),
        "rust": ("rust", "1.77-slim"),
        "java": ("eclipse-temurin", "21-jre"),
        "ruby": ("ruby", "3.3-slim"),
        "php": ("php", "8.3-fpm-alpine"),
    }

    image, fallback_tag = defaults.get(platform, ("alpine", "latest"))
    return f"{image}:{version or fallback_tag}"

generate_dockerfile_content

generate_dockerfile_content(project_path: Path, platform: str) -> str

Generate a Dockerfile for the detected platform.

Source code in Tools/builders/containerd_builder.py
def generate_dockerfile_content(project_path: Path, platform: str) -> str:
    """Generate a Dockerfile for the detected platform.

    Static templates are used for most platforms; dotnet is special-cased
    because the entrypoint DLL name comes from the project's .csproj file.
    Unknown platforms get a minimal alpine image.
    """
    root = Path(project_path)

    if platform == "dotnet":
        # Derive the published DLL name from the first .csproj found.
        csprojs = list(root.glob("**/*.csproj"))
        project_file = csprojs[0].name if csprojs else "app.csproj"
        dll_name = project_file.replace(".csproj", ".dll")
        return f"""FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
WORKDIR /src
COPY . .
RUN dotnet restore && dotnet publish -c Release -o /app/publish

FROM mcr.microsoft.com/dotnet/aspnet:8.0
WORKDIR /app
COPY --from=build /app/publish .
EXPOSE 80
ENTRYPOINT ["dotnet", "{dll_name}"]
"""

    templates = {
        "nodejs": """FROM node:22-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci --production 2>/dev/null || npm install --production
COPY . .
EXPOSE 3000
CMD ["node", "server.js"]
""",
        "python": """FROM python:3.12-slim
WORKDIR /app
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt 2>/dev/null || true
COPY . .
EXPOSE 8000
CMD ["python", "-m", "gunicorn", "app:app", "--bind", "0.0.0.0:8000"]
""",
        "go": """FROM golang:1.22-alpine AS build
WORKDIR /src
COPY go.* ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 go build -o /app/server .

FROM alpine:latest
WORKDIR /app
COPY --from=build /app/server .
EXPOSE 8080
CMD ["./server"]
""",
        "rust": """FROM rust:1.77-slim AS build
WORKDIR /src
COPY . .
RUN cargo build --release

FROM debian:bookworm-slim
WORKDIR /app
COPY --from=build /src/target/release/* ./
EXPOSE 8080
CMD ["./app"]
""",
    }

    fallback = """FROM alpine:latest
WORKDIR /app
COPY . .
CMD ["/bin/sh"]
"""
    return templates.get(platform, fallback)

build_with_buildkit

build_with_buildkit(project_path: Path, image_ref: str, dockerfile_path: Optional[Path] = None) -> bool

Build an image using BuildKit (if available).

Source code in Tools/builders/containerd_builder.py
def build_with_buildkit(
    project_path: Path,
    image_ref: str,
    dockerfile_path: Optional[Path] = None
) -> bool:
    """Build an image using BuildKit (if available).

    Returns False immediately when buildctl/buildkitd is not usable.
    The Dockerfile defaults to <project_path>/Dockerfile.
    """
    if not check_buildkit():
        return False

    context_dir = project_path.resolve()
    dockerfile = Path(dockerfile_path) if dockerfile_path else context_dir / "Dockerfile"

    # buildctl takes the dockerfile's *directory* as a local mount.
    cmd = [
        "buildctl", "build",
        "--frontend=dockerfile.v0",
        "--local", f"context={context_dir}",
        "--local", f"dockerfile={dockerfile.parent}",
        "--output", f"type=image,name={image_ref},push=false",
    ]

    if needs_sudo():
        cmd.insert(0, "sudo")

    proc = subprocess.run(cmd, capture_output=False, timeout=600)
    return proc.returncode == 0

build_with_ctr

build_with_ctr(project_path: Path, image_ref: str, base_image: str, platform: str) -> bool

Build an image by layering project files onto a base image.

This is the fallback when BuildKit isn't available. It:

1. Pulls the base image
2. Creates a container from it
3. Copies project files into the container's rootfs
4. Commits the container as a new image snapshot

Source code in Tools/builders/containerd_builder.py
def build_with_ctr(
    project_path: Path,
    image_ref: str,
    base_image: str,
    platform: str
) -> bool:
    """
    Build an image by layering project files onto a base image.

    This is the fallback when BuildKit isn't available. It:
    1. Pulls the base image
    2. Creates a container from it
    3. Copies project files into the container's rootfs
    4. Commits the container as a new image snapshot

    NOTE(review): the current implementation stops short of steps 3-4.
    It builds the project layer tar but never applies it to the image;
    the final image is simply the base image re-tagged as *image_ref*
    (see the "simplified approach" comment below) — confirm whether the
    layer-commit step is still pending.
    """
    import tempfile

    project_path = project_path.resolve()

    # Pull base image
    print(f"  Pulling base: {base_image}")
    pull_result = run_ctr(["images", "pull", base_image], timeout=600)
    if pull_result.returncode != 0:
        print(f"  Failed to pull {base_image}: {pull_result.stderr}")
        return False

    # Create a container from the base (timestamped name to avoid collisions)
    container_id = f"drydock-build-{datetime.now().strftime('%Y%m%d%H%M%S')}"
    print(f"  Creating build container: {container_id}")

    create_result = run_ctr([
        "containers", "create",
        base_image,
        container_id
    ])

    if create_result.returncode != 0:
        print(f"  Failed to create container: {create_result.stderr}")
        return False

    # For snapshot-based approach, we use the content API
    # Export the base, modify, re-import
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)

        # Export base image to OCI archive
        base_tar = tmpdir / "base.tar"
        export_result = run_ctr(
            ["images", "export", str(base_tar), base_image],
            timeout=300
        )

        if export_result.returncode != 0:
            print(f"  Failed to export base: {export_result.stderr}")
            # Clean up the build container before bailing out
            run_ctr(["containers", "delete", container_id])
            return False

        # Create a layer tar with project files
        # NOTE(review): this tar is currently only measured, never merged
        # into the exported image.
        layer_tar = tmpdir / "layer.tar"
        tar_cmd = ["tar", "cf", str(layer_tar), "-C", str(project_path), "."]
        tar_result = subprocess.run(tar_cmd, capture_output=True, text=True)

        if tar_result.returncode != 0:
            print(f"  Failed to create layer: {tar_result.stderr}")
            run_ctr(["containers", "delete", container_id])
            return False

        layer_size = layer_tar.stat().st_size / (1024 * 1024)
        print(f"  Project layer: {layer_size:.1f} MB")

        # Tag the base image as our new image reference
        # (This is a simplified approach - creates a reference)
        tag_result = run_ctr(["images", "tag", base_image, image_ref])
        if tag_result.returncode != 0:
            # Image ref might already exist, remove and retry
            run_ctr(["images", "remove", image_ref])
            tag_result = run_ctr(["images", "tag", base_image, image_ref])

    # Clean up build container
    run_ctr(["containers", "delete", container_id])

    if tag_result.returncode == 0:
        print(f"  Image created: {image_ref}")
        return True

    print(f"  Failed to create image: {tag_result.stderr}")
    return False

build_project

build_project(project_path: Path, version: str, image_name: Optional[str] = None, platform: Optional[str] = None) -> BuildResult

Build a project into a containerd image.

Source code in Tools/builders/containerd_builder.py
def build_project(
    project_path: Path,
    version: str,
    image_name: Optional[str] = None,
    platform: Optional[str] = None
) -> BuildResult:
    """Build a project into a containerd image.

    Args:
        project_path: Project root directory.
        version: Release version; a leading "v" is prepended if missing.
        image_name: Optional image name; defaults to "drydock/<project>".
        platform: Platform id (e.g. "python"); auto-detected when omitted.

    Returns:
        BuildResult describing the outcome, the image reference, and the
        path of the build log written under <project>/.drydock/logs/.
    """
    import time
    start_time = time.time()

    project_path = Path(project_path).resolve()
    project_name = project_path.name

    if not version.startswith("v"):
        version = f"v{version}"

    # Image reference
    if image_name:
        image_ref = f"{image_name}:{version}"
    else:
        image_ref = f"drydock/{project_name}:{version}"

    # Log setup
    log_dir = project_path / ".drydock" / "logs"
    log_dir.mkdir(parents=True, exist_ok=True)
    log_path = log_dir / f"containerd-build-{datetime.now().strftime('%Y%m%d-%H%M%S')}.log"

    print(f"Building: {project_path}")
    print(f"Image:    {image_ref}")

    # Detect platform if not specified
    if not platform:
        detection = detect_platform(project_path)
        if detection.get("platforms"):
            platform = detection["platforms"][0]["platform"]
            print(f"Platform: {platform} (auto-detected)")
        else:
            platform = "unknown"
            print("Platform: unknown (no markers found)")

    # Generate a throwaway Dockerfile when the project doesn't ship one
    dockerfile = project_path / "Dockerfile"
    generated_dockerfile = False

    if not dockerfile.exists():
        print("  Generating Dockerfile...")
        content = generate_dockerfile_content(project_path, platform)
        dockerfile.write_text(content)
        generated_dockerfile = True

    # Try BuildKit first, fallback to ctr approach.
    # Track which method actually produced the image: the previous code
    # logged "buildkit" whenever BuildKit was merely installed, even when
    # the BuildKit build failed and the ctr fallback did the work.
    success = False
    method = None
    if check_buildkit():
        print("  Building with BuildKit...")
        success = build_with_buildkit(project_path, image_ref, dockerfile)
        if success:
            method = "buildkit"

    if not success:
        print("  Building with ctr (base image + layer)...")
        base_image = get_base_image(platform)
        success = build_with_ctr(project_path, image_ref, base_image, platform)
        method = "ctr"

    # Clean up generated Dockerfile
    if generated_dockerfile and dockerfile.exists():
        dockerfile.unlink()

    duration = time.time() - start_time

    # Write build log
    log_content = (
        f"Build: {project_name} {version}\n"
        f"Image: {image_ref}\n"
        f"Platform: {platform}\n"
        f"Duration: {duration:.1f}s\n"
        f"Success: {success}\n"
        f"Method: {method}\n"
        f"Timestamp: {datetime.utcnow().isoformat()}Z\n"
    )
    log_path.write_text(log_content)

    return BuildResult(
        success=success,
        project_path=str(project_path),
        image_ref=image_ref,
        platform=platform,
        version=version,
        duration_seconds=duration,
        log_path=str(log_path),
        error=None if success else "Build failed",
        manifest={
            "platform": platform,
            "image_ref": image_ref,
            "version": version,
            # Actual build method used ("buildkit" or "ctr"); consumers may
            # fall back to probing check_buildkit() when this key is absent.
            "method": method,
        }
    )

parse_toml

parse_toml(path: Path) -> dict

Simple TOML parser for manifest files.

Source code in Tools/builders/containerd_builder.py
def parse_toml(path: Path) -> dict:
    """Best-effort parser for Drydock manifest TOML files.

    Supports ``[section]`` headers, ``key = value`` pairs, comments, and
    inline arrays of quoted scalars (the subset that write_toml emits —
    the previous version left arrays as raw strings, so write/parse did
    not round-trip). Not a general TOML parser. Returns whatever was
    parsed before an I/O error, possibly {}.
    """
    data: dict = {}
    current_section = None

    def _scalar(raw: str) -> str:
        # Strip surrounding single/double quotes; values stay strings.
        return raw.strip().strip('"').strip("'")

    try:
        with open(path) as f:
            for raw_line in f:
                line = raw_line.strip()
                if not line or line.startswith("#"):
                    continue
                if line.startswith("[") and line.endswith("]"):
                    current_section = line[1:-1]
                    data.setdefault(current_section, {})
                    continue
                if "=" not in line:
                    continue
                key, value = line.split("=", 1)
                key = key.strip()
                value = value.strip()
                if value.startswith("[") and value.endswith("]"):
                    # Inline array: split on commas and unquote each item.
                    inner = value[1:-1].strip()
                    parsed = [_scalar(item) for item in inner.split(",")] if inner else []
                else:
                    parsed = _scalar(value)
                target = data[current_section] if current_section else data
                target[key] = parsed
    except OSError:
        # Missing/unreadable manifest: return what was parsed so far.
        pass

    return data

write_toml

write_toml(path: Path, data: dict)

Write data to TOML file.

Source code in Tools/builders/containerd_builder.py
def write_toml(path: Path, data: dict):
    """Write *data* to a TOML file.

    Top-level scalar keys are written first, then each dict value becomes
    a ``[section]`` table. Lists are serialized as arrays of quoted items
    at both levels (previously a top-level list fell through to the
    string branch and was written as ``key = "[...]"``).
    """
    def _fmt(value) -> str:
        # bool must be tested before int: bool is an int subclass.
        if isinstance(value, bool):
            return str(value).lower()
        if isinstance(value, (int, float)):
            return str(value)
        if isinstance(value, list):
            return "[" + ", ".join(f'"{v}"' for v in value) + "]"
        return f'"{value}"'

    lines = [
        f"{key} = {_fmt(value)}"
        for key, value in data.items()
        if not isinstance(value, dict)
    ]
    if lines:
        lines.append("")  # blank line between scalars and first section

    for section, values in data.items():
        if isinstance(values, dict):
            lines.append(f"[{section}]")
            for key, value in values.items():
                lines.append(f"{key} = {_fmt(value)}")
            lines.append("")

    path.write_text("\n".join(lines))

release_project

release_project(project_path: Path, version: str, platform: Optional[str] = None) -> ReleaseResult

Build and stage a project to Release/.

Source code in Tools/builders/containerd_builder.py
def release_project(
    project_path: Path,
    version: str,
    platform: Optional[str] = None
) -> ReleaseResult:
    """Build and stage a project to Release/.

    Steps: build the image, export it as a portable OCI archive, write a
    drydock-manifest.toml, and generate release notes.

    Args:
        project_path: Project root directory.
        version: Release version; a leading "v" is prepended if missing.
        platform: Optional platform override passed to build_project.

    Returns:
        ReleaseResult; on build failure the result carries the failed
        BuildResult and an error message.
    """
    project_path = Path(project_path).resolve()
    project_name = project_path.name

    if not version.startswith("v"):
        version = f"v{version}"

    release_path = RELEASE_DIR / project_name

    print(f"\n{'='*60}")
    print(f"RELEASING {project_name} {version} (containerd)")
    print(f"{'='*60}\n")

    # Clean previous release so stale artifacts never leak into this one
    if release_path.exists():
        print(f"Cleaning previous release: {release_path}")
        shutil.rmtree(release_path)

    release_path.mkdir(parents=True)

    # Build image
    print("\n[1/4] Building with containerd...")
    build_result = build_project(
        project_path, version,
        platform=platform
    )

    if not build_result.success:
        return ReleaseResult(
            success=False,
            project_name=project_name,
            version=version,
            release_path=str(release_path),
            build_result=build_result,
            error="Build failed"
        )

    # Export image as OCI archive for portability
    print("\n[2/4] Exporting OCI archive...")
    archive_path = release_path / f"{project_name}-{version}.tar"
    export_image(build_result.image_ref, archive_path)

    # Generate manifest
    print("\n[3/4] Generating manifest...")
    # Prefer the method recorded by the build itself; fall back to probing
    # BuildKit availability (the old behavior, which could mis-report
    # "buildkit" when the ctr fallback actually produced the image).
    build_method = (
        build_result.manifest.get("method")
        or ("buildkit" if check_buildkit() else "ctr")
    )
    manifest = {
        "release": {
            "project": project_name,
            "version": version,
            "date": datetime.utcnow().isoformat() + "Z",
            "build_id": datetime.now().strftime("%Y%m%d-%H%M%S"),
            "builder": "containerd",
        },
        "build": {
            "platform": build_result.platform or "unknown",
            "image_ref": build_result.image_ref,
            "duration_seconds": round(build_result.duration_seconds, 2),
            "method": build_method,
        },
        "artifacts": {
            "oci_archive": archive_path.name,
            "image_ref": build_result.image_ref,
        },
        "provenance": {
            "source_project": str(project_path),
            "drydock_root": str(DRYDOCK_ROOT),
            "containerd_namespace": CONTAINERD_NAMESPACE,
        },
    }
    write_toml(release_path / "drydock-manifest.toml", manifest)

    # Generate release notes
    print("\n[4/4] Generating release notes...")
    notes = f"""# Release Notes: {project_name} {version}

**Release Date:** {datetime.now().strftime('%Y-%m-%d')}
**Builder:** containerd (Docker-free)
**Platform:** {build_result.platform or 'Unknown'}
**Image:** `{build_result.image_ref}`

## Artifacts

- OCI Archive: `{archive_path.name}`
- Image Ref: `{build_result.image_ref}`

## Deployment

### Using containerd (ctr)
```bash
# Import on target host
sudo ctr -n drydock images import {archive_path.name}

# Run
sudo ctr -n drydock run {build_result.image_ref} {project_name}
```

### Using nerdctl (if available)
```bash
nerdctl load < {archive_path.name}
nerdctl run -d --name {project_name} {build_result.image_ref}
```

### Air-gapped transfer
```bash
# Copy archive to target
scp {archive_path.name} target-host:/tmp/
# Import on target
ssh target-host 'sudo ctr -n drydock images import /tmp/{archive_path.name}'
```

## What's New

- {{Describe changes}}

---

*Built with Drydock containerd_builder.py*
"""
    (release_path / "release-notes.md").write_text(notes)

    print(f"\n{'='*60}")
    print(f"RELEASE STAGED: {release_path}")
    print(f"{'='*60}")
    print(f"\nArtifacts:")
    print(f"  Archive:  {archive_path}")
    print(f"  Image:    {build_result.image_ref}")
    print(f"  Manifest: {release_path / 'drydock-manifest.toml'}")
    print(f"\nNext: Ship with:")
    print(f"  python containerd_builder.py ship {release_path} --registry <url>")

    return ReleaseResult(
        success=True,
        project_name=project_name,
        version=version,
        release_path=str(release_path),
        build_result=build_result,
    )

ship_to_ocean

ship_to_ocean(release_path: Path, registry: str, tag: Optional[str] = None) -> ShipResult

Ship a release to Ocean (production).

Source code in Tools/builders/containerd_builder.py
def ship_to_ocean(
    release_path: Path,
    registry: str,
    tag: Optional[str] = None
) -> ShipResult:
    """Ship a release to Ocean (production).

    Reads the staged release's drydock-manifest.toml, re-tags the local
    image for *registry*, pushes it (best-effort), updates the Ocean
    deployment state, and copies the OCI archive alongside it for
    air-gapped recovery.

    Args:
        release_path: Staged release directory (from release_project).
        registry: Target registry, e.g. "ghcr.io/myorg".
        tag: Optional version override; "v" is prepended if missing.
    """
    release_path = Path(release_path).resolve()

    # Read manifest
    manifest_path = release_path / "drydock-manifest.toml"
    if not manifest_path.exists():
        return ShipResult(
            success=False,
            project_name="unknown",
            version="unknown",
            image_ref="",
            registry=registry,
            ocean_path="",
            error=f"No drydock-manifest.toml in {release_path}",
        )

    manifest = parse_toml(manifest_path)
    project_name = manifest.get("release", {}).get("project", release_path.name)
    version = manifest.get("release", {}).get("version", "unknown")
    local_image = manifest.get("artifacts", {}).get("image_ref", "")

    if tag:
        version = tag if tag.startswith("v") else f"v{tag}"

    registry_image = f"{registry}/{project_name}:{version}"

    print(f"\n{'='*60}")
    print(f"SHIPPING {project_name} {version}")
    print(f"{'='*60}\n")

    # Tag for registry (fixed: source and target refs were printed with
    # no separator between them)
    print(f"[1/4] Tagging: {local_image} -> {registry_image}")
    if local_image and local_image != registry_image:
        tag_image(local_image, registry_image)

    # Push to registry — failure is non-fatal so air-gapped ships still work
    print(f"\n[2/4] Pushing to: {registry}")
    push_ok = push_image(registry_image)
    if not push_ok:
        print("  Warning: Push failed (registry may be unavailable)")
        print("  Continuing with local-only ship...")

    # Update Ocean
    print(f"\n[3/4] Updating Ocean registry...")
    ocean_path = update_ocean(project_name, version, registry_image, manifest)
    print(f"  Ocean: {ocean_path}")

    # Copy OCI archive to Ocean for air-gapped recovery
    print(f"\n[4/4] Archiving OCI bundle...")
    archive_name = f"{project_name}-{version}.tar"
    source_archive = release_path / archive_name
    ocean_current = ocean_path / "current"

    if source_archive.exists():
        target_archive = ocean_current / archive_name
        shutil.copy2(source_archive, target_archive)
        print(f"  Archive: {target_archive}")

    print(f"\n{'='*60}")
    print(f"SHIPPED TO OCEAN")
    print(f"{'='*60}")
    print(f"\n  Image:    {registry_image}")
    print(f"  Ocean:    {ocean_path}")
    print(f"  Version:  {version}")

    return ShipResult(
        success=True,
        project_name=project_name,
        version=version,
        image_ref=registry_image,
        registry=registry,
        ocean_path=str(ocean_path),
    )

update_ocean

update_ocean(project_name: str, version: str, image_ref: str, manifest: dict) -> Path

Update Ocean with new deployment state.

Source code in Tools/builders/containerd_builder.py
def update_ocean(
    project_name: str,
    version: str,
    image_ref: str,
    manifest: dict
) -> Path:
    """Update Ocean with new deployment state.

    Archives the currently deployed version into history/, then writes
    fresh state files (registry.txt, version.txt, builder.txt,
    deployed.txt) and an executable run.sh into current/.

    Args:
        project_name: Project directory name under OCEAN_DIR.
        version: Version being deployed (e.g. "v1.0.0").
        image_ref: Fully-qualified image reference to record and run.
        manifest: Parsed release manifest; build_id/platform/method are
            read from it for deployed.txt.

    Returns:
        Path of the project's Ocean directory.
    """
    ocean_project = OCEAN_DIR / project_name
    current_dir = ocean_project / "current"
    history_dir = ocean_project / "history"

    ocean_project.mkdir(parents=True, exist_ok=True)
    history_dir.mkdir(exist_ok=True)

    # Archive current to history (keyed by its version.txt), then clear it.
    # An existing history entry for that version is kept as-is.
    if current_dir.exists():
        version_file = current_dir / "version.txt"
        if version_file.exists():
            old_version = version_file.read_text().strip()
            old_history = history_dir / old_version
            if not old_history.exists():
                print(f"  Archiving {old_version} to history/")
                shutil.copytree(current_dir, old_history)
        shutil.rmtree(current_dir)

    current_dir.mkdir()

    # Write current state
    (current_dir / "registry.txt").write_text(image_ref)
    (current_dir / "version.txt").write_text(version)
    (current_dir / "builder.txt").write_text("containerd")
    (current_dir / "deployed.txt").write_text(
        f"timestamp: {datetime.utcnow().isoformat()}Z\n"
        f"build_id: {manifest.get('release', {}).get('build_id', 'unknown')}\n"
        f"platform: {manifest.get('build', {}).get('platform', 'unknown')}\n"
        f"builder: containerd\n"
        f"method: {manifest.get('build', {}).get('method', 'ctr')}\n"
    )

    # Generate containerd-native run script: prefers a local OCI archive,
    # falls back to pulling from the registry, then (re)starts the task.
    (current_dir / "run.sh").write_text(f"""#!/bin/bash
# Run {project_name} {version} via containerd
set -e

IMAGE="{image_ref}"
NAME="{project_name}"

# Import from archive if image not present
if ! sudo ctr -n drydock images check "$IMAGE" >/dev/null 2>&1; then
    ARCHIVE="{project_name}-{version}.tar"
    if [ -f "$ARCHIVE" ]; then
        echo "Importing from archive..."
        sudo ctr -n drydock images import "$ARCHIVE"
    else
        echo "Pulling from registry..."
        sudo ctr -n drydock images pull "$IMAGE"
    fi
fi

# Stop existing instance
sudo ctr -n drydock tasks kill "$NAME" 2>/dev/null || true
sudo ctr -n drydock containers delete "$NAME" 2>/dev/null || true

# Run
echo "Starting $NAME ($IMAGE)..."
sudo ctr -n drydock run -d --net-host "$IMAGE" "$NAME"
echo "Running: $NAME"
""")
    # Make the script executable for operators
    os.chmod(current_dir / "run.sh", 0o755)

    return ocean_project

rollback

rollback(ocean_path: Path, target_version: str) -> bool

Rollback to a previous version.

Source code in Tools/builders/containerd_builder.py
def rollback(ocean_path: Path, target_version: str) -> bool:
    """Rollback to a previous version.

    Swaps current/ with the requested history/<version> snapshot:
    the current deployment is archived to history/ (if not already
    there), the target is restored, rollback metadata is appended to
    deployed.txt, and the target's OCI archive is re-imported when
    present.

    Args:
        ocean_path: The project's Ocean directory.
        target_version: Version to restore; "v" is prepended if missing.

    Returns:
        True on success; False when the target version has no history
        snapshot (available versions are printed in that case).
    """
    ocean_path = Path(ocean_path).resolve()
    project_name = ocean_path.name

    current_dir = ocean_path / "current"
    history_dir = ocean_path / "history"

    if not target_version.startswith("v"):
        target_version = f"v{target_version}"

    target_dir = history_dir / target_version

    if not target_dir.exists():
        print(f"Error: Version {target_version} not found in history/")
        if history_dir.exists():
            print("Available versions:")
            for v in sorted(history_dir.iterdir()):
                print(f"  - {v.name}")
        return False

    print(f"\n{'='*60}")
    print(f"ROLLING BACK {project_name} to {target_version}")
    print(f"{'='*60}\n")

    # Archive current before overwriting it, keyed by its recorded version
    current_version = "unknown"
    if (current_dir / "version.txt").exists():
        current_version = (current_dir / "version.txt").read_text().strip()

    if current_dir.exists() and current_version != "unknown":
        archive_dir = history_dir / current_version
        if not archive_dir.exists():
            print(f"[1/3] Archiving current ({current_version}) to history/")
            shutil.copytree(current_dir, archive_dir)
        shutil.rmtree(current_dir)

    # Restore target snapshot into current/
    print(f"[2/3] Restoring {target_version} to current/")
    shutil.copytree(target_dir, current_dir)

    # Append rollback metadata so deployed.txt keeps an audit trail
    deployed_path = current_dir / "deployed.txt"
    content = deployed_path.read_text() if deployed_path.exists() else ""
    content += f"\nrollback_timestamp: {datetime.utcnow().isoformat()}Z"
    content += f"\nrolled_back_from: {current_version}"
    deployed_path.write_text(content)

    # Re-import image if archive exists (keeps air-gapped hosts runnable)
    archive_name = f"{project_name}-{target_version}.tar"
    archive_path = current_dir / archive_name
    if archive_path.exists():
        print(f"[3/3] Re-importing image from archive...")
        import_image(archive_path)
    else:
        print(f"[3/3] No archive found (pull from registry if needed)")

    image = (current_dir / "registry.txt").read_text().strip() if (current_dir / "registry.txt").exists() else "?"

    print(f"\n{'='*60}")
    print(f"ROLLBACK COMPLETE")
    print(f"{'='*60}")
    print(f"\n  From:  {current_version}")
    print(f"  To:    {target_version}")
    print(f"  Image: {image}")
    print(f"\nTo run:")
    print(f"  bash {current_dir}/run.sh")

    return True