24  Backend Abstraction

Let TensorWeaver run on CPU or GPU without code changes.

24.1 The Problem

Currently, TensorWeaver uses NumPy directly:

import numpy as np

class Tensor:
    def __init__(self, data):
        self.data = np.array(data)  # Always NumPy, hard-wired

    def __add__(self, other):
        return Tensor(self.data + other.data)  # NumPy operation

To use a GPU, we'd have to change every operation. Not ideal.
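
Concretely, every method would sprout a device check. Here is a hypothetical sketch of the code we want to avoid (NaiveTensor, its device attribute, and the cn alias for cuNumeric are made up for illustration):

class NaiveTensor:
    def __add__(self, other):
        # Every operation would repeat this branch. It doesn't scale.
        if self.device == 'gpu':
            return NaiveTensor(cn.add(self.data, other.data))
        return NaiveTensor(np.add(self.data, other.data))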

24.2 The Solution: Backend Abstraction

Create a backend interface:

# backend.py

class Backend:
    """Abstract backend for array operations."""

    def array(self, data, dtype=None):
        raise NotImplementedError

    def zeros(self, shape, dtype=None):
        raise NotImplementedError

    def ones(self, shape, dtype=None):
        raise NotImplementedError

    def randn(self, *shape):
        raise NotImplementedError

    def matmul(self, a, b):
        raise NotImplementedError

    def exp(self, x):
        raise NotImplementedError

    def log(self, x):
        raise NotImplementedError

    def sqrt(self, x):
        raise NotImplementedError

    def maximum(self, a, b):
        raise NotImplementedError
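
Any concrete backend fills in these methods. A quick sanity check (check_backend is a hypothetical helper, not part of TensorWeaver) exercises the whole interface:

def check_backend(backend):
    """Call every Backend method once; raises if any is missing."""
    a = backend.array([[1.0, 2.0], [3.0, 4.0]])
    b = backend.ones((2, 2))
    backend.zeros((2, 2))
    backend.randn(2, 2)
    backend.matmul(a, b)
    backend.exp(a)
    backend.log(a)
    backend.sqrt(a)
    backend.maximum(a, b)
    print("backend OK:", type(backend).__name__)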

24.3 NumPy Backend

The CPU backend is a thin wrapper around NumPy, defaulting everything to float32:

import numpy as np

class NumPyBackend(Backend):
    """CPU backend using NumPy."""

    def array(self, data, dtype=None):
        dtype = dtype or np.float32
        return np.array(data, dtype=dtype)

    def zeros(self, shape, dtype=None):
        dtype = dtype or np.float32
        return np.zeros(shape, dtype=dtype)

    def ones(self, shape, dtype=None):
        dtype = dtype or np.float32
        return np.ones(shape, dtype=dtype)

    def randn(self, *shape):
        return np.random.randn(*shape).astype(np.float32)

    def matmul(self, a, b):
        return a @ b

    def exp(self, x):
        return np.exp(x)

    def log(self, x):
        return np.log(x)

    def sqrt(self, x):
        return np.sqrt(x)

    def maximum(self, a, b):
        return np.maximum(a, b)
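
A quick check that the CPU backend behaves as expected:

backend = NumPyBackend()
x = backend.randn(2, 3)
w = backend.randn(3, 4)
y = backend.matmul(x, w)
print(y.shape, y.dtype)  # (2, 4) float32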

24.4 cuNumeric Backend

cuNumeric aims to be a drop-in, GPU-accelerated replacement for NumPy, so this backend mirrors the CPU one line for line:

try:
    import cunumeric as cn
    CUNUMERIC_AVAILABLE = True
except ImportError:
    CUNUMERIC_AVAILABLE = False

class CuNumericBackend(Backend):
    """GPU backend using cuNumeric."""

    def __init__(self):
        if not CUNUMERIC_AVAILABLE:
            raise RuntimeError("cuNumeric not available")

    def array(self, data, dtype=None):
        dtype = dtype or cn.float32
        return cn.array(data, dtype=dtype)

    def zeros(self, shape, dtype=None):
        dtype = dtype or cn.float32
        return cn.zeros(shape, dtype=dtype)

    def ones(self, shape, dtype=None):
        dtype = dtype or cn.float32
        return cn.ones(shape, dtype=dtype)

    def randn(self, *shape):
        return cn.random.randn(*shape).astype(cn.float32)

    def matmul(self, a, b):
        return a @ b

    def exp(self, x):
        return cn.exp(x)

    def log(self, x):
        return cn.log(x)

    def sqrt(self, x):
        return cn.sqrt(x)

    def maximum(self, a, b):
        return cn.maximum(a, b)
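
Whether cuNumeric is importable depends on the environment, so guard any direct use with the flag:

if CUNUMERIC_AVAILABLE:
    backend = CuNumericBackend()
    x = backend.randn(2, 3)  # allocated by the Legate runtime, on GPU when available
else:
    print("cuNumeric not installed; falling back to NumPyBackend")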

24.5 Global Backend Selection

A single module-level backend instance, plus helpers to swap it, keeps the rest of the library backend-agnostic:

# Global backend instance
_backend = NumPyBackend()

def get_backend():
    """Get current backend."""
    return _backend

def set_backend(backend):
    """Set global backend."""
    global _backend
    _backend = backend

def use_gpu():
    """Switch to GPU backend."""
    set_backend(CuNumericBackend())

def use_cpu():
    """Switch to CPU backend."""
    set_backend(NumPyBackend())
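
A possible refinement (an assumption about desired behavior, not part of the API above) is to prefer the GPU but fall back to the CPU instead of raising when cuNumeric is missing:

def use_gpu_if_available():
    """Hypothetical variant of use_gpu() that never raises."""
    if CUNUMERIC_AVAILABLE:
        set_backend(CuNumericBackend())
    else:
        set_backend(NumPyBackend())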

24.6 Updated Tensor Class

Tensor now asks the current backend for its arrays instead of calling NumPy directly:

class Tensor:
    def __init__(self, data, requires_grad=False):
        backend = get_backend()
        self.data = backend.array(data)
        self.requires_grad = requires_grad
        self.grad = None
        self.grad_fn = None
        self.parents = []

    def __add__(self, other):
        if not isinstance(other, Tensor):
            other = Tensor(other)
        # Operators like + need no backend call: NumPy and cuNumeric
        # arrays both implement them.
        result = Tensor(self.data + other.data)
        # ... gradient tracking ...
        return result

    def __matmul__(self, other):
        backend = get_backend()
        result = Tensor(backend.matmul(self.data, other.data))
        # ... gradient tracking ...
        return result
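
Every other operation follows the same recipe: fetch the current backend, call its primitive, wrap the result. For example, a ReLU helper could be written as follows (a sketch that, like the methods above, omits the gradient bookkeeping):

def relu(x):
    """Backend-agnostic ReLU; gradient tracking elided as above."""
    backend = get_backend()
    return Tensor(backend.maximum(x.data, 0.0))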

24.7 Using the Backend

import tensorweaver as tw

# Default: CPU
model = create_model()
train(model, data)  # Runs on CPU

# Switch to GPU
tw.use_gpu()
model = create_model()
train(model, data)  # Runs on GPU — same code!

# Switch back
tw.use_cpu()
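
To confirm the switch pays off, you can time a large matrix multiply on each backend. A rough harness, assuming tensorweaver exports Tensor and get_backend (timings vary by hardware):

import time

def time_matmul(n=2048):
    x = tw.Tensor(tw.get_backend().randn(n, n))
    y = tw.Tensor(tw.get_backend().randn(n, n))
    start = time.perf_counter()
    z = x @ y
    float(z.data.sum())  # force cuNumeric's deferred execution to finish
    return time.perf_counter() - start

tw.use_cpu()
print("CPU:", time_matmul())
try:
    tw.use_gpu()
    print("GPU:", time_matmul())
except RuntimeError:
    print("GPU backend unavailable")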

24.8 Context Manager

Flipping the global backend is blunt; a context manager scopes the change and restores the previous backend afterward:

from contextlib import contextmanager

@contextmanager
def device(backend_name):
    """Temporarily use a different backend."""
    old_backend = get_backend()
    try:
        if backend_name == 'gpu':
            set_backend(CuNumericBackend())
        elif backend_name == 'cpu':
            set_backend(NumPyBackend())
        else:
            raise ValueError(f"unknown device: {backend_name!r}")
        yield
    finally:
        set_backend(old_backend)

# Usage
with device('gpu'):
    # This runs on GPU
    result = expensive_computation()

# Back to default (CPU)
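
Because each device(...) call saves and restores whatever backend was active on entry, the contexts nest cleanly (step_one and friends are placeholders):

with device('gpu'):
    step_one()        # runs on GPU
    with device('cpu'):
        step_two()    # temporarily back on CPU
    step_three()      # GPU again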

24.9 Device Property

Track which device a tensor is on:

class Tensor:
    def __init__(self, data, requires_grad=False, device=None):
        self.device = device or get_backend()
        self.data = self.device.array(data)
        # ...

    def to(self, device):
        """Move tensor to another device."""
        if device == 'gpu':
            new_backend = CuNumericBackend()
        else:
            new_backend = NumPyBackend()

        # Convert data
        new_data = new_backend.array(self.data)
        return Tensor(new_data, requires_grad=self.requires_grad,
                      device=new_backend)

# Usage
x = Tensor([1, 2, 3])  # CPU
x_gpu = x.to('gpu')    # Move to GPU
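
One refinement worth considering (an assumption, not shown above): have to() return the tensor unchanged when it is already on the requested device:

    def to(self, device):
        """Variant of to() that avoids a pointless copy (a sketch)."""
        target = CuNumericBackend if device == 'gpu' else NumPyBackend
        if isinstance(self.device, target):
            return self  # already on the requested device
        new_backend = target()
        new_data = new_backend.array(self.data)
        return Tensor(new_data, requires_grad=self.requires_grad,
                      device=new_backend)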

24.10 Summary

  • A backend abstraction separates what we compute from which array library executes it
  • NumPyBackend runs on the CPU
  • CuNumericBackend runs on the GPU
  • The same model code works on both
  • Switch globally with use_gpu() / use_cpu(), or temporarily with device(...)

Next: Training on GPU.