[1]:
# Install in Google Colab
try:
    import google.colab
    !pip install -q -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ quantem-widget
except ImportError:
    pass  # Not in Colab, skip
[2]:
# Development conveniences: reload edited Python modules automatically and
# enable anywidget hot-module-reload so front-end edits appear live.
try:
    %load_ext autoreload
    %autoreload 2
    %env ANYWIDGET_HMR=1
except Exception:
    pass  # autoreload unavailable (Colab Python 3.12+)
env: ANYWIDGET_HMR=1

[Open In Colab]

# Mark2D — Quick Demo

Pick atomic column positions on a HAADF-STEM image with a hexagonal lattice. Click on bright atom columns to select up to 5 positions.

[3]:
import numpy as np
import torch
import quantem.widget
from quantem.widget import Mark2D
# Pick the best available torch backend: Apple-silicon GPU (MPS), then CUDA, else CPU.
device = torch.device("mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu")
def make_haadf_stem(size=256, spacing=18, sigma=2.8, device=None, seed=None):
    """Simulate a HAADF-STEM image with atomic columns on a hexagonal lattice.

    Parameters
    ----------
    size : int
        Edge length of the square image, in pixels.
    spacing : float
        Lattice constant (nearest-column distance), in pixels.
    sigma : float
        Gaussian width of each atomic column, in pixels.
    device : torch.device or str, optional
        Device for the torch computation. ``None`` (default) autodetects
        MPS → CUDA → CPU, matching the module-level ``device``.
    seed : int, optional
        Seed for the NumPy noise generator. ``None`` (default) keeps the
        previous non-deterministic behavior; pass an int for reproducibility.

    Returns
    -------
    np.ndarray
        ``(size, size)`` float32 image, clipped to be non-negative.
    """
    if device is None:
        device = torch.device("mps" if torch.backends.mps.is_available()
                              else "cuda" if torch.cuda.is_available() else "cpu")
    # Pixel coordinate grids, both (size, size)
    ys = torch.arange(size, device=device, dtype=torch.float32)
    xs = torch.arange(size, device=device, dtype=torch.float32)
    y, x = torch.meshgrid(ys, xs, indexing="ij")
    # Hexagonal lattice basis vectors
    a1 = torch.tensor([spacing, 0.0], device=device)
    a2 = torch.tensor([spacing * 0.5, spacing * (3**0.5) / 2], device=device)
    idx = torch.arange(-1, size // spacing + 2, device=device, dtype=torch.float32)
    ii, jj = torch.meshgrid(idx, idx, indexing="ij")
    ii = ii.reshape(-1)
    jj = jj.reshape(-1)
    cx = ii * a1[0] + jj * a2[0]  # (N,) column centers, x
    cy = ii * a1[1] + jj * a2[1]  # (N,) column centers, y
    # Keep only positions within one lattice spacing of the image bounds
    mask = (cx > -spacing) & (cx < size + spacing) & (cy > -spacing) & (cy < size + spacing)
    cx, cy = cx[mask], cy[mask]
    ii_filt, jj_filt = ii[mask], jj[mask]
    # Intensity variation (like mixed-Z columns): every third sublattice is brighter
    intensity = 0.7 + 0.3 * (((ii_filt + jj_filt) % 3) == 0).float()
    # Vectorized Gaussians, batched over positions so peak memory stays at
    # (batch, size, size) instead of (N, size, size) or a Python-level loop.
    img = torch.zeros(size, size, device=device, dtype=torch.float32)
    batch = 64
    for start in range(0, cx.numel(), batch):
        bx = cx[start:start + batch].view(-1, 1, 1)
        by = cy[start:start + batch].view(-1, 1, 1)
        bi = intensity[start:start + batch].view(-1, 1, 1)
        img += (bi * torch.exp(-((x - bx)**2 + (y - by)**2) / (2 * sigma**2))).sum(dim=0)
    img_np = img.cpu().numpy()
    # Add background and noise in NumPy (Poisson/normal noise unreliable on MPS)
    rng = np.random.default_rng(seed)
    img_np += rng.normal(0.08, 0.015, (size, size)).astype(np.float32)
    # Row-correlated "scan noise": one offset per scan line, broadcast across columns
    scan_noise = rng.normal(0, 0.01, (size, 1)).astype(np.float32) * np.ones((1, size), dtype=np.float32)
    img_np += scan_noise
    return np.clip(img_np, 0, None).astype(np.float32)
haadf = make_haadf_stem()
print(f"Generator ready (device={device})")
print(f"quantem.widget {quantem.widget.__version__}")

# Interactive point-picking widget: click up to 5 atomic columns.
# `w` must be the cell's LAST expression for Jupyter to render it —
# in the original it was followed by a print, so it never displayed.
w = Mark2D(haadf, scale=1.0, max_points=5)
w
Generator ready (device=mps)
quantem.widget 0.4.0a3

## Inspect widget state

After clicking on atom columns above, run the cell below to see a full summary of the widget state — image info, placed points with coordinates, display settings.

[4]:
w.summary()
Mark2D
════════════════════════════════
Image:    256×256
Data:     min=0.01679  max=1.118  mean=0.1897  dtype=float32
Display:  gray | auto contrast | linear
Points:   0/5
Marker:   circle red  size=12px