Open In Colab

# Show4D — All Features

Comprehensive demo of every Show4D feature using a synthetic nanoparticle diffraction dataset (128×128 scan, 128×128 detector). ~200 crystalline particles with 10 distinct zone-axis orientations scattered on an amorphous substrate. Click on a nanoparticle to see its Bragg spots, Kikuchi bands, and HOLZ ring. Click on the background to see diffuse amorphous rings.

[1]:
# Install in Google Colab
# Install in Google Colab.
# Importing google.colab only succeeds inside a Colab runtime; anywhere else
# the ImportError skips the install. The package is pulled from TestPyPI,
# with the main PyPI index available for resolving dependencies.
try:
    import google.colab
    !pip install -q -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ quantem-widget
except ImportError:
    pass  # Not in Colab, skip
[2]:
# Development conveniences: auto-reload edited modules on each cell run and
# enable anywidget hot-module-reload via the ANYWIDGET_HMR env var.
# Wrapped in try/except because these IPython magics can be unavailable in
# some environments (e.g. autoreload on Colab Python 3.12+).
try:
    %load_ext autoreload
    %autoreload 2
    %env ANYWIDGET_HMR=1
except Exception:
    pass  # autoreload unavailable (Colab Python 3.12+)
env: ANYWIDGET_HMR=1

1. Synthetic Nanoparticle Dataset#

~200 crystalline particles with 10 distinct zone-axis orientations on an amorphous substrate (128×128 scan, 128×128 detector). GPU-accelerated template computation via PyTorch.

  • Crystalline particles: Bragg spots with FCC selection rules, Kikuchi bands, HOLZ ring

  • Amorphous background: diffuse radial rings, no sharp spots

  • Particle sizes follow log-normal distribution (2-15 px radius)

  • Density gradient across the field of view

[3]:
import math
import numpy as np
import torch
import quantem.widget
from quantem.widget import Show4D
# Pick the best available torch device: Apple MPS first, then CUDA, else CPU.
if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device: {device}")
def make_nanoparticle_sample(nav=128, det=128, n_particles=200, n_orientations=10):
    """Nanoparticle sample: crystalline particles on amorphous substrate.

    GPU-accelerated template computation via PyTorch, batched assignment.

    Parameters
    ----------
    nav : int
        Edge length of the square navigation (scan) grid, in pixels.
    det : int
        Edge length of the square detector, in pixels.
    n_particles : int
        Number of circular particles stamped onto the scan area.
    n_orientations : int
        Number of distinct diffraction templates (zone-axis orientations).

    Returns
    -------
    data : numpy.ndarray
        float32 array of shape (nav, nav, det, det): one Poisson-noised
        diffraction pattern per scan position.
    particle_map : numpy.ndarray
        int array of shape (nav, nav); -1 marks amorphous background,
        otherwise the orientation index of the particle at that pixel.

    Note: uses a fixed RNG seed, so the draw order below determines the
    exact dataset — do not reorder the rng calls.
    """
    rng = np.random.default_rng(42)  # fixed seed -> reproducible dataset
    c = det // 2  # detector center (direct-beam position)
    # Detector grid on GPU
    yy, xx = torch.meshgrid(
        torch.arange(det, device=device, dtype=torch.float32),
        torch.arange(det, device=device, dtype=torch.float32), indexing="ij",
    )
    r = torch.sqrt((xx - c) ** 2 + (yy - c) ** 2)  # radial distance from center
    # Build particle map on CPU (small, integer operations)
    particle_map = np.full((nav, nav), -1, dtype=int)  # -1 = amorphous background
    # Log-normal size distribution, clipped to 2-15 px radii
    radii = rng.lognormal(mean=1.5, sigma=0.5, size=n_particles).clip(2, 15)
    # Beta-distributed rows give the density gradient across the field of view
    centers_x = rng.beta(2, 1.2, n_particles) * (nav - 4) + 2
    centers_y = rng.uniform(2, nav - 2, n_particles)
    orientations_arr = rng.integers(0, n_orientations, size=n_particles)
    ii, jj = np.ogrid[:nav, :nav]
    # Stamp each particle as a filled disk; later particles overwrite earlier ones
    for p in range(n_particles):
        mask = (ii - centers_x[p]) ** 2 + (jj - centers_y[p]) ** 2 <= radii[p] ** 2
        particle_map[mask] = orientations_arr[p]
    print(f"Particle coverage: {np.sum(particle_map >= 0) / particle_map.size:.1%}")
    # Per-orientation crystallographic properties
    grain_rot = rng.uniform(0, np.pi, n_orientations)  # in-plane lattice rotation (rad)
    grain_a1 = rng.uniform(11, 22, n_orientations)  # |a1| reciprocal basis length (px)
    grain_a2 = rng.uniform(11, 22, n_orientations)  # |a2| reciprocal basis length (px)
    grain_angle = rng.uniform(55, 125, n_orientations)  # angle between a1 and a2 (deg)
    # Precompute amorphous pattern on GPU → CPU: weak central decay plus
    # three diffuse rings centered near r = 15, 32, 48 px
    amorphous_t = (0.02 * torch.exp(-r / 45) + 0.12 * torch.exp(-((r - 15) ** 2) / 50)
                 + 0.06 * torch.exp(-((r - 32) ** 2) / 80) + 0.03 * torch.exp(-((r - 48) ** 2) / 100))
    amorphous_np = amorphous_t.cpu().numpy().astype(np.float32)
    # Precompute per-orientation diffraction templates on GPU → CPU
    templates = np.zeros((n_orientations, det, det), dtype=np.float32)
    for o in range(n_orientations):
        rot = grain_rot[o]
        a1m, a2m = grain_a1[o], grain_a2[o]
        angle = np.radians(grain_angle[o])
        # Reciprocal-lattice basis vectors in detector pixel coordinates
        a1x, a1y = a1m * np.cos(rot), a1m * np.sin(rot)
        a2x, a2y = a2m * np.cos(rot + angle), a2m * np.sin(rot + angle)
        # Diffuse background plus a soft-edged central (direct) beam disk (~7 px)
        dp = 0.03 * torch.exp(-r / 50)
        dp = dp + torch.clamp(1.0 - torch.clamp(r - 7, min=0) / 1.5, min=0, max=1)
        # Bragg spots on the (h, k) lattice, orders -4..4
        for h in range(-4, 5):
            for k in range(-4, 5):
                if h == 0 and k == 0:
                    continue  # direct beam already drawn above
                sx = c + h * a1x + k * a2x
                sy = c + h * a1y + k * a2y
                if not (-5 < sx < det + 5 and -5 < sy < det + 5):
                    continue  # spot falls outside the detector (5 px margin)
                # Parity selection rule: strong spot when h+k is even, weak otherwise
                f = 0.6 if (h + k) % 2 == 0 else 0.07
                g_sq = (sx - c) ** 2 + (sy - c) ** 2
                # Gaussian spot whose amplitude decays with |g|^2
                dp = dp + f * math.exp(-g_sq / 5500) * torch.exp(-((xx - sx) ** 2 + (yy - sy) ** 2) / 6.5)
        # Kikuchi band pairs along three low-order g-vectors
        for dh, dk, s in [(1, 0, 0.018), (0, 1, 0.014), (1, 1, 0.009)]:
            gx = dh * a1x + dk * a2x
            gy = dh * a1y + dk * a2y
            g_len = math.sqrt(gx ** 2 + gy ** 2)
            if g_len < 1:
                continue  # degenerate g-vector, skip
            # Signed distance from the center, perpendicular to g
            perp = ((xx - c) * (-gy) + (yy - c) * gx) / g_len
            # Two parallel Gaussian lines at ±|g|/2 (the band edges)
            band = torch.exp(-((perp - g_len / 2) ** 2) / 16) + torch.exp(-((perp + g_len / 2) ** 2) / 16)
            dp = dp + s * band * torch.exp(-r / 55)
        # HOLZ ring: narrow Gaussian ring near r = 50 px
        dp = dp + 0.02 * torch.exp(-((r - 50) ** 2) / 4.5)
        templates[o] = dp.cpu().numpy()
    # Allocate output on CPU (1 GB)
    data = np.zeros((nav, nav, det, det), dtype=np.float32)
    BATCH = 2048  # scan positions per batch, bounds temporary memory use
    # Amorphous positions (batched)
    amorphous_idx = np.argwhere(particle_map == -1)
    for start in range(0, len(amorphous_idx), BATCH):
        idx = amorphous_idx[start:start + BATCH]
        n = len(idx)
        batch = np.empty((n, det, det), dtype=np.float32)
        batch[:] = amorphous_np
        # Additive Gaussian noise, then clip negatives before Poisson sampling
        batch += 0.008 * rng.standard_normal((n, det, det)).astype(np.float32)
        np.maximum(batch, 0, out=batch)
        # Shot noise at an effective dose of 300 counts per unit intensity
        data[idx[:, 0], idx[:, 1]] = rng.poisson(np.clip(batch * 300, 0, 1e6)).astype(np.float32) / 300
    # Crystalline positions (batched per orientation)
    for o in range(n_orientations):
        o_idx = np.argwhere(particle_map == o)
        if len(o_idx) == 0:
            continue
        # Per-position intensity factor in [0.5, 1.0) — thickness-like variation
        t_vals = (0.5 + 0.5 * rng.random(len(o_idx))).astype(np.float32)
        for start in range(0, len(o_idx), BATCH):
            idx = o_idx[start:start + BATCH]
            t_batch = t_vals[start:start + BATCH]
            scaled = templates[o][None, :, :] * t_batch[:, None, None]
            np.maximum(scaled, 0, out=scaled)
            # Shot noise at an effective dose of 400 counts per unit intensity
            data[idx[:, 0], idx[:, 1]] = rng.poisson(np.clip(scaled * 400, 0, 1e6)).astype(np.float32) / 400
    return data, particle_map
# Build the synthetic dataset and report its basic properties.
data, pmap = make_nanoparticle_sample()
lo, hi = data.min(), data.max()
n_orient = len(np.unique(pmap[pmap >= 0]))
print(f"Shape: {data.shape}, dtype: {data.dtype}")
print(f"Range: [{lo:.3f}, {hi:.3f}]")
print(f"Memory: {data.nbytes / 1e6:.0f} MB")
print(f"Orientations: {n_orient}")
print(f"quantem.widget {quantem.widget.__version__}")
Using device: mps
Particle coverage: 57.9%
Shape: (128, 128, 128, 128), dtype: float32
Range: [0.000, 1.237]
Memory: 1074 MB
Orientations: 10
quantem.widget 0.4.0a3

2. Basic Viewer#

Default view: navigation image (mean of all diffraction patterns) on the left, diffraction pattern at the selected position on the right. Click on a nanoparticle to see its zone-axis pattern. Click on the background to see diffuse amorphous rings.

[4]:
# Default view: mean diffraction pattern as nav image, per-position DP on the right.
basic_opts = dict(
    title="Nanoparticle Diffraction",
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
w_basic = Show4D(data, **basic_opts)
w_basic
[4]:

3. Custom Navigation Image#

Override the default mean image with a custom navigation image. Here we use the standard deviation across diffraction patterns, which highlights nanoparticle positions (high structural variation) vs the uniform amorphous background.

[5]:
# Std-dev over the detector axes — particles show high variation,
# the amorphous background is comparatively uniform.
nav_std = np.std(data, axis=(2, 3))
print(f"Nav image shape: {nav_std.shape}")
print(f"Nav std range: [{nav_std.min():.4f}, {nav_std.max():.4f}]")
w_nav = Show4D(
    data,
    nav_image=nav_std,
    title="Custom Nav (Std Dev)",
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
w_nav
Nav image shape: (128, 128)
Nav std range: [0.0333, 0.1229]
[5]:

4. Programmatic Position Control#

Set the navigation position from Python. The signal panel updates automatically.

[6]:
w_pos = Show4D(
    data,
    title="Position Control",
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
# Drive the crosshair from Python; the signal panel follows automatically.
origin = (0, 0)
w_pos.position = origin
print(f"Position: {w_pos.position}")
print(f"Nav shape: {w_pos.nav_shape}, Sig shape: {w_pos.sig_shape}")
w_pos
Position: (0, 0)
Nav shape: (128, 128), Sig shape: (128, 128)
[6]:
[7]:
# Jump to another scan position programmatically.
target = (96, 64)
w_pos.position = target
print(f"Moved to: {w_pos.position}")
Moved to: (96, 64)

5. ROI Modes#

Draw ROI masks on the navigation image to average diffraction patterns from a region. Three shapes available: circle, square, and rectangle. When a ROI is active, the signal panel shows the averaged diffraction pattern across all positions within the ROI.

[8]:
# Circle ROI — select a cluster of nanoparticles and average their patterns.
w_circle = Show4D(
    data, title="Circle ROI",
    nav_pixel_size=2.39, nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46, sig_pixel_unit="mrad",
)
circle_roi = {
    "roi_mode": "circle",
    "roi_center_x": 64.0,
    "roi_center_y": 64.0,
    "roi_radius": 15.0,
}
for attr, value in circle_roi.items():
    setattr(w_circle, attr, value)
w_circle
[8]:
[9]:
# Square ROI
square_opts = dict(
    title="Square ROI",
    nav_pixel_size=2.39, nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46, sig_pixel_unit="mrad",
)
w_square = Show4D(data, **square_opts)
w_square.roi_mode = "square"
w_square.roi_center_x = 80.0
w_square.roi_center_y = 40.0
w_square.roi_radius = 12.0
w_square
[9]:
[10]:
# Rectangle ROI — width and height are set independently.
w_rect = Show4D(
    data,
    title="Rectangle ROI",
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
for attr, value in [
    ("roi_mode", "rect"),
    ("roi_center_x", 64.0),
    ("roi_center_y", 64.0),
    ("roi_width", 40.0),
    ("roi_height", 16.0),
]:
    setattr(w_rect, attr, value)
w_rect
[10]:

6. Scale Bars#

Scale bars appear automatically when pixel sizes and units are provided. The nanoparticle dataset uses 2.39 Å/px (real-space) and 0.46 mrad/px (diffraction). Unit conversion (Å → nm) happens automatically at ≥10 Å.

[11]:
# Without calibration — shows pixel units
w_uncal = Show4D(data, title="Without Calibration")
print(f"Nav: {w_uncal.nav_pixel_size} {w_uncal.nav_pixel_unit}/px (uncalibrated)")
# With calibration — shows physical scale bars
calibration = dict(
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
w_cal = Show4D(data, title="With Calibration", **calibration)
print(f"Nav: {w_cal.nav_pixel_size} {w_cal.nav_pixel_unit}/px")
print(f"Sig: {w_cal.sig_pixel_size} {w_cal.sig_pixel_unit}/px")
w_cal
Nav: 0.0 px/px (uncalibrated)
Nav: 2.39 Å/px
Sig: 0.46 mrad/px
[11]:

7. Non-Square Shapes#

Show4D handles non-square navigation arrays. The canvas automatically adjusts aspect ratio. Here we crop the nanoparticle dataset to a rectangular region.

[12]:
# Crop the navigation space to a rectangle; the canvas adapts its aspect ratio.
nav_crop = (slice(None, 96), slice(None, 48))
data_rect = data[nav_crop].copy()
print(f"Non-square shape: {data_rect.shape}")
print(f"Memory: {data_rect.nbytes / 1e6:.1f} MB")
w_nonsquare = Show4D(
    data_rect,
    title="Non-Square (96\u00d748 nav, 128\u00d7128 det)",
    nav_pixel_size=2.39,
    nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46,
    sig_pixel_unit="mrad",
)
w_nonsquare
Non-square shape: (96, 48, 128, 128)
Memory: 302.0 MB
[12]:

8. Data Range and Statistics#

Access computed statistics and data ranges for both panels.

[13]:
w_stats = Show4D(
    data, title="Statistics Demo",
    nav_pixel_size=2.39, nav_pixel_unit="\u00c5",
    sig_pixel_size=0.46, sig_pixel_unit="mrad",
)

def _fmt_stats(stats):
    # Render each statistic with four decimal places for the report below.
    return [f"{s:.4f}" for s in stats]

print("Navigation image:")
print(f"  Data range: [{w_stats.nav_data_min:.4f}, {w_stats.nav_data_max:.4f}]")
print(f"  Stats [mean, min, max, std]: {_fmt_stats(w_stats.nav_stats)}")
print()
print("Signal (current frame):")
print(f"  Data range: [{w_stats.sig_data_min:.4f}, {w_stats.sig_data_max:.4f}]")
print(f"  Stats [mean, min, max, std]: {_fmt_stats(w_stats.sig_stats)}")
print()
print(f"repr: {w_stats}")
w_stats
Navigation image:
  Data range: [0.0210, 0.0493]
  Stats [mean, min, max, std]: ['0.0357', '0.0210', '0.0493', '0.0055']

Signal (current frame):
  Data range: [0.0000, 1.2375]
  Stats [mean, min, max, std]: ['0.0353', '0.0000', '0.9050', '0.0920']

repr: Show4D(shape=(128, 128, 128, 128), pos=(64, 64))
[13]:

9. State Persistence#

Save and restore all display settings — colormap, calibration, ROI config, snap settings — to a JSON file. Resume analysis after a kernel restart or share exact display state with a colleague.

[14]:
# Inspect current state
state_opts = {
    "title": "Nanoparticle Analysis",
    "cmap": "viridis",
    "log_scale": True,
    "nav_pixel_size": 2.39,
    "nav_pixel_unit": "\u00c5",
    "sig_pixel_size": 0.46,
    "sig_pixel_unit": "mrad",
}
w_state = Show4D(data, **state_opts)
w_state.summary()
w_state
Nanoparticle Analysis
════════════════════════════════
Nav:      128×128 (2.39 Å/px)
Signal:   128×128 (0.46 mrad/px)
Position: (64, 64)
Display:  viridis | auto contrast | log
[14]:
[15]:
# Save state to JSON
w_state.save("show4d_state.json")
print("Saved to show4d_state.json")
# Inspect the state dict
import json
print(json.dumps(w_state.state_dict(), indent=2))
Saved to show4d_state.json
{
  "title": "Nanoparticle Analysis",
  "cmap": "viridis",
  "log_scale": true,
  "auto_contrast": true,
  "show_stats": true,
  "show_controls": true,
  "show_fft": false,
  "disabled_tools": [],
  "hidden_tools": [],
  "percentile_low": 0.5,
  "percentile_high": 99.5,
  "nav_pixel_size": 2.39,
  "sig_pixel_size": 0.46,
  "nav_pixel_unit": "\u00c5",
  "sig_pixel_unit": "mrad",
  "roi_mode": "off",
  "roi_reduce": "mean",
  "roi_center_row": 64.0,
  "roi_center_col": 64.0,
  "roi_radius": 19.2,
  "roi_radius_inner": 9.6,
  "roi_width": 38.4,
  "roi_height": 19.2,
  "snap_enabled": false,
  "snap_radius": 5,
  "path_interval_ms": 100,
  "path_loop": true,
  "profile_line": [],
  "profile_width": 1
}
[16]:
# Restore from file — all settings come back
w_restored = Show4D(data, state="show4d_state.json")
print(f"Restored: cmap={w_restored.cmap}, log_scale={w_restored.log_scale}")
print(f"Nav pixel: {w_restored.nav_pixel_size} {w_restored.nav_pixel_unit}")
w_restored
Restored: cmap=viridis, log_scale=True
Nav pixel: 2.39 Å
[16]:
[17]:
# Clean up
from pathlib import Path
Path("show4d_state.json").unlink(missing_ok=True)