Saving results
Capture inference outputs as structured data or images so you can reuse them in downstream tools, dashboards, or datasets.
Estimated read time: 3 minutes
Saving results is part organization, part serialization. This page walks through exporting structured detections, writing overlays to disk, and handling batched outputs. Each section includes its own setup so you can copy and run examples independently.
Save structured detections as JSON
Convert inference data into plain Python dictionaries before serialization. This avoids issues with NumPy types.
Example
"""Detect objects in a sample image and serialize the detections to JSON."""
from pathlib import Path
import json
from degirum_tools import ModelSpec, remote_assets

# Describe which model to load and where inference should run.
spec = ModelSpec(
    model_name="yolov8n_coco--640x640_quant_hailort_multidevice_1",
    zoo_url="degirum/hailo",
    inference_host_address="@local",
    model_properties={"device_type": ["HAILORT/HAILO8L", "HAILORT/HAILO8"]},
)
detector = spec.load_model()
inference = detector(remote_assets.three_persons)

# Make sure the destination folder exists before any file writes.
results_dir = Path("saved-results")
results_dir.mkdir(parents=True, exist_ok=True)


def detection_to_dict(det):
    """Return a JSON-serializable dict for one detection (plain Python types only)."""
    return {
        "label": det.get("label"),
        "score": float(det.get("score", 0)),
        "bbox": [float(coord) for coord in det.get("bbox", [])],
        "category_id": det.get("category_id"),
    }


json_path = results_dir / "three-persons.json"
serializable = [detection_to_dict(det) for det in inference.results]
with json_path.open("w", encoding="utf-8") as f:
    json.dump(serializable, f, indent=2)
print(f"Wrote {json_path}")
Example output:
Wrote saved-results/three-persons.json
Need to capture metadata for compliance or analytics? Append additional keys (e.g., timestamps or camera IDs) before writing the JSON file.
Export detections as CSV
CSV exports integrate well with spreadsheets and BI tools. Flatten bounding boxes into separate columns for easier filtering.
Example
"""Detect objects in a sample image and flatten the detections into a CSV file."""
from pathlib import Path
import csv
from degirum_tools import ModelSpec, remote_assets

spec = ModelSpec(
    model_name="yolov8n_coco--640x640_quant_hailort_multidevice_1",
    zoo_url="degirum/hailo",
    inference_host_address="@local",
    model_properties={"device_type": ["HAILORT/HAILO8L", "HAILORT/HAILO8"]},
)
detector = spec.load_model()
inference = detector(remote_assets.three_persons)

results_dir = Path("saved-results")
results_dir.mkdir(parents=True, exist_ok=True)

# One column per bounding-box corner so spreadsheets can filter on coordinates.
COLUMNS = ["label", "score", "bbox_xmin", "bbox_ymin", "bbox_xmax", "bbox_ymax"]

csv_path = results_dir / "three-persons.csv"
with csv_path.open("w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=COLUMNS)
    writer.writeheader()
    for det in inference.results:
        # Fall back to four None values when a detection carries no bbox.
        x_min, y_min, x_max, y_max = det.get("bbox", [None] * 4)
        row = {"label": det.get("label"), "score": float(det.get("score", 0))}
        for column, value in zip(COLUMNS[2:], (x_min, y_min, x_max, y_max)):
            row[column] = float(value) if value is not None else None
        writer.writerow(row)
print(f"Wrote {csv_path}")
Example output:
Wrote saved-results/three-persons.csv
Save overlay images
Use OpenCV to create PNGs you can share or archive. Swap in result.image to capture the pre-annotation frame.
Example
"""Detect objects in a sample image and save the annotated frame as a PNG."""
from pathlib import Path
import cv2
from degirum_tools import ModelSpec, remote_assets

spec = ModelSpec(
    model_name="yolov8n_coco--640x640_quant_hailort_multidevice_1",
    zoo_url="degirum/hailo",
    inference_host_address="@local",
    model_properties={"device_type": ["HAILORT/HAILO8L", "HAILORT/HAILO8"]},
)
detector = spec.load_model()
result = detector(remote_assets.three_persons)

results_dir = Path("saved-results")
results_dir.mkdir(parents=True, exist_ok=True)

# image_overlay holds the annotated frame; result.image is the raw input frame.
overlay_path = results_dir / "three-persons-overlay.png"
cv2.imwrite(str(overlay_path), result.image_overlay)
print(f"Wrote {overlay_path}")
Example output:
Wrote saved-results/three-persons-overlay.png
Batch saves with predict_dir
predict_dir yields (image_path, inference_results) tuples. Use the filenames to generate deterministic output names and reuse the JSON helper from above.
Example
"""Run detection over a folder of images, writing one overlay PNG and one JSON file per image."""
from pathlib import Path
import json
import cv2
from degirum_tools import ModelSpec, predict_dir

spec = ModelSpec(
    model_name="yolov8n_coco--640x640_quant_hailort_multidevice_1",
    zoo_url="degirum/hailo",
    inference_host_address="@local",
    model_properties={"device_type": ["HAILORT/HAILO8L", "HAILORT/HAILO8"]},
)
detector = spec.load_model()

results_dir = Path("saved-results")
results_dir.mkdir(parents=True, exist_ok=True)


def detection_to_dict(det):
    """Return a JSON-serializable dict for one detection (plain Python types only)."""
    return {
        "label": det.get("label"),
        "score": float(det.get("score", 0)),
        "bbox": [float(coord) for coord in det.get("bbox", [])],
        "category_id": det.get("category_id"),
    }


input_dir = Path("/path/to/images")  # replace with your folder
for image_path, inference_results in predict_dir(detector, str(input_dir)):
    # Derive deterministic output names from each source file's stem.
    stem = Path(image_path).stem
    overlay_path = results_dir / f"{stem}-overlay.png"
    json_path = results_dir / f"{stem}.json"
    cv2.imwrite(str(overlay_path), inference_results.image_overlay)
    detections = [detection_to_dict(det) for det in inference_results.results]
    with json_path.open("w", encoding="utf-8") as f:
        json.dump(detections, f)
    print(f"Wrote {overlay_path} and {json_path}")
Example output:
Wrote saved-results/frame-001-overlay.png and saved-results/frame-001.json
Wrote saved-results/frame-002-overlay.png and saved-results/frame-002.json
Verify filesystem permissions before running batch exports, and double-check available disk space when saving long video sequences.
Last updated
Was this helpful?

