Skip to content

Tools: Read

Model reading and inspection tools.

build_model_summary(doc, state)

Build a model summary.

Source code in src/idfkit_mcp/tools/read.py
def build_model_summary(doc: IDFDocument[Literal[True]], state: ServerState) -> ModelSummary:
    """Build a model summary.

    Aggregates object counts per schema group and per object type, and records
    the model version, source file path, total object count, and Zone count.

    Args:
        doc: The loaded (strict) IDF document to summarize.
        state: Server state providing the schema (for group lookup) and file path.

    Returns:
        A ModelSummary with version, file path, totals, and per-group breakdowns
        (groups sorted alphabetically).
    """
    from idfkit import version_string

    groups: dict[str, dict[str, int]] = {}
    total_objects = 0
    zone_count = 0

    # The schema lookup is loop-invariant; hoist it out of the per-type loop.
    schema = state.schema

    for obj_type, collection in doc.items():
        count = len(collection)
        total_objects += count
        if obj_type == "Zone":
            zone_count = count
        # Fall back to "Unknown" when no schema is loaded, and to "Ungrouped"
        # when the schema has no group for this type.
        obj_group = (schema.get_group(obj_type) if schema else "Unknown") or "Ungrouped"
        groups.setdefault(obj_group, {})[obj_type] = count

    return ModelSummary(
        version=version_string(doc.version),
        file_path=str(state.file_path) if state.file_path else None,
        total_objects=total_objects,
        zone_count=zone_count,
        groups={g: GroupSummary(count=sum(v.values()), types=v) for g, v in sorted(groups.items())},
    )

build_references(doc, name)

Build a bidirectional references result for a given object name.

Source code in src/idfkit_mcp/tools/read.py
def build_references(doc: IDFDocument[Literal[True]], name: str) -> ReferencesResult:
    """Build a bidirectional references result for a given object name."""
    # Inbound edges: objects elsewhere in the document that point at `name`.
    incoming = [{"object_type": ref.obj_type, "name": ref.name} for ref in doc.get_referencing(name)]

    # Outbound edges: names the target object itself points at. Left empty
    # when no object with that name can be located.
    outgoing: list[str] = []
    target = _find_object_by_name(doc, name)
    if target is not None:
        outgoing = sorted(doc.get_references(target))

    return ReferencesResult.model_validate({
        "name": name,
        "referenced_by": incoming,
        "referenced_by_count": len(incoming),
        "references": outgoing,
        "references_count": len(outgoing),
    })

convert_osm_to_idf(osm_path, output_path, allow_newer_versions=True, overwrite=False)

Convert an OSM model to IDF and load it.

Source code in src/idfkit_mcp/tools/read.py
@tool(annotations=_LOAD)
def convert_osm_to_idf(
    osm_path: Annotated[str, Field(description="Source .osm path.")],
    output_path: Annotated[str, Field(description="Output .idf path.")],
    allow_newer_versions: Annotated[bool, Field(description="Allow newer OSM versions.")] = True,
    overwrite: Annotated[bool, Field(description="Overwrite existing output.")] = False,
) -> ConvertOsmResult:
    """Convert an OSM model to IDF and load it.

    Uses OpenStudio's VersionTranslator to load the OSM file and its
    ForwardTranslator to produce an IDF workspace, saves that to
    ``output_path``, then loads the result as the active session model.

    Raises:
        ToolError: If the OpenStudio SDK is not importable, either path is
            invalid, the OSM fails to load, or the translated IDF cannot
            be saved.
    """
    from pathlib import Path

    from idfkit import load_idf

    # OpenStudio is an optional heavy dependency; fail with a user-facing
    # ToolError (not ImportError) when it is missing.
    try:
        import openstudio  # type: ignore[import-untyped]
    except ImportError:
        raise ToolError(
            "OpenStudio SDK not available. "
            "Reinstall 'idfkit-mcp' in this environment, or use the Docker image where dependencies are preinstalled."
        ) from None
    openstudio = cast(Any, openstudio)

    input_path = Path(osm_path)
    out_path = Path(output_path)

    # Validate the input path before touching OpenStudio.
    if input_path.suffix.lower() != ".osm":
        raise ToolError(f"Input file must have .osm extension: '{input_path}'.")
    if not input_path.exists():
        raise ToolError(f"Input OSM file not found: '{input_path}'.")
    if not input_path.is_file():
        raise ToolError(f"Input OSM path is not a file: '{input_path}'.")

    # Validate the output path; refuse to clobber unless overwrite=True.
    if out_path.suffix.lower() != ".idf":
        raise ToolError(f"Output file must have .idf extension: '{out_path}'.")
    if out_path.exists() and not overwrite:
        raise ToolError(f"Output file already exists: '{out_path}'. Set overwrite=True to replace it.")
    if not out_path.parent.exists():
        raise ToolError(f"Output directory does not exist: '{out_path.parent}'.")

    # OpenStudio's C++ ForwardTranslator writes warnings directly to fd 1
    # (C-level stdout), which corrupts the MCP stdio JSON-RPC stream.
    # Redirect fd 1 → fd 2 (stderr) during translation to keep the transport clean.
    import os

    saved_fd = os.dup(1)  # keep a handle to the real stdout so it can be restored
    os.dup2(2, 1)
    try:
        # Step 1: load the OSM, upgrading its schema version if needed.
        version_translator = openstudio.osversion.VersionTranslator()
        version_translator.setAllowNewerVersions(allow_newer_versions)
        optional_model = version_translator.loadModel(openstudio.path(str(input_path)))
        if optional_model.empty():
            raise ToolError(f"Failed to load OSM model: '{input_path}'.")

        # Step 2: forward-translate the model to an EnergyPlus workspace.
        model = optional_model.get()
        forward_translator = openstudio.energyplus.ForwardTranslator()
        workspace = forward_translator.translateModel(model)

        # Step 3: persist the IDF; `saved` is falsy on failure.
        saved = workspace.save(openstudio.path(str(out_path)), overwrite)
        if not saved:
            raise ToolError(f"Failed to save translated IDF to '{out_path}'.")
    finally:
        # Always restore the real stdout, even if translation raised.
        os.dup2(saved_fd, 1)
        os.close(saved_fd)

    # Load the freshly written IDF as the active model and reset session state.
    doc = load_idf(str(out_path), strict=True)
    state = get_state()
    state.document = doc
    state.schema = doc.schema
    state.file_path = out_path
    state.simulation_result = None
    state.save_session()
    logger.info("Converted OSM %s -> %s", input_path, out_path)

    # `openStudioVersion` may not exist on all SDK builds; degrade gracefully.
    version_getter = getattr(openstudio, "openStudioVersion", None)
    openstudio_version = str(version_getter()) if callable(version_getter) else "unknown"

    summary = build_model_summary(doc, state)
    return ConvertOsmResult(
        **summary.model_dump(),
        status="converted",
        osm_path=str(input_path),
        output_path=str(out_path),
        openstudio_version=openstudio_version,
        allow_newer_versions=allow_newer_versions,
        translator_warnings_count=len(version_translator.warnings()) + len(forward_translator.warnings()),
        translator_errors_count=len(version_translator.errors()) + len(forward_translator.errors()),
    )

get_change_log(limit=20)

Return recent model mutation history for this session.

Records add, update, remove, rename, duplicate, load, and new-model operations in chronological order. Useful for auditing what the agent has changed and verifying that edits were applied as intended.

The log is in-memory only and resets when clear_session is called.

Source code in src/idfkit_mcp/tools/read.py
@tool(annotations=_READ_ONLY)
def get_change_log(
    limit: Annotated[int, Field(description="Maximum entries to return.")] = 20,
) -> GetChangeLogResult:
    """Return recent model mutation history for this session.

    Records add, update, remove, rename, duplicate, load, and new-model operations
    in chronological order. Useful for auditing what the agent has changed and
    verifying that edits were applied as intended.

    The log is in-memory only and resets when clear_session is called.
    """
    state = get_state()
    # Clamp to [0, MAX_CHANGE_LOG]. Without the lower clamp, limit=0 would
    # slice as change_log[-0:] and return the ENTIRE log, and a negative
    # limit would slice from an arbitrary positive index.
    limit = max(0, min(limit, MAX_CHANGE_LOG))
    entries = state.change_log[-limit:] if limit > 0 else []
    return GetChangeLogResult(
        entry_count=len(entries),
        entries=[ChangeLogEntry(**e) for e in entries],
    )

list_objects(object_type, limit=50)

List objects of a type with names and required fields.

Source code in src/idfkit_mcp/tools/read.py
@tool(annotations=_READ_ONLY)
def list_objects(
    object_type: Annotated[str, Field(description='EnergyPlus object type (e.g. "Zone").')],
    limit: Annotated[int, Field(description="Maximum objects to return.")] = 50,
) -> ListObjectsResult:
    """List objects of a type with names and required fields."""
    # Hard cap of 200 regardless of the requested limit.
    capped = min(limit, 200)

    state = get_state()
    doc = state.require_model()

    if object_type not in doc:
        raise ToolError(f"No objects of type '{object_type}' in the model.")

    collection = doc.get_collection(object_type)
    total = len(collection)
    selected = list(collection)[:capped]
    objects = [serialize_object(item, schema=state.schema, brief=True) for item in selected]

    logger.debug("list_objects: type=%s total=%d returned=%d", object_type, total, len(objects))
    return ListObjectsResult(object_type=object_type, total=total, returned=len(objects), objects=objects)

load_model(file_path=None, upload_name=None, version=None)

Open an IDF or epJSON file as the active model.

Provide exactly one source: file_path for files on the server's disk, or upload_name to load a file the user dropped into the file_manager UI.

Source code in src/idfkit_mcp/tools/read.py
@tool(annotations=_LOAD)
def load_model(
    file_path: Annotated[
        str | None, Field(description="Server-local path to an IDF/epJSON file (stdio/local clients).")
    ] = None,
    upload_name: Annotated[
        str | None,
        Field(description="Name of a file uploaded via the file_manager UI tool (remote clients)."),
    ] = None,
    version: Annotated[str | None, Field(description='Version override as "X.Y.Z".')] = None,
) -> ModelSummary:
    """Open an IDF or epJSON file as the active model.

    Provide exactly one source: ``file_path`` for files on the server's disk, or
    ``upload_name`` to load a file the user dropped into the file_manager UI.

    Raises:
        ToolError: If zero or both sources are given, the version string is
            malformed, or the upload name is unknown.
    """
    from pathlib import Path

    from idfkit import load_epjson, load_idf

    # XOR check: exactly one of the two sources must be supplied.
    if (file_path is None) == (upload_name is None):
        raise ToolError("Provide exactly one of 'file_path' or 'upload_name'.")

    state = get_state()
    ver = None
    if version is not None:
        # Validate the override strictly: exactly three integer components.
        # A bare split/index would raise IndexError on "9.4" and silently
        # truncate "1.2.3.4"; surface a user-facing ToolError instead.
        try:
            major, minor, patch = (int(part) for part in version.split("."))
        except ValueError:
            raise ToolError(f"Invalid version '{version}': expected 'X.Y.Z' with integer components.") from None
        ver = (major, minor, patch)

    if upload_name is not None:
        from idfkit_mcp.server import uploads
        from idfkit_mcp.state import session_uploads_dir

        try:
            data = uploads.get_bytes(upload_name, session_id=state.session_id)
        except KeyError as e:
            raise ToolError(str(e)) from e
        # Persist the uploaded bytes into the session's upload directory so
        # the loader can work from a real path.
        dest_dir = session_uploads_dir(state.session_id)
        dest_dir.mkdir(parents=True, exist_ok=True)
        path = dest_dir / upload_name
        path.write_bytes(data)
    else:
        path = Path(file_path)  # type: ignore[arg-type]

    # Dispatch on extension: .epjson/.json use the epJSON loader, all else IDF.
    if path.suffix.lower() in (".epjson", ".json"):
        doc = load_epjson(str(path), version=ver, strict=True)
    else:
        doc = load_idf(str(path), version=ver, strict=True)

    # Install as the active model and invalidate any prior simulation result.
    state.document = doc
    state.schema = doc.schema
    state.file_path = path
    state.simulation_result = None
    state.save_session()

    logger.info("Loaded model %s (version=%s, objects=%d)", path, doc.version, len(list(doc.all_objects)))
    return build_model_summary(doc, state)

search_objects(query, object_type=None, limit=20)

Find objects by name or field value substring match.

Source code in src/idfkit_mcp/tools/read.py
@tool(annotations=_READ_ONLY)
def search_objects(
    query: Annotated[str, Field(description="Case-insensitive substring match on name and string fields.")],
    object_type: Annotated[str | None, Field(description="Restrict search to a specific type.")] = None,
    limit: Annotated[int, Field(description="Maximum results to return.")] = 20,
) -> SearchObjectsResult:
    """Find objects by name or field value substring match."""
    # Hard cap of 100 regardless of the requested limit.
    limit = min(limit, 100)

    # A blank query matches nothing; short-circuit before loading state.
    if not query.strip():
        return SearchObjectsResult.model_validate({"query": query, "count": 0, "matches": []})

    state = get_state()
    doc = state.require_model()
    needle = query.lower()

    hits: list[dict[str, str]] = []
    for candidate in doc.all_objects:
        # Guard clauses: wrong type, then no substring match.
        if object_type is not None and candidate.obj_type != object_type:
            continue
        if not _matches_query(candidate, needle):
            continue
        hits.append({"object_type": candidate.obj_type, "name": candidate.name})
        if len(hits) >= limit:
            break

    logger.debug("search_objects: query=%r type=%s matched=%d", query, object_type, len(hits))
    return SearchObjectsResult.model_validate({"query": query, "count": len(hits), "matches": hits})