Skip to content

Tools: Write

Model creation and editing tools.

add_object(object_type, name='', fields=None)

Add one object. Use batch_add_objects for multiple.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE, output_schema=None)
def add_object(
    object_type: Annotated[str, Field(description='EnergyPlus object type (e.g. "Zone", "Material").')],
    name: Annotated[str, Field(description="Object name (empty for unnamed types).")] = "",
    fields: Annotated[dict[str, Any] | None, Field(description="Field values as {field_name: value}.")] = None,
) -> dict[str, Any]:
    """Add one object. Use batch_add_objects for multiple."""
    doc = get_state().require_model()
    # A missing/None payload means "no extra field values".
    field_values: dict[str, Any] = {} if not fields else fields
    created = doc.add(object_type, name, **field_values)
    logger.info("Added %s %r", object_type, name)
    logger.debug("add_object fields: %s", field_values)
    return serialize_object(created)

batch_add_objects(objects)

Add multiple objects in one call. Continues on errors.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE)
def batch_add_objects(
    objects: Annotated[list[dict[str, Any]], Field(description="List of dicts with keys: object_type, name, fields.")],
) -> BatchAddResult:
    """Add multiple objects in one call. Continues on errors."""
    doc = get_state().require_model()

    # Per-item outcomes in input order; each entry carries its source index so
    # the caller can line up errors with the specs it submitted.
    entries: list[dict[str, object]] = []
    ok = 0
    failed = 0

    for idx, spec in enumerate(objects):
        try:
            type_name = spec.get("object_type")
            if not type_name:
                entries.append({"index": idx, "error": "Missing 'object_type'"})
                failed += 1
                continue

            added = doc.add(type_name, spec.get("name", ""), **(spec.get("fields") or {}))
            entries.append({"index": idx, **serialize_object(added, brief=True)})
            ok += 1
        except Exception as exc:
            # Record the failure and keep going — this tool is best-effort.
            entries.append({"index": idx, "error": str(exc)})
            failed += 1

    logger.info("Batch add: %d total, %d success, %d errors", len(objects), ok, failed)
    if failed:
        logger.warning("batch_add_objects: %d/%d objects failed", failed, len(objects))
    return BatchAddResult(total=len(objects), success=ok, errors=failed, results=entries)

clear_session()

Reset model and simulation state so you can start fresh.

Unloads the current model, schema, simulation results, migration report, and weather file. Uploaded files are kept so the user can re-load them without re-uploading.

WARNING: Only call this when the user explicitly asks to start over. Do NOT call this to recover from tool errors — those errors are recoverable by retrying the failed tool or calling load_model again.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_DESTRUCTIVE)
def clear_session() -> ClearSessionResult:
    """Reset model and simulation state so you can start fresh.

    Unloads the current model, schema, simulation results, migration report,
    and weather file. Uploaded files are kept so the user can re-load them
    without re-uploading.

    WARNING: Only call this when the user explicitly asks to start over.
    Do NOT call this to recover from tool errors — those errors are
    recoverable by retrying the failed tool or calling load_model again.
    """
    # All teardown lives in the session state object; this tool just triggers it.
    get_state().clear_session()
    return ClearSessionResult(status="cleared")

duplicate_object(object_type, name, new_name)

Copy an object with a new name.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE, output_schema=None)
def duplicate_object(
    object_type: Annotated[str, Field(description="EnergyPlus object type.")],
    name: Annotated[str, Field(description="Source object name.")],
    new_name: Annotated[str, Field(description="Name for the duplicate.")],
) -> dict[str, Any]:
    """Copy an object with a new name."""
    doc = get_state().require_model()

    original = doc.get_collection(object_type).get(name)
    if original is None:
        raise ToolError(f"Object '{name}' of type '{object_type}' not found.")

    duplicate = doc.copyidfobject(original, new_name=new_name)
    logger.info("Duplicated %s %r as %r", object_type, name, new_name)
    return serialize_object(duplicate)

new_model(version=None)

Create an empty model.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE)
def new_model(
    version: Annotated[str | None, Field(description='EnergyPlus version as "X.Y.Z" (default: latest).')] = None,
) -> NewModelResult:
    """Create an empty model.

    Raises a ToolError when *version* is supplied but is not three
    dot-separated integers; previously a malformed string surfaced as a raw
    IndexError/ValueError from the tuple conversion below.
    """
    from idfkit import LATEST_VERSION, new_document, version_string

    ver = LATEST_VERSION
    if version is not None:
        parts = version.split(".")
        # Validate up front so a malformed version yields a clear, actionable
        # error instead of an unhandled IndexError ("9.2") or ValueError ("9.x.0").
        if len(parts) != 3 or not all(p.isdigit() for p in parts):
            raise ToolError(f'Invalid version {version!r}: expected "X.Y.Z" (e.g. "24.1.0").')
        ver = (int(parts[0]), int(parts[1]), int(parts[2]))

    doc = new_document(version=ver, strict=True)

    # Replace any previously loaded model and drop state tied to it.
    state = get_state()
    state.document = doc
    state.schema = doc.schema
    state.file_path = None
    state.simulation_result = None

    logger.info("Created new model (version=%s)", version_string(ver))
    return NewModelResult(status="created", version=version_string(ver))

remove_object(object_type, name, force=False)

Delete an object. Blocked if referenced unless force=True.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_DESTRUCTIVE)
def remove_object(
    object_type: Annotated[str, Field(description="EnergyPlus object type.")],
    name: Annotated[str, Field(description="Object name.")],
    force: Annotated[bool, Field(description="Remove even if referenced by other objects.")] = False,
) -> RemoveObjectResult:
    """Delete an object. Blocked if referenced unless force=True."""
    doc = get_state().require_model()

    target = doc.get_collection(object_type).get(name)
    if target is None:
        raise ToolError(f"Object '{name}' of type '{object_type}' not found.")

    if not force:
        # Prefer the object's own stored name for the reference scan, falling
        # back to the name the caller supplied.
        dependents = doc.get_referencing(target.name or name)
        if dependents:
            refs = [{"object_type": dep.obj_type, "name": dep.name} for dep in dependents]
            raise ToolError(
                f"Object is referenced by other objects. Use force=True to remove anyway.\n{json.dumps(refs)}"
            )

    doc.removeidfobject(target)
    logger.info("Removed %s %r", object_type, target.name)
    return RemoveObjectResult(status="removed", object_type=object_type, name=target.name)

rename_object(object_type, old_name, new_name)

Rename and auto-update all references.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE)
def rename_object(
    object_type: Annotated[str, Field(description="EnergyPlus object type.")],
    old_name: Annotated[str, Field(description="Current object name.")],
    new_name: Annotated[str, Field(description="New object name.")],
) -> RenameObjectResult:
    """Rename and auto-update all references."""
    doc = get_state().require_model()

    # Snapshot the reference count before renaming so the result can report
    # how many referring objects were updated.
    updated = len(doc.get_referencing(old_name))

    doc.rename(object_type, old_name, new_name)
    logger.info("Renamed %s %r -> %r (%d references updated)", object_type, old_name, new_name, updated)

    return RenameObjectResult(
        status="renamed",
        object_type=object_type,
        old_name=old_name,
        new_name=new_name,
        references_updated=updated,
    )

save_model(file_path=None, output_format='idf', overwrite=False)

Write model to disk as IDF or epJSON.

When file_path is omitted the model is re-saved to its original load path. An explicit file_path must resolve within an allowed output directory (IDFKIT_MCP_OUTPUT_DIRS, defaults to CWD) and will not overwrite an existing file unless overwrite is True.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_SAVE)
def save_model(
    file_path: Annotated[str | None, Field(description="Output path (default: original load path).")] = None,
    output_format: Annotated[Literal["idf", "epjson"], Field(description="Output format.")] = "idf",
    overwrite: Annotated[bool, Field(description="Overwrite existing output.")] = False,
) -> SaveModelResult:
    """Write model to disk as IDF or epJSON.

    When *file_path* is omitted the model is re-saved to its original load
    path.  An explicit *file_path* must resolve within an allowed output
    directory (``IDFKIT_MCP_OUTPUT_DIRS``, defaults to CWD) and will not
    overwrite an existing file unless *overwrite* is ``True``.
    """
    from pathlib import Path

    from idfkit import write_epjson, write_idf

    from idfkit_mcp.tools._path_validation import validate_output_path

    state = get_state()
    doc = state.require_model()

    # Resolve the destination.  Explicit paths are sandbox-checked and guarded
    # against clobbering; re-saving to the original load path is always allowed.
    if file_path is not None:
        target = validate_output_path(Path(file_path), label="Save path")
        if target.exists() and not overwrite:
            raise ToolError(f"File already exists: '{target}'. Set overwrite=True to replace it.")
    elif state.file_path is not None:
        target = state.file_path
    else:
        raise ToolError("No file path specified and no original path available.")

    writer = write_epjson if output_format == "epjson" else write_idf
    writer(doc, target)

    # Remember where the model now lives and persist session metadata.
    state.file_path = target
    state.save_session()
    logger.info("Saved model to %s (format=%s)", target, output_format)
    return SaveModelResult(status="saved", file_path=str(target), format=output_format)

update_object(object_type, name, fields)

Update fields on an existing object.

Source code in src/idfkit_mcp/tools/write.py
@tool(annotations=_MUTATE, output_schema=None)
def update_object(
    object_type: Annotated[str, Field(description="EnergyPlus object type.")],
    name: Annotated[str, Field(description="Object name.")],
    fields: Annotated[dict[str, Any], Field(description="Fields to update as {field_name: value}.")],
) -> dict[str, Any]:
    """Update fields on an existing object."""
    doc = get_state().require_model()

    target = doc.get_collection(object_type).get(name)
    if target is None:
        raise ToolError(f"Object '{name}' of type '{object_type}' not found.")

    # Apply each field assignment directly on the model object.
    for attr, new_value in fields.items():
        setattr(target, attr, new_value)

    logger.info("Updated %s %r (%d fields)", object_type, name, len(fields))
    logger.debug("update_object fields: %s", fields)
    return serialize_object(target)