Copy the MCP server to the top level (#354)
I want to experiment with using `uvx` from this location, and if it manages all the use cases correctly, we won't clone and copy the server code.
parent
697e0fb69b
commit
0f506e4bee
|
|
@ -0,0 +1,15 @@
|
|||
# Minimal runtime image for the MCP for Unity server, executed via uv.
FROM python:3.13-slim

# git is required so uv can resolve any VCS-based dependencies during sync.
RUN apt-get update && apt-get install -y --no-install-recommends \
    git \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install uv itself; --no-cache-dir avoids baking the pip cache into the layer.
RUN pip install --no-cache-dir uv

COPY . /app

# Create the project virtualenv from pyproject.toml / uv.lock.
RUN uv sync

# The server speaks MCP over stdio; run it inside uv's managed environment.
CMD ["uv", "run", "server.py"]
|
|
@ -0,0 +1,157 @@
|
|||
# MCP for Unity Server
|
||||
|
||||
[](https://modelcontextprotocol.io/introduction)
|
||||
[](https://www.python.org)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://discord.gg/y4p8KfzrN4)
|
||||
|
||||
Model Context Protocol server for Unity Editor integration. Control Unity through natural language using AI assistants like Claude, Cursor, and more.
|
||||
|
||||
**Maintained by [Coplay](https://www.coplay.dev/?ref=unity-mcp)** - This project is not affiliated with Unity Technologies.
|
||||
|
||||
💬 **Join our community:** [Discord Server](https://discord.gg/y4p8KfzrN4)
|
||||
|
||||
**Required:** Install the [Unity MCP Plugin](https://github.com/CoplayDev/unity-mcp?tab=readme-ov-file#-step-1-install-the-unity-package) to connect Unity Editor with this MCP server.
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
### Option 1: Using uvx (Recommended)
|
||||
|
||||
Run directly from GitHub without installation:
|
||||
|
||||
```bash
|
||||
uvx --from git+https://github.com/CoplayDev/unity-mcp@v6.3.0#subdirectory=Server mcp-for-unity
|
||||
```
|
||||
|
||||
**MCP Client Configuration:**
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"UnityMCP": {
|
||||
"command": "uvx",
|
||||
"args": [
|
||||
"--from",
|
||||
"git+https://github.com/CoplayDev/unity-mcp@v6.3.0#subdirectory=Server",
|
||||
"mcp-for-unity"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Option 2: Using uv (Local Installation)
|
||||
|
||||
For local development or custom installations:
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://github.com/CoplayDev/unity-mcp.git
|
||||
cd unity-mcp/Server
|
||||
|
||||
# Run with uv
|
||||
uv run server.py
|
||||
```
|
||||
|
||||
**MCP Client Configuration:**
|
||||
|
||||
**Windows:**
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"UnityMCP": {
|
||||
"command": "uv",
|
||||
"args": [
|
||||
"run",
|
||||
"--directory",
|
||||
"C:\\path\\to\\unity-mcp\\Server",
|
||||
"server.py"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**macOS/Linux:**
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"UnityMCP": {
|
||||
"command": "uv",
|
||||
"args": [
|
||||
"run",
|
||||
"--directory",
|
||||
"/path/to/unity-mcp/Server",
|
||||
"server.py"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Option 3: Using Docker
|
||||
|
||||
```bash
|
||||
docker build -t unity-mcp-server .
|
||||
docker run unity-mcp-server
|
||||
```
|
||||
|
||||
**MCP Client Configuration:**
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"UnityMCP": {
|
||||
"command": "docker",
|
||||
"args": ["run", "-i", "unity-mcp-server"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
The server connects to Unity Editor automatically when both are running. No additional configuration needed.
|
||||
|
||||
**Environment Variables:**
|
||||
|
||||
- `DISABLE_TELEMETRY=true` - Opt out of anonymous usage analytics
|
||||
- `LOG_LEVEL=DEBUG` - Enable detailed logging (default: INFO)
|
||||
|
||||
---
|
||||
|
||||
## Example Prompts
|
||||
|
||||
Once connected, try these commands in your AI assistant:
|
||||
|
||||
- "Create a 3D player controller with WASD movement"
|
||||
- "Add a rotating cube to the scene with a red material"
|
||||
- "Create a simple platformer level with obstacles"
|
||||
- "Generate a shader that creates a holographic effect"
|
||||
- "List all GameObjects in the current scene"
|
||||
|
||||
---
|
||||
|
||||
## Documentation
|
||||
|
||||
For complete documentation, troubleshooting, and advanced usage:
|
||||
|
||||
📖 **[Full Documentation](https://github.com/CoplayDev/unity-mcp#readme)**
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
- **Python:** 3.11 or newer
|
||||
- **Unity Editor:** 2021.3 LTS or newer
|
||||
- **uv:** Python package manager ([Installation Guide](https://docs.astral.sh/uv/getting-started/installation/))
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
MIT License - See [LICENSE](https://github.com/CoplayDev/unity-mcp/blob/main/LICENSE)
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
"""
|
||||
Configuration settings for the MCP for Unity Server.
|
||||
This file contains all configurable parameters for the server.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
|
||||
class ServerConfig:
|
||||
"""Main configuration class for the MCP server."""
|
||||
|
||||
# Network settings
|
||||
unity_host: str = "localhost"
|
||||
unity_port: int = 6400
|
||||
mcp_port: int = 6500
|
||||
|
||||
# Connection settings
|
||||
connection_timeout: float = 30.0
|
||||
buffer_size: int = 16 * 1024 * 1024 # 16MB buffer
|
||||
# Framed receive behavior
|
||||
# max seconds to wait while consuming heartbeats only
|
||||
framed_receive_timeout: float = 2.0
|
||||
# cap heartbeat frames consumed before giving up
|
||||
max_heartbeat_frames: int = 16
|
||||
|
||||
# Logging settings
|
||||
log_level: str = "INFO"
|
||||
log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
|
||||
# Server settings
|
||||
max_retries: int = 5
|
||||
retry_delay: float = 0.25
|
||||
# Backoff hint returned to clients when Unity is reloading (milliseconds)
|
||||
reload_retry_ms: int = 250
|
||||
# Number of polite retries when Unity reports reloading
|
||||
# 40 × 250ms ≈ 10s default window
|
||||
reload_max_retries: int = 40
|
||||
|
||||
# Telemetry settings
|
||||
telemetry_enabled: bool = True
|
||||
# Align with telemetry.py default Cloud Run endpoint
|
||||
telemetry_endpoint: str = "https://api-prod.coplay.dev/telemetry/events"
|
||||
|
||||
|
||||
# Create a global config instance
|
||||
config = ServerConfig()
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
from typing import Any
from pydantic import BaseModel


class MCPResponse(BaseModel):
    """Common envelope for responses returned by MCP tools and resources.

    Subclasses typically narrow ``data`` to a concrete payload type
    (e.g. ``data: list[str]``) while keeping this success/error contract.
    """
    # Whether the underlying Unity command succeeded.
    success: bool
    # Optional human-readable status message on success.
    message: str | None = None
    # Optional error description when success is False.
    error: str | None = None
    # Optional payload; shape depends on the specific command.
    data: Any | None = None
|
|
@ -0,0 +1,55 @@
|
|||
"""
|
||||
Shared module discovery utilities for auto-registering tools and resources.
|
||||
"""
|
||||
import importlib
|
||||
import logging
|
||||
from pathlib import Path
|
||||
import pkgutil
|
||||
from typing import Generator
|
||||
|
||||
logger = logging.getLogger("mcp-for-unity-server")
|
||||
|
||||
|
||||
def discover_modules(base_dir: Path, package_name: str) -> Generator[str, None, None]:
|
||||
"""
|
||||
Discover and import all Python modules in a directory and its subdirectories.
|
||||
|
||||
Args:
|
||||
base_dir: The base directory to search for modules
|
||||
package_name: The package name to use for relative imports (e.g., 'tools' or 'resources')
|
||||
|
||||
Yields:
|
||||
Full module names that were successfully imported
|
||||
"""
|
||||
# Discover modules in the top level
|
||||
for _, module_name, _ in pkgutil.iter_modules([str(base_dir)]):
|
||||
# Skip private modules and __init__
|
||||
if module_name.startswith('_'):
|
||||
continue
|
||||
|
||||
try:
|
||||
full_module_name = f'.{module_name}'
|
||||
importlib.import_module(full_module_name, package_name)
|
||||
yield full_module_name
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to import module {module_name}: {e}")
|
||||
|
||||
# Discover modules in subdirectories (one level deep)
|
||||
for subdir in base_dir.iterdir():
|
||||
if not subdir.is_dir() or subdir.name.startswith('_') or subdir.name.startswith('.'):
|
||||
continue
|
||||
|
||||
# Check if subdirectory contains Python modules
|
||||
for _, module_name, _ in pkgutil.iter_modules([str(subdir)]):
|
||||
# Skip private modules and __init__
|
||||
if module_name.startswith('_'):
|
||||
continue
|
||||
|
||||
try:
|
||||
# Import as package.subdirname.modulename
|
||||
full_module_name = f'.{subdir.name}.{module_name}'
|
||||
importlib.import_module(full_module_name, package_name)
|
||||
yield full_module_name
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
f"Failed to import module {subdir.name}.{module_name}: {e}")
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
"""
|
||||
Port discovery utility for MCP for Unity Server.
|
||||
|
||||
What changed and why:
|
||||
- Unity now writes a per-project port file named like
|
||||
`~/.unity-mcp/unity-mcp-port-<hash>.json` to avoid projects overwriting
|
||||
each other's saved port. The legacy file `unity-mcp-port.json` may still
|
||||
exist.
|
||||
- This module now scans for both patterns, prefers the most recently
|
||||
modified file, and verifies that the port is actually a MCP for Unity listener
|
||||
(quick socket connect + ping) before choosing it.
|
||||
"""
|
||||
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
import socket
|
||||
from typing import Optional, List
|
||||
|
||||
logger = logging.getLogger("mcp-for-unity-server")
|
||||
|
||||
|
||||
class PortDiscovery:
    """Handles port discovery from Unity Bridge registry files.

    Unity writes per-project files named ``unity-mcp-port-<hash>.json`` plus a
    legacy ``unity-mcp-port.json``; candidates are probed over TCP before
    being trusted.
    """
    REGISTRY_FILE = "unity-mcp-port.json"  # legacy single-project file
    DEFAULT_PORT = 6400
    CONNECT_TIMEOUT = 0.3  # seconds, keep this snappy during discovery

    @staticmethod
    def get_registry_path() -> Path:
        """Get the path to the legacy port registry file."""
        return Path.home() / ".unity-mcp" / PortDiscovery.REGISTRY_FILE

    @staticmethod
    def get_registry_dir() -> Path:
        """Directory that holds all port/status registry files."""
        return Path.home() / ".unity-mcp"

    @staticmethod
    def _safe_mtime(p: Path) -> float:
        """Return p's mtime, or 0.0 if the file vanished mid-scan.

        Fixes a race: a file matched by glob can be deleted before stat(),
        which previously raised out of the sort key and aborted discovery.
        """
        try:
            return p.stat().st_mtime
        except OSError:
            return 0.0

    @staticmethod
    def list_candidate_files() -> List[Path]:
        """Return candidate registry files, newest first.

        Includes hashed per-project files and the legacy file (if present).
        """
        base = PortDiscovery.get_registry_dir()
        hashed = sorted(
            (Path(p) for p in glob.glob(str(base / "unity-mcp-port-*.json"))),
            key=PortDiscovery._safe_mtime,
            reverse=True,
        )
        legacy = PortDiscovery.get_registry_path()
        if legacy.exists():
            # Put legacy at the end so hashed, per-project files win
            hashed.append(legacy)
        return hashed

    @staticmethod
    def _try_probe_unity_mcp(port: int) -> bool:
        """Quickly check if a MCP for Unity listener is on this port.

        Tries a short TCP connect, sends 'ping', expects Unity bridge welcome message.
        """
        try:
            with socket.create_connection(("127.0.0.1", port), PortDiscovery.CONNECT_TIMEOUT) as s:
                s.settimeout(PortDiscovery.CONNECT_TIMEOUT)
                try:
                    s.sendall(b"ping")
                    data = s.recv(512)
                    # Check for Unity bridge welcome message format
                    if data and (b"WELCOME UNITY-MCP" in data or b'"message":"pong"' in data):
                        return True
                except Exception:
                    return False
        except Exception:
            return False
        return False

    @staticmethod
    def _read_latest_status() -> Optional[dict]:
        """Parse the most recently modified heartbeat status file, or None."""
        try:
            base = PortDiscovery.get_registry_dir()
            status_files = sorted(
                (Path(p)
                 for p in glob.glob(str(base / "unity-mcp-status-*.json"))),
                key=PortDiscovery._safe_mtime,
                reverse=True,
            )
            if not status_files:
                return None
            with status_files[0].open('r') as f:
                return json.load(f)
        except Exception:
            return None

    @staticmethod
    def discover_unity_port() -> int:
        """
        Discover Unity port by scanning per-project and legacy registry files.
        Prefer the newest file whose port responds; fall back to first parsed
        value; finally default to 6400.

        Returns:
            Port number to connect to
        """
        # Prefer the latest heartbeat status if it points to a responsive port
        status = PortDiscovery._read_latest_status()
        if status:
            port = status.get('unity_port')
            if isinstance(port, int) and PortDiscovery._try_probe_unity_mcp(port):
                logger.info(f"Using Unity port from status: {port}")
                return port

        candidates = PortDiscovery.list_candidate_files()

        # First syntactically-valid port seen, used if nothing responds.
        first_seen_port: Optional[int] = None

        for path in candidates:
            try:
                with open(path, 'r') as f:
                    cfg = json.load(f)
                unity_port = cfg.get('unity_port')
                if isinstance(unity_port, int):
                    if first_seen_port is None:
                        first_seen_port = unity_port
                    if PortDiscovery._try_probe_unity_mcp(unity_port):
                        logger.info(
                            f"Using Unity port from {path.name}: {unity_port}")
                        return unity_port
            except Exception as e:
                logger.warning(f"Could not read port registry {path}: {e}")

        if first_seen_port is not None:
            logger.info(
                f"No responsive port found; using first seen value {first_seen_port}")
            return first_seen_port

        # Fallback to default port
        logger.info(
            f"No port registry found; using default port {PortDiscovery.DEFAULT_PORT}")
        return PortDiscovery.DEFAULT_PORT

    @staticmethod
    def get_port_config() -> Optional[dict]:
        """
        Get the most relevant port configuration from registry.
        Returns the most recent hashed file's config if present,
        otherwise the legacy file's config. Returns None if nothing exists.

        Returns:
            Port configuration dict or None if not found
        """
        candidates = PortDiscovery.list_candidate_files()
        if not candidates:
            return None
        for path in candidates:
            try:
                with open(path, 'r') as f:
                    return json.load(f)
            except Exception as e:
                logger.warning(
                    f"Could not read port configuration {path}: {e}")
        return None
||||
|
|
@ -0,0 +1,40 @@
|
|||
# PEP 621 project metadata for the MCP for Unity server.
[project]
name = "MCPForUnityServer"
# NOTE(review): appears to track server_version.txt and the uvx tag in the
# README (all 6.3.0) — confirm these are meant to stay in sync.
version = "6.3.0"
description = "MCP for Unity Server: A Unity package for Unity Editor integration via the Model Context Protocol (MCP)."
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "httpx>=0.27.2",
    "fastmcp>=2.12.5",
    "mcp>=1.16.0",
    "pydantic>=2.12.0",
    "tomli>=2.3.0",
]

# Development-only extras: install with `pip install .[dev]` / `uv sync --extra dev`.
[project.optional-dependencies]
dev = [
    "pytest>=8.0.0",
    "pytest-asyncio>=0.23",
]

# Console entry point used by `uvx ... mcp-for-unity` (see README).
[project.scripts]
mcp-for-unity = "server:main"

[build-system]
requires = ["setuptools>=64.0.0", "wheel"]
build-backend = "setuptools.build_meta"

# Flat-layout top-level modules and packages shipped in the wheel.
[tool.setuptools]
py-modules = [
    "config",
    "models",
    "module_discovery",
    "port_discovery",
    "reload_sentinel",
    "server",
    "telemetry",
    "telemetry_decorator",
    "unity_connection"
]
packages = ["tools", "resources", "registry"]
|
|
@ -0,0 +1,11 @@
|
|||
{
|
||||
"typeCheckingMode": "basic",
|
||||
"reportMissingImports": "none",
|
||||
"pythonVersion": "3.11",
|
||||
"executionEnvironments": [
|
||||
{
|
||||
"root": ".",
|
||||
"pythonVersion": "3.11"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
"""
|
||||
Registry package for MCP tool auto-discovery.
|
||||
"""
|
||||
from .tool_registry import (
|
||||
mcp_for_unity_tool,
|
||||
get_registered_tools,
|
||||
clear_tool_registry,
|
||||
)
|
||||
from .resource_registry import (
|
||||
mcp_for_unity_resource,
|
||||
get_registered_resources,
|
||||
clear_resource_registry,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'mcp_for_unity_tool',
|
||||
'get_registered_tools',
|
||||
'clear_tool_registry',
|
||||
'mcp_for_unity_resource',
|
||||
'get_registered_resources',
|
||||
'clear_resource_registry'
|
||||
]
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
"""
|
||||
Resource registry for auto-discovery of MCP resources.
|
||||
"""
|
||||
from typing import Callable, Any
|
||||
|
||||
# Global registry to collect decorated resources
|
||||
_resource_registry: list[dict[str, Any]] = []
|
||||
|
||||
|
||||
def mcp_for_unity_resource(
|
||||
uri: str,
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
**kwargs
|
||||
) -> Callable:
|
||||
"""
|
||||
Decorator for registering MCP resources in the server's resources directory.
|
||||
|
||||
Resources are registered in the global resource registry.
|
||||
|
||||
Args:
|
||||
name: Resource name (defaults to function name)
|
||||
description: Resource description
|
||||
**kwargs: Additional arguments passed to @mcp.resource()
|
||||
|
||||
Example:
|
||||
@mcp_for_unity_resource("mcpforunity://resource", description="Gets something interesting")
|
||||
async def my_custom_resource(ctx: Context, ...):
|
||||
pass
|
||||
"""
|
||||
def decorator(func: Callable) -> Callable:
|
||||
resource_name = name if name is not None else func.__name__
|
||||
_resource_registry.append({
|
||||
'func': func,
|
||||
'uri': uri,
|
||||
'name': resource_name,
|
||||
'description': description,
|
||||
'kwargs': kwargs
|
||||
})
|
||||
|
||||
return func
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def get_registered_resources() -> list[dict[str, Any]]:
|
||||
"""Get all registered resources"""
|
||||
return _resource_registry.copy()
|
||||
|
||||
|
||||
def clear_resource_registry():
|
||||
"""Clear the resource registry (useful for testing)"""
|
||||
_resource_registry.clear()
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
"""
|
||||
Tool registry for auto-discovery of MCP tools.
|
||||
"""
|
||||
from typing import Callable, Any
|
||||
|
||||
# Global registry to collect decorated tools
|
||||
_tool_registry: list[dict[str, Any]] = []
|
||||
|
||||
|
||||
def mcp_for_unity_tool(
|
||||
name: str | None = None,
|
||||
description: str | None = None,
|
||||
**kwargs
|
||||
) -> Callable:
|
||||
"""
|
||||
Decorator for registering MCP tools in the server's tools directory.
|
||||
|
||||
Tools are registered in the global tool registry.
|
||||
|
||||
Args:
|
||||
name: Tool name (defaults to function name)
|
||||
description: Tool description
|
||||
**kwargs: Additional arguments passed to @mcp.tool()
|
||||
|
||||
Example:
|
||||
@mcp_for_unity_tool(description="Does something cool")
|
||||
async def my_custom_tool(ctx: Context, ...):
|
||||
pass
|
||||
"""
|
||||
def decorator(func: Callable) -> Callable:
|
||||
tool_name = name if name is not None else func.__name__
|
||||
_tool_registry.append({
|
||||
'func': func,
|
||||
'name': tool_name,
|
||||
'description': description,
|
||||
'kwargs': kwargs
|
||||
})
|
||||
|
||||
return func
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def get_registered_tools() -> list[dict[str, Any]]:
|
||||
"""Get all registered tools"""
|
||||
return _tool_registry.copy()
|
||||
|
||||
|
||||
def clear_tool_registry():
|
||||
"""Clear the tool registry (useful for testing)"""
|
||||
_tool_registry.clear()
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
"""
|
||||
Deprecated: Sentinel flipping is handled inside Unity via the MCP menu
|
||||
'MCP/Flip Reload Sentinel'. This module remains only as a compatibility shim.
|
||||
All functions are no-ops to prevent accidental external writes.
|
||||
"""
|
||||
|
||||
|
||||
def flip_reload_sentinel(*args, **kwargs) -> str:
|
||||
return "reload_sentinel.py is deprecated; use execute_menu_item → 'MCP/Flip Reload Sentinel'"
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
"""
|
||||
MCP Resources package - Auto-discovers and registers all resources in this directory.
|
||||
"""
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from telemetry_decorator import telemetry_resource
|
||||
|
||||
from registry import get_registered_resources
|
||||
from module_discovery import discover_modules
|
||||
|
||||
logger = logging.getLogger("mcp-for-unity-server")
|
||||
|
||||
# Export decorator for easy imports within tools
|
||||
__all__ = ['register_all_resources']
|
||||
|
||||
|
||||
def register_all_resources(mcp: FastMCP):
|
||||
"""
|
||||
Auto-discover and register all resources in the resources/ directory.
|
||||
|
||||
Any .py file in this directory or subdirectories with @mcp_for_unity_resource decorated
|
||||
functions will be automatically registered.
|
||||
"""
|
||||
logger.info("Auto-discovering MCP for Unity Server resources...")
|
||||
# Dynamic import of all modules in this directory
|
||||
resources_dir = Path(__file__).parent
|
||||
|
||||
# Discover and import all modules
|
||||
list(discover_modules(resources_dir, __package__))
|
||||
|
||||
resources = get_registered_resources()
|
||||
|
||||
if not resources:
|
||||
logger.warning("No MCP resources registered!")
|
||||
return
|
||||
|
||||
for resource_info in resources:
|
||||
func = resource_info['func']
|
||||
uri = resource_info['uri']
|
||||
resource_name = resource_info['name']
|
||||
description = resource_info['description']
|
||||
kwargs = resource_info['kwargs']
|
||||
|
||||
# Apply the @mcp.resource decorator and telemetry
|
||||
wrapped = telemetry_resource(resource_name)(func)
|
||||
wrapped = mcp.resource(uri=uri, name=resource_name,
|
||||
description=description, **kwargs)(wrapped)
|
||||
resource_info['func'] = wrapped
|
||||
logger.debug(f"Registered resource: {resource_name} - {description}")
|
||||
|
||||
logger.info(f"Registered {len(resources)} MCP resources")
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
from models import MCPResponse
from registry import mcp_for_unity_resource
from unity_connection import async_send_command_with_retry


class GetMenuItemsResponse(MCPResponse):
    # Menu item paths reported by the Unity Editor.
    data: list[str] = []


@mcp_for_unity_resource(
    uri="mcpforunity://menu-items",
    name="get_menu_items",
    description="Provides a list of all menu items."
)
async def get_menu_items() -> GetMenuItemsResponse:
    """Provides a list of all menu items."""
    # Later versions of FastMCP support these as query parameters
    # See: https://gofastmcp.com/servers/resources#query-parameters
    params = {"refresh": True, "search": ""}

    raw = await async_send_command_with_retry("get_menu_items", params)
    if isinstance(raw, dict):
        return GetMenuItemsResponse(**raw)
    return raw
|
|
@ -0,0 +1,31 @@
|
|||
from typing import Annotated, Literal
from pydantic import BaseModel, Field

from models import MCPResponse
from registry import mcp_for_unity_resource
from unity_connection import async_send_command_with_retry


class TestItem(BaseModel):
    # One Unity test as reported by the Editor's test runner.
    name: Annotated[str, Field(description="The name of the test.")]
    full_name: Annotated[str, Field(description="The full name of the test.")]
    mode: Annotated[Literal["EditMode", "PlayMode"],
                    Field(description="The mode the test is for.")]


class GetTestsResponse(MCPResponse):
    # Defaults to empty so a missing payload deserializes cleanly.
    data: list[TestItem] = []


@mcp_for_unity_resource(uri="mcpforunity://tests", name="get_tests", description="Provides a list of all tests.")
async def get_tests() -> GetTestsResponse:
    """Provides a list of all tests."""
    raw = await async_send_command_with_retry("get_tests", {})
    if isinstance(raw, dict):
        return GetTestsResponse(**raw)
    return raw


@mcp_for_unity_resource(uri="mcpforunity://tests/{mode}", name="get_tests_for_mode", description="Provides a list of tests for a specific mode.")
async def get_tests_for_mode(mode: Annotated[Literal["EditMode", "PlayMode"], Field(description="The mode to filter tests by.")]) -> GetTestsResponse:
    """Provides a list of tests for a specific mode."""
    raw = await async_send_command_with_retry("get_tests_for_mode", {"mode": mode})
    if isinstance(raw, dict):
        return GetTestsResponse(**raw)
    return raw
|
@ -0,0 +1,196 @@
|
|||
from telemetry import record_telemetry, record_milestone, RecordType, MilestoneType
from fastmcp import FastMCP
import logging
from logging.handlers import RotatingFileHandler
import os
from contextlib import asynccontextmanager
from typing import AsyncIterator, Dict, Any
from config import config
from tools import register_all_tools
from resources import register_all_resources
from unity_connection import get_unity_connection, UnityConnection
import time

# Configure logging using settings from config
logging.basicConfig(
    level=getattr(logging, config.log_level),
    format=config.log_format,
    stream=None,  # None -> defaults to sys.stderr; avoid stdout used by MCP stdio
    force=True  # Ensure our handler replaces any prior stdout handlers
)
logger = logging.getLogger("mcp-for-unity-server")

# Also write logs to a rotating file so logs are available when launched via stdio
try:
    import os as _os
    # NOTE(review): this path is macOS-flavored; on Linux/Windows the directory
    # is still created under the home dir but is unconventional — confirm
    # whether a per-OS log location is intended.
    _log_dir = _os.path.join(_os.path.expanduser(
        "~/Library/Application Support/UnityMCP"), "Logs")
    _os.makedirs(_log_dir, exist_ok=True)
    _file_path = _os.path.join(_log_dir, "unity_mcp_server.log")
    # Keep logs bounded: 512KB per file with two rotated backups.
    _fh = RotatingFileHandler(
        _file_path, maxBytes=512*1024, backupCount=2, encoding="utf-8")
    _fh.setFormatter(logging.Formatter(config.log_format))
    _fh.setLevel(getattr(logging, config.log_level))
    logger.addHandler(_fh)
    # Also route telemetry logger to the same rotating file and normal level
    try:
        tlog = logging.getLogger("unity-mcp-telemetry")
        tlog.setLevel(getattr(logging, config.log_level))
        tlog.addHandler(_fh)
    except Exception:
        # Never let logging setup break startup
        pass
except Exception:
    # Never let logging setup break startup
    pass
# Quieten noisy third-party loggers to avoid clutter during stdio handshake
for noisy in ("httpx", "urllib3"):
    try:
        # Never drop below WARNING even if the configured level is more verbose.
        logging.getLogger(noisy).setLevel(
            max(logging.WARNING, getattr(logging, config.log_level)))
    except Exception:
        pass

# Import telemetry only after logging is configured to ensure its logs use stderr and proper levels
# Ensure a slightly higher telemetry timeout unless explicitly overridden by env
try:

    # Ensure generous timeout unless explicitly overridden by env
    if not os.environ.get("UNITY_MCP_TELEMETRY_TIMEOUT"):
        os.environ["UNITY_MCP_TELEMETRY_TIMEOUT"] = "5.0"
except Exception:
    pass

# Global connection state
# None until server_lifespan establishes (or skips) the Unity connection.
_unity_connection: UnityConnection = None
||||
|
||||
@asynccontextmanager
|
||||
async def server_lifespan(server: FastMCP) -> AsyncIterator[Dict[str, Any]]:
|
||||
"""Handle server startup and shutdown."""
|
||||
global _unity_connection
|
||||
logger.info("MCP for Unity Server starting up")
|
||||
|
||||
# Record server startup telemetry
|
||||
start_time = time.time()
|
||||
start_clk = time.perf_counter()
|
||||
try:
|
||||
from pathlib import Path
|
||||
ver_path = Path(__file__).parent / "server_version.txt"
|
||||
server_version = ver_path.read_text(encoding="utf-8").strip()
|
||||
except Exception:
|
||||
server_version = "unknown"
|
||||
# Defer initial telemetry by 1s to avoid stdio handshake interference
|
||||
import threading
|
||||
|
||||
def _emit_startup():
|
||||
try:
|
||||
record_telemetry(RecordType.STARTUP, {
|
||||
"server_version": server_version,
|
||||
"startup_time": start_time,
|
||||
})
|
||||
record_milestone(MilestoneType.FIRST_STARTUP)
|
||||
except Exception:
|
||||
logger.debug("Deferred startup telemetry failed", exc_info=True)
|
||||
threading.Timer(1.0, _emit_startup).start()
|
||||
|
||||
try:
|
||||
skip_connect = os.environ.get(
|
||||
"UNITY_MCP_SKIP_STARTUP_CONNECT", "").lower() in ("1", "true", "yes", "on")
|
||||
if skip_connect:
|
||||
logger.info(
|
||||
"Skipping Unity connection on startup (UNITY_MCP_SKIP_STARTUP_CONNECT=1)")
|
||||
else:
|
||||
_unity_connection = get_unity_connection()
|
||||
logger.info("Connected to Unity on startup")
|
||||
|
||||
# Record successful Unity connection (deferred)
|
||||
import threading as _t
|
||||
_t.Timer(1.0, lambda: record_telemetry(
|
||||
RecordType.UNITY_CONNECTION,
|
||||
{
|
||||
"status": "connected",
|
||||
"connection_time_ms": (time.perf_counter() - start_clk) * 1000,
|
||||
}
|
||||
)).start()
|
||||
|
||||
except ConnectionError as e:
|
||||
logger.warning("Could not connect to Unity on startup: %s", e)
|
||||
_unity_connection = None
|
||||
|
||||
# Record connection failure (deferred)
|
||||
import threading as _t
|
||||
_err_msg = str(e)[:200]
|
||||
_t.Timer(1.0, lambda: record_telemetry(
|
||||
RecordType.UNITY_CONNECTION,
|
||||
{
|
||||
"status": "failed",
|
||||
"error": _err_msg,
|
||||
"connection_time_ms": (time.perf_counter() - start_clk) * 1000,
|
||||
}
|
||||
)).start()
|
||||
except Exception as e:
|
||||
logger.warning(
|
||||
"Unexpected error connecting to Unity on startup: %s", e)
|
||||
_unity_connection = None
|
||||
import threading as _t
|
||||
_err_msg = str(e)[:200]
|
||||
_t.Timer(1.0, lambda: record_telemetry(
|
||||
RecordType.UNITY_CONNECTION,
|
||||
{
|
||||
"status": "failed",
|
||||
"error": _err_msg,
|
||||
"connection_time_ms": (time.perf_counter() - start_clk) * 1000,
|
||||
}
|
||||
)).start()
|
||||
|
||||
try:
|
||||
# Yield the connection object so it can be attached to the context
|
||||
# The key 'bridge' matches how tools like read_console expect to access it (ctx.bridge)
|
||||
yield {"bridge": _unity_connection}
|
||||
finally:
|
||||
if _unity_connection:
|
||||
_unity_connection.disconnect()
|
||||
_unity_connection = None
|
||||
logger.info("MCP for Unity Server shut down")
|
||||
|
||||
# Initialize MCP server
# The `instructions` text is sent to MCP clients verbatim during handshake.
mcp = FastMCP(
    name="mcp-for-unity-server",
    lifespan=server_lifespan,
    instructions="""
    This server provides tools to interact with the Unity Game Engine Editor.\n\n
    Available tools:\n
    - `manage_editor`: Controls editor state and queries info.\n
    - `execute_menu_item`: Executes, lists and checks for the existence of Unity Editor menu items.\n
    - `read_console`: Reads or clears Unity console messages, with filtering options.\n
    - `manage_scene`: Manages scenes.\n
    - `manage_gameobject`: Manages GameObjects in the scene.\n
    - `manage_script`: Manages C# script files.\n
    - `manage_asset`: Manages prefabs and assets.\n
    - `manage_shader`: Manages shaders.\n\n
    - Tips:\n
    - Create prefabs for reusable GameObjects.\n
    - Always include a camera and main light in your scenes.\n
    - Unless specified otherwise, paths are relative to the project's `Assets/` folder.\n
    - After creating or modifying scripts with `manage_script`, allow Unity to recompile; use `read_console` to check for compile errors.\n
    - Use `execute_menu_item` for interacting with Unity systems and third party tools like a user would.\n

    """
)

# Register all tools
register_all_tools(mcp)

# Register all resources
register_all_resources(mcp)


def main():
    """Entry point for uvx and console scripts."""
    # stdio transport: the MCP client owns this process's stdin/stdout.
    mcp.run(transport='stdio')


# Run the server
if __name__ == "__main__":
    main()
|
|
@ -0,0 +1 @@
|
|||
6.3.0
|
||||
|
|
@ -0,0 +1,513 @@
|
|||
"""
|
||||
Privacy-focused, anonymous telemetry system for MCP for Unity
|
||||
Inspired by Onyx's telemetry implementation with Unity-specific adaptations
|
||||
|
||||
Fire-and-forget telemetry sender with a single background worker.
|
||||
- No context/thread-local propagation to avoid re-entrancy into tool resolution.
|
||||
- Small network timeouts to prevent stalls.
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from importlib import import_module, metadata
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import platform
|
||||
import queue
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from typing import Any
|
||||
from urllib.parse import urlparse
|
||||
import uuid
|
||||
|
||||
import tomli
|
||||
|
||||
try:
|
||||
import httpx
|
||||
HAS_HTTPX = True
|
||||
except ImportError:
|
||||
httpx = None # type: ignore
|
||||
HAS_HTTPX = False
|
||||
|
||||
logger = logging.getLogger("unity-mcp-telemetry")
|
||||
|
||||
|
||||
def get_package_version() -> str:
    """Return the server's version string.

    Resolution order:
    1. Installed package metadata ("MCPForUnityServer") — the uvx / asset-store path.
    2. The adjacent pyproject.toml — the git-checkout development path.
    3. The literal "unknown" as a last resort (should not happen in practice).
    """
    try:
        return metadata.version("MCPForUnityServer")
    except Exception:
        pass  # not installed as a package; fall through to pyproject.toml
    try:
        with open(Path(__file__).parent / "pyproject.toml", "rb") as fh:
            return tomli.load(fh)["project"]["version"]
    except Exception:
        return "unknown"
|
||||
|
||||
|
||||
MCP_VERSION = get_package_version()
|
||||
|
||||
|
||||
class RecordType(str, Enum):
    """Types of telemetry records we collect.

    Subclassing str lets enum members serialize directly into JSON payloads.
    """
    VERSION = "version"
    STARTUP = "startup"
    USAGE = "usage"                  # generic usage events; also carries milestones
    LATENCY = "latency"
    FAILURE = "failure"
    RESOURCE_RETRIEVAL = "resource_retrieval"
    TOOL_EXECUTION = "tool_execution"
    UNITY_CONNECTION = "unity_connection"
    CLIENT_CONNECTION = "client_connection"
|
||||
|
||||
|
||||
class MilestoneType(str, Enum):
    """Major user journey milestones; each is recorded at most once per install."""
    FIRST_STARTUP = "first_startup"
    FIRST_TOOL_USAGE = "first_tool_usage"
    FIRST_SCRIPT_CREATION = "first_script_creation"
    FIRST_SCENE_MODIFICATION = "first_scene_modification"
    MULTIPLE_SESSIONS = "multiple_sessions"
    DAILY_ACTIVE_USER = "daily_active_user"
    WEEKLY_ACTIVE_USER = "weekly_active_user"
|
||||
|
||||
|
||||
@dataclass
class TelemetryRecord:
    """Structure for telemetry data queued for sending."""
    record_type: RecordType   # event category (see RecordType)
    timestamp: float          # Unix epoch seconds at record time
    customer_uuid: str        # anonymous per-install identifier
    session_id: str           # random UUID generated per server process
    data: dict[str, Any]      # event-specific payload
    milestone: MilestoneType | None = None  # set when the event marks a first-use milestone
|
||||
|
||||
|
||||
class TelemetryConfig:
    """Telemetry configuration.

    Resolves settings from the server's config module (when importable), then
    applies environment-variable overrides. Also owns the on-disk locations
    used to persist the anonymous customer UUID and the milestone history.
    """

    def __init__(self):
        """
        Prefer config file, then allow env overrides
        """
        # Locate the server config module; the import path differs depending
        # on how the server is run (repo root, src package, or the copy
        # embedded in the Unity package).
        server_config = None
        for modname in (
            # Prefer plain module to respect test-time overrides and sys.path injection
            "config",
            "src.config",
            "MCPForUnity.UnityMcpServer~.src.config",
            "MCPForUnity.UnityMcpServer.src.config",
        ):
            try:
                mod = import_module(modname)
                server_config = getattr(mod, "config", None)
                if server_config is not None:
                    break
            except Exception:
                continue

        # Determine enabled flag: config -> env DISABLE_* opt-out
        cfg_enabled = True if server_config is None else bool(
            getattr(server_config, "telemetry_enabled", True))
        self.enabled = cfg_enabled and not self._is_disabled()

        # Telemetry endpoint (Cloud Run default; override via env)
        cfg_default = None if server_config is None else getattr(
            server_config, "telemetry_endpoint", None)
        default_ep = cfg_default or "https://api-prod.coplay.dev/telemetry/events"
        self.default_endpoint = default_ep
        # Prefer config default; allow explicit env override only when set
        env_ep = os.environ.get("UNITY_MCP_TELEMETRY_ENDPOINT")
        if env_ep is not None and env_ep != "":
            self.endpoint = self._validated_endpoint(env_ep, default_ep)
        else:
            # Validate config-provided default as well to enforce scheme/host rules
            self.endpoint = self._validated_endpoint(default_ep, default_ep)
        try:
            logger.info(
                "Telemetry configured: endpoint=%s (default=%s), timeout_env=%s",
                self.endpoint,
                default_ep,
                os.environ.get("UNITY_MCP_TELEMETRY_TIMEOUT") or "<unset>"
            )
        except Exception:
            pass  # logging must never break configuration

        # Local storage for UUID and milestones
        self.data_dir = self._get_data_directory()
        self.uuid_file = self.data_dir / "customer_uuid.txt"
        self.milestones_file = self.data_dir / "milestones.json"

        # Request timeout (small, fail fast). Override with UNITY_MCP_TELEMETRY_TIMEOUT
        try:
            self.timeout = float(os.environ.get(
                "UNITY_MCP_TELEMETRY_TIMEOUT", "1.5"))
        except Exception:
            self.timeout = 1.5
        try:
            logger.info("Telemetry timeout=%.2fs", self.timeout)
        except Exception:
            pass

        # Session tracking: fresh random id per server process.
        self.session_id = str(uuid.uuid4())

    def _is_disabled(self) -> bool:
        """Check if telemetry is disabled via environment variables."""
        # Several variable names are honored so users of different tooling
        # conventions can all opt out.
        disable_vars = [
            "DISABLE_TELEMETRY",
            "UNITY_MCP_DISABLE_TELEMETRY",
            "MCP_DISABLE_TELEMETRY"
        ]

        for var in disable_vars:
            if os.environ.get(var, "").lower() in ("true", "1", "yes", "on"):
                return True
        return False

    def _get_data_directory(self) -> Path:
        """Get the per-user directory for storing telemetry data (created on demand)."""
        if os.name == 'nt':  # Windows
            base_dir = Path(os.environ.get(
                'APPDATA', Path.home() / 'AppData' / 'Roaming'))
        elif os.name == 'posix':  # macOS/Linux
            if 'darwin' in os.uname().sysname.lower():  # macOS
                base_dir = Path.home() / 'Library' / 'Application Support'
            else:  # Linux
                base_dir = Path(os.environ.get('XDG_DATA_HOME',
                                               Path.home() / '.local' / 'share'))
        else:
            base_dir = Path.home() / '.unity-mcp'

        data_dir = base_dir / 'UnityMCP'
        data_dir.mkdir(parents=True, exist_ok=True)
        return data_dir

    def _validated_endpoint(self, candidate: str, fallback: str) -> str:
        """Validate telemetry endpoint URL scheme; allow only http/https.

        Falls back to the provided default on error.
        """
        try:
            parsed = urlparse(candidate)
            if parsed.scheme not in ("https", "http"):
                raise ValueError(f"Unsupported scheme: {parsed.scheme}")
            # Basic sanity: require network location and path
            if not parsed.netloc:
                raise ValueError("Missing netloc in endpoint")
            # Reject localhost/loopback endpoints in production to avoid accidental local overrides
            host = parsed.hostname or ""
            if host in ("localhost", "127.0.0.1", "::1"):
                raise ValueError(
                    "Localhost endpoints are not allowed for telemetry")
            return candidate
        except Exception as e:
            logger.debug(
                f"Invalid telemetry endpoint '{candidate}', using default. Error: {e}",
                exc_info=True,
            )
            return fallback
|
||||
|
||||
|
||||
class TelemetryCollector:
    """Main telemetry collection class.

    Owns the anonymous customer UUID, the persisted milestone history, and a
    bounded queue drained by a single daemon worker thread, so recording an
    event never blocks the caller.
    """

    def __init__(self):
        self.config = TelemetryConfig()
        # Anonymous per-install id; populated from disk (or generated) below.
        self._customer_uuid: str | None = None
        # milestone name -> {"timestamp": ..., "data": ...}
        self._milestones: dict[str, dict[str, Any]] = {}
        # Guards _milestones and its on-disk file.
        self._lock: threading.Lock = threading.Lock()
        # Bounded queue with single background worker (records only; no context propagation)
        self._queue: "queue.Queue[TelemetryRecord]" = queue.Queue(maxsize=1000)
        # Load persistent data before starting worker so first events have UUID
        self._load_persistent_data()
        self._worker: threading.Thread = threading.Thread(
            target=self._worker_loop, daemon=True)
        self._worker.start()

    def _load_persistent_data(self):
        """Load UUID and milestones from disk."""
        # Load customer UUID; generate (and best-effort persist) one if absent.
        try:
            if self.config.uuid_file.exists():
                # An empty or whitespace-only file still yields a fresh UUID.
                self._customer_uuid = self.config.uuid_file.read_text(
                    encoding="utf-8").strip() or str(uuid.uuid4())
            else:
                self._customer_uuid = str(uuid.uuid4())
                try:
                    self.config.uuid_file.write_text(
                        self._customer_uuid, encoding="utf-8")
                    if os.name == "posix":
                        # Restrict the identifier file to the current user.
                        os.chmod(self.config.uuid_file, 0o600)
                except OSError as e:
                    logger.debug(
                        f"Failed to persist customer UUID: {e}", exc_info=True)
        except OSError as e:
            logger.debug(f"Failed to load customer UUID: {e}", exc_info=True)
            self._customer_uuid = str(uuid.uuid4())

        # Load milestones (failure here must not affect UUID)
        try:
            if self.config.milestones_file.exists():
                content = self.config.milestones_file.read_text(
                    encoding="utf-8")
                self._milestones = json.loads(content) or {}
                if not isinstance(self._milestones, dict):
                    self._milestones = {}
        except (OSError, json.JSONDecodeError, ValueError) as e:
            logger.debug(f"Failed to load milestones: {e}", exc_info=True)
            self._milestones = {}

    def _save_milestones(self):
        """Save milestones to disk. Caller must hold self._lock."""
        try:
            self.config.milestones_file.write_text(
                json.dumps(self._milestones, indent=2),
                encoding="utf-8",
            )
        except OSError as e:
            logger.warning(f"Failed to save milestones: {e}", exc_info=True)

    def record_milestone(self, milestone: MilestoneType, data: dict[str, Any] | None = None) -> bool:
        """Record a milestone event, returns True if this is the first occurrence"""
        if not self.config.enabled:
            return False
        milestone_key = milestone.value
        with self._lock:
            if milestone_key in self._milestones:
                return False  # Already recorded
            milestone_data = {
                "timestamp": time.time(),
                "data": data or {},
            }
            self._milestones[milestone_key] = milestone_data
            self._save_milestones()

        # Also send as telemetry record (outside the lock; enqueue only)
        self.record(
            record_type=RecordType.USAGE,
            data={"milestone": milestone_key, **(data or {})},
            milestone=milestone
        )

        return True

    def record(self,
               record_type: RecordType,
               data: dict[str, Any],
               milestone: MilestoneType | None = None):
        """Record a telemetry event (async, non-blocking)"""
        if not self.config.enabled:
            return

        # Allow fallback sender when httpx is unavailable (no early return)

        record = TelemetryRecord(
            record_type=record_type,
            timestamp=time.time(),
            customer_uuid=self._customer_uuid or "unknown",
            session_id=self.config.session_id,
            data=data,
            milestone=milestone
        )
        # Enqueue for background worker (non-blocking). Drop on backpressure.
        try:
            self._queue.put_nowait(record)
        except queue.Full:
            logger.debug("Telemetry queue full; dropping %s",
                         record.record_type)

    def _worker_loop(self):
        """Background worker that serializes telemetry sends."""
        while True:
            rec = self._queue.get()
            try:
                # Run sender directly; do not reuse caller context/thread-locals
                self._send_telemetry(rec)
            except Exception:
                logger.debug("Telemetry worker send failed", exc_info=True)
            finally:
                with contextlib.suppress(Exception):
                    self._queue.task_done()

    def _send_telemetry(self, record: TelemetryRecord):
        """Send telemetry data to endpoint (runs on the worker thread)."""
        try:
            # System fingerprint (top-level remains concise; details stored in data JSON)
            _platform = platform.system()  # 'Darwin' | 'Linux' | 'Windows'
            _source = sys.platform  # 'darwin' | 'linux' | 'win32'
            _platform_detail = f"{_platform} {platform.release()} ({platform.machine()})"
            _python_version = platform.python_version()

            # Enrich data JSON so BigQuery stores detailed fields without schema change
            enriched_data = dict(record.data or {})
            enriched_data.setdefault("platform_detail", _platform_detail)
            enriched_data.setdefault("python_version", _python_version)

            payload = {
                "record": record.record_type.value,
                "timestamp": record.timestamp,
                "customer_uuid": record.customer_uuid,
                "session_id": record.session_id,
                "data": enriched_data,
                "version": MCP_VERSION,
                "platform": _platform,
                "source": _source,
            }

            if record.milestone:
                payload["milestone"] = record.milestone.value

            # Prefer httpx when available; otherwise fall back to urllib
            if httpx:
                with httpx.Client(timeout=self.config.timeout) as client:
                    # Re-validate endpoint at send time to handle dynamic changes
                    endpoint = self.config._validated_endpoint(
                        self.config.endpoint, self.config.default_endpoint)
                    response = client.post(endpoint, json=payload)
                    if 200 <= response.status_code < 300:
                        logger.debug(f"Telemetry sent: {record.record_type}")
                    else:
                        logger.warning(
                            f"Telemetry failed: HTTP {response.status_code}")
            else:
                import urllib.request
                import urllib.error
                data_bytes = json.dumps(payload).encode("utf-8")
                endpoint = self.config._validated_endpoint(
                    self.config.endpoint, self.config.default_endpoint)
                req = urllib.request.Request(
                    endpoint,
                    data=data_bytes,
                    headers={"Content-Type": "application/json"},
                    method="POST",
                )
                try:
                    with urllib.request.urlopen(req, timeout=self.config.timeout) as resp:
                        if 200 <= resp.getcode() < 300:
                            logger.debug(
                                f"Telemetry sent (urllib): {record.record_type}")
                        else:
                            logger.warning(
                                f"Telemetry failed (urllib): HTTP {resp.getcode()}")
                except urllib.error.URLError as ue:
                    logger.warning(f"Telemetry send failed (urllib): {ue}")

        except Exception as e:
            # Never let telemetry errors interfere with app functionality
            logger.debug(f"Telemetry send failed: {e}")
|
||||
|
||||
|
||||
# Global telemetry instance
|
||||
_telemetry_collector: TelemetryCollector | None = None
|
||||
|
||||
|
||||
def get_telemetry() -> TelemetryCollector:
    """Return the process-wide telemetry collector, creating it lazily on first use."""
    global _telemetry_collector
    if _telemetry_collector is None:
        # First caller constructs the singleton (also starts the worker thread).
        _telemetry_collector = TelemetryCollector()
    return _telemetry_collector
|
||||
|
||||
|
||||
def record_telemetry(record_type: RecordType,
                     data: dict[str, Any],
                     milestone: MilestoneType | None = None):
    """Forward a single telemetry event to the global collector (non-blocking)."""
    collector = get_telemetry()
    collector.record(record_type, data, milestone)
|
||||
|
||||
|
||||
def record_milestone(milestone: MilestoneType, data: dict[str, Any] | None = None) -> bool:
    """Record *milestone* via the global collector; True only on its first occurrence."""
    collector = get_telemetry()
    return collector.record_milestone(milestone, data)
|
||||
|
||||
|
||||
def record_tool_usage(tool_name: str, success: bool, duration_ms: float, error: str | None = None, sub_action: str | None = None):
    """Record tool usage telemetry.

    Args:
        tool_name: Name of the tool invoked (e.g., 'manage_scene').
        success: Whether the tool completed successfully.
        duration_ms: Execution duration in milliseconds.
        error: Optional error message (truncated if present).
        sub_action: Optional sub-action/operation within the tool (e.g., 'get_hierarchy').
    """
    payload = {
        "tool_name": tool_name,
        "success": success,
        "duration_ms": round(duration_ms, 2),
    }

    if sub_action is not None:
        try:
            payload["sub_action"] = str(sub_action)
        except Exception:
            # Telemetry must never be disruptive, even for odd objects.
            payload["sub_action"] = "unknown"

    if error:
        payload["error"] = str(error)[:200]  # keep error messages bounded

    record_telemetry(RecordType.TOOL_EXECUTION, payload)
|
||||
|
||||
|
||||
def record_resource_usage(resource_name: str, success: bool, duration_ms: float, error: str | None = None):
    """Record resource usage telemetry.

    Args:
        resource_name: Name of the resource invoked (e.g., 'get_tests').
        success: Whether the resource completed successfully.
        duration_ms: Execution duration in milliseconds.
        error: Optional error message (truncated if present).
    """
    payload = {
        "resource_name": resource_name,
        "success": success,
        "duration_ms": round(duration_ms, 2),
    }

    if error:
        payload["error"] = str(error)[:200]  # keep error messages bounded

    record_telemetry(RecordType.RESOURCE_RETRIEVAL, payload)
|
||||
|
||||
|
||||
def record_latency(operation: str, duration_ms: float, metadata: dict[str, Any] | None = None):
    """Record a latency sample for *operation*, merging any extra metadata."""
    payload = {
        "operation": operation,
        "duration_ms": round(duration_ms, 2),
    }
    if metadata:
        payload.update(metadata)
    record_telemetry(RecordType.LATENCY, payload)
|
||||
|
||||
|
||||
def record_failure(component: str, error: str, metadata: dict[str, Any] | None = None):
    """Record a failure event; the error text is truncated to 500 characters."""
    payload = {
        "component": component,
        "error": str(error)[:500],
    }
    if metadata:
        payload.update(metadata)
    record_telemetry(RecordType.FAILURE, payload)
|
||||
|
||||
|
||||
def is_telemetry_enabled() -> bool:
    """Report whether telemetry collection is currently active."""
    collector = get_telemetry()
    return collector.config.enabled
|
||||
|
|
@ -0,0 +1,164 @@
|
|||
"""
|
||||
Telemetry decorator for MCP for Unity tools
|
||||
"""
|
||||
|
||||
import functools
|
||||
import inspect
|
||||
import logging
|
||||
import time
|
||||
from typing import Callable, Any
|
||||
|
||||
from telemetry import record_resource_usage, record_tool_usage, record_milestone, MilestoneType
|
||||
|
||||
_log = logging.getLogger("unity-mcp-telemetry")
|
||||
_decorator_log_count = 0
|
||||
|
||||
|
||||
def _bound_action(func: Callable, args, kwargs) -> Any:
    """Best-effort extraction of the tool's 'action' argument (its sub-action)."""
    try:
        sig = inspect.signature(func)
        bound = sig.bind_partial(*args, **kwargs)
        bound.apply_defaults()
        return bound.arguments.get("action")
    except Exception:
        return None


def _note_invocation(mode: str, tool_name: str) -> None:
    """Log the first few decorator invocations (startup debugging aid)."""
    global _decorator_log_count
    if _decorator_log_count < 10:
        _log.info(f"telemetry_decorator {mode}: tool={tool_name}")
        _decorator_log_count += 1


def _emit_milestones(tool_name: str, action_val: Any) -> None:
    """Record first-use milestones after a successful tool call; never raises."""
    try:
        if tool_name == "manage_script" and action_val == "create":
            record_milestone(MilestoneType.FIRST_SCRIPT_CREATION)
        elif tool_name.startswith("manage_scene"):
            record_milestone(MilestoneType.FIRST_SCENE_MODIFICATION)
        record_milestone(MilestoneType.FIRST_TOOL_USAGE)
    except Exception:
        _log.debug("milestone emit failed", exc_info=True)


def _report_usage(tool_name: str, success: bool, start_time: float,
                  error: str | None, sub_action: Any) -> None:
    """Send the tool-usage record; telemetry failures are never propagated."""
    duration_ms = (time.time() - start_time) * 1000
    try:
        record_tool_usage(tool_name, success, duration_ms,
                          error, sub_action=sub_action)
    except Exception:
        _log.debug("record_tool_usage failed", exc_info=True)


def telemetry_tool(tool_name: str):
    """Decorator to add telemetry tracking to MCP tools.

    Handles sync and async tools alike: measures wall-clock duration, records
    success/failure plus the tool's 'action' sub-command, and emits first-use
    milestones after a successful call. Telemetry errors never affect the
    wrapped tool's result or raised exception.
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def _sync_wrapper(*args, **kwargs) -> Any:
            start_time = time.time()
            success = False
            error = None
            sub_action = _bound_action(func, args, kwargs)
            try:
                _note_invocation("sync", tool_name)
                result = func(*args, **kwargs)
                success = True
                _emit_milestones(tool_name, sub_action or kwargs.get("action"))
                return result
            except Exception as e:
                error = str(e)
                raise
            finally:
                _report_usage(tool_name, success, start_time, error, sub_action)

        @functools.wraps(func)
        async def _async_wrapper(*args, **kwargs) -> Any:
            start_time = time.time()
            success = False
            error = None
            sub_action = _bound_action(func, args, kwargs)
            try:
                _note_invocation("async", tool_name)
                result = await func(*args, **kwargs)
                success = True
                _emit_milestones(tool_name, sub_action or kwargs.get("action"))
                return result
            except Exception as e:
                error = str(e)
                raise
            finally:
                _report_usage(tool_name, success, start_time, error, sub_action)

        return _async_wrapper if inspect.iscoroutinefunction(func) else _sync_wrapper
    return decorator
|
||||
|
||||
|
||||
def telemetry_resource(resource_name: str):
    """Decorator to add telemetry tracking to MCP resources"""
    def decorator(func: Callable) -> Callable:
        def _note(mode: str) -> None:
            # Log only the first few invocations across all decorators.
            global _decorator_log_count
            if _decorator_log_count < 10:
                _log.info(
                    f"telemetry_decorator {mode}: resource={resource_name}")
                _decorator_log_count += 1

        def _finish(started: float, ok: bool, err: str | None) -> None:
            # Telemetry reporting must never propagate into the caller.
            elapsed_ms = (time.time() - started) * 1000
            try:
                record_resource_usage(resource_name, ok, elapsed_ms, err)
            except Exception:
                _log.debug("record_resource_usage failed", exc_info=True)

        @functools.wraps(func)
        def _sync_wrapper(*args, **kwargs) -> Any:
            started = time.time()
            ok = False
            err = None
            try:
                _note("sync")
                outcome = func(*args, **kwargs)
                ok = True
                return outcome
            except Exception as exc:
                err = str(exc)
                raise
            finally:
                _finish(started, ok, err)

        @functools.wraps(func)
        async def _async_wrapper(*args, **kwargs) -> Any:
            started = time.time()
            ok = False
            err = None
            try:
                _note("async")
                outcome = await func(*args, **kwargs)
                ok = True
                return outcome
            except Exception as exc:
                err = str(exc)
                raise
            finally:
                _finish(started, ok, err)

        return _async_wrapper if inspect.iscoroutinefunction(func) else _sync_wrapper
    return decorator
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script for MCP for Unity Telemetry System
|
||||
Run this to verify telemetry is working correctly
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
# Add src to Python path for imports
|
||||
sys.path.insert(0, str(Path(__file__).parent))
|
||||
|
||||
|
||||
def test_telemetry_basic():
    """Smoke-test the core telemetry API; return True when every call succeeds.

    Silent by design (no stdout noise in CI); failure is signalled by the
    boolean return value only.
    """
    try:
        from telemetry import (
            get_telemetry, record_telemetry, record_milestone,
            RecordType, MilestoneType, is_telemetry_enabled,
        )
    except ImportError:
        return False

    # Enabled status depends on the environment; just ensure the call works.
    is_telemetry_enabled()

    try:
        # Basic event record.
        record_telemetry(RecordType.VERSION, {
            "version": "3.0.2",
            "test_run": True,
        })
        # Milestone recording (first-occurrence flag is environment-dependent).
        record_milestone(MilestoneType.FIRST_STARTUP, {"test_mode": True})
        # Collector singleton construction.
        get_telemetry()
    except Exception:
        return False

    return True
|
||||
|
||||
|
||||
def test_telemetry_disabled():
    """Verify that the DISABLE_TELEMETRY env var turns telemetry off.

    Returns True when telemetry reports itself disabled and silently ignores
    records; False otherwise. Silent by design for CI.
    """
    # Opt out via environment, then reload the module to pick up fresh config.
    os.environ["DISABLE_TELEMETRY"] = "true"

    import importlib
    import telemetry
    importlib.reload(telemetry)

    from telemetry import is_telemetry_enabled, record_telemetry, RecordType

    if not is_telemetry_enabled():
        # Records must be silently ignored while disabled.
        record_telemetry(RecordType.USAGE, {"test": "should_be_ignored"})
        return True
    return False
|
||||
|
||||
|
||||
def test_data_storage():
    """Check that the collector exposes its on-disk storage locations.

    The uuid/milestone files may or may not exist on a fresh install; only
    their paths need to be reachable. Returns True on success.
    """
    try:
        from telemetry import get_telemetry

        collector = get_telemetry()
        # Touch each configured path; existence of the files is optional.
        _ = collector.config.data_dir
        collector.config.uuid_file.exists()
        collector.config.milestones_file.exists()
        return True
    except Exception:
        return False
|
||||
|
||||
|
||||
def main():
    """Run all telemetry self-tests; return True when every test passes.

    Silent runner for CI: outcomes are reported only through the return value
    (and ultimately the process exit code).
    """
    tests = [
        test_telemetry_basic,
        test_data_storage,
        # Runs last: it mutates DISABLE_TELEMETRY and reloads the module.
        test_telemetry_disabled,
    ]

    failed = 0
    for test in tests:
        try:
            if not test():
                failed += 1
        except Exception:
            # A crashing test counts as a failure, never aborts the run.
            failed += 1

    return failed == 0
|
||||
|
||||
|
||||
# Exit nonzero so shell/CI callers can detect a failing telemetry self-test.
if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
"""
|
||||
MCP Tools package - Auto-discovers and registers all tools in this directory.
|
||||
"""
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from telemetry_decorator import telemetry_tool
|
||||
|
||||
from registry import get_registered_tools
|
||||
from module_discovery import discover_modules
|
||||
|
||||
logger = logging.getLogger("mcp-for-unity-server")
|
||||
|
||||
# Export decorator for easy imports within tools
|
||||
__all__ = ['register_all_tools']
|
||||
|
||||
|
||||
def register_all_tools(mcp: FastMCP):
    """
    Auto-discover and register all tools in the tools/ directory.

    Any .py file in this directory or subdirectories with @mcp_for_unity_tool
    decorated functions will be automatically registered.
    """
    logger.info("Auto-discovering MCP for Unity Server tools...")
    # Importing every module under tools/ triggers the registration decorators.
    tools_dir = Path(__file__).parent
    list(discover_modules(tools_dir, __package__))

    registered = get_registered_tools()
    if not registered:
        logger.warning("No MCP tools registered!")
        return

    for info in registered:
        name = info['name']
        description = info['description']
        # Wrap with telemetry first, then hand the result to FastMCP.
        decorated = telemetry_tool(name)(info['func'])
        decorated = mcp.tool(
            name=name, description=description, **info['kwargs'])(decorated)
        info['func'] = decorated
        logger.debug(f"Registered tool: {name} - {description}")

    logger.info(f"Registered {len(registered)} MCP tools")
|
||||
|
|
@ -0,0 +1,25 @@
|
|||
"""
|
||||
Defines the execute_menu_item tool for executing and reading Unity Editor menu items.
|
||||
"""
|
||||
from typing import Annotated, Any
|
||||
|
||||
from fastmcp import Context
|
||||
|
||||
from models import MCPResponse
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import async_send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Execute a Unity menu item by path."
)
async def execute_menu_item(
    ctx: Context,
    menu_path: Annotated[str,
                         "Menu path for 'execute' or 'exists' (e.g., 'File/Save Project')"] | None = None,
) -> MCPResponse:
    await ctx.info(f"Processing execute_menu_item: {menu_path}")
    # Drop unset parameters so the Unity side applies its own defaults.
    params = {k: v for k, v in {"menuPath": menu_path}.items() if v is not None}
    response = await async_send_command_with_retry("execute_menu_item", params)
    return MCPResponse(**response) if isinstance(response, dict) else response
|
||||
|
|
@ -0,0 +1,92 @@
|
|||
"""
|
||||
Defines the manage_asset tool for interacting with Unity assets.
|
||||
"""
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import async_send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Performs asset operations (import, create, modify, delete, etc.) in Unity."
)
async def manage_asset(
    ctx: Context,
    action: Annotated[Literal["import", "create", "modify", "delete", "duplicate", "move", "rename", "search", "get_info", "create_folder", "get_components"], "Perform CRUD operations on assets."],
    path: Annotated[str, "Asset path (e.g., 'Materials/MyMaterial.mat') or search scope."],
    asset_type: Annotated[str,
                          "Asset type (e.g., 'Material', 'Folder') - required for 'create'."] | None = None,
    properties: Annotated[dict[str, Any],
                          "Dictionary of properties for 'create'/'modify'."] | None = None,
    destination: Annotated[str,
                           "Target path for 'duplicate'/'move'."] | None = None,
    generate_preview: Annotated[bool,
                                "Generate a preview/thumbnail for the asset when supported."] = False,
    search_pattern: Annotated[str,
                              "Search pattern (e.g., '*.prefab')."] | None = None,
    filter_type: Annotated[str, "Filter type for search"] | None = None,
    filter_date_after: Annotated[str,
                                 "Date after which to filter"] | None = None,
    page_size: Annotated[int | float | str, "Page size for pagination"] | None = None,
    page_number: Annotated[int | float | str, "Page number for pagination"] | None = None
) -> dict[str, Any]:
    """Perform an asset CRUD operation in Unity and return the raw response.

    Tolerates loosely-typed client input: `properties` may arrive as a JSON
    string and pagination values may be strings or floats; both are coerced
    before the request is forwarded to the C# 'manage_asset' handler.

    Returns the Unity response dict, or {"success": False, "message": ...}
    when the reply is not a dict.
    """
    # FIX: Context log methods are coroutines and were previously invoked
    # without `await`, so the messages were never emitted (and raised
    # "coroutine was never awaited" warnings). Also, fastmcp's Context
    # exposes `warning`, not `warn` — the old except-branch would have
    # raised AttributeError instead of logging.
    await ctx.info(f"Processing manage_asset: {action}")
    # Coerce 'properties' from JSON string to dict for client compatibility
    if isinstance(properties, str):
        try:
            properties = json.loads(properties)
            await ctx.info("manage_asset: coerced properties from JSON string to dict")
        except Exception as e:
            await ctx.warning(f"manage_asset: failed to parse properties JSON string: {e}")
            # Leave properties as-is; Unity side may handle defaults
    # Ensure properties is a dict if None
    if properties is None:
        properties = {}

    # Coerce numeric inputs defensively
    def _coerce_int(value, default=None):
        """Best-effort int conversion; bools and empty/'none'/'null' strings yield `default`."""
        if value is None:
            return default
        try:
            # bool is a subclass of int; treat it as "not a page number".
            if isinstance(value, bool):
                return default
            if isinstance(value, int):
                return int(value)
            s = str(value).strip()
            if s.lower() in ("", "none", "null"):
                return default
            # Accept "3.0"-style strings by going through float first.
            return int(float(s))
        except Exception:
            return default

    page_size = _coerce_int(page_size)
    page_number = _coerce_int(page_number)

    # Prepare parameters for the C# handler (camelCase keys match the C# side)
    params_dict = {
        "action": action.lower(),
        "path": path,
        "assetType": asset_type,
        "properties": properties,
        "destination": destination,
        "generatePreview": generate_preview,
        "searchPattern": search_pattern,
        "filterType": filter_type,
        "filterDateAfter": filter_date_after,
        "pageSize": page_size,
        "pageNumber": page_number
    }

    # Remove None values to avoid sending unnecessary nulls
    params_dict = {k: v for k, v in params_dict.items() if v is not None}

    # Get the current asyncio event loop
    loop = asyncio.get_running_loop()

    # Use centralized async retry helper to avoid blocking the event loop
    result = await async_send_command_with_retry("manage_asset", params_dict, loop=loop)
    # Return the result obtained from Unity
    return result if isinstance(result, dict) else {"success": False, "message": str(result)}
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from telemetry import is_telemetry_enabled, record_tool_usage
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Controls and queries the Unity editor's state and settings. Tip: pass booleans as true/false; if your client only sends strings, 'true'/'false' are accepted."
)
def manage_editor(
    ctx: Context,
    action: Annotated[Literal["telemetry_status", "telemetry_ping", "play", "pause", "stop", "get_state", "get_project_root", "get_windows",
                              "get_active_tool", "get_selection", "get_prefab_stage", "set_active_tool", "add_tag", "remove_tag", "get_tags", "add_layer", "remove_layer", "get_layers"], "Get and update the Unity Editor state."],
    wait_for_completion: Annotated[bool | str,
                                   "Optional. If True, waits for certain actions (accepts true/false or 'true'/'false')"] | None = None,
    tool_name: Annotated[str,
                         "Tool name when setting active tool"] | None = None,
    tag_name: Annotated[str,
                        "Tag name when adding and removing tags"] | None = None,
    layer_name: Annotated[str,
                          "Layer name when adding and removing layers"] | None = None,
) -> dict[str, Any]:
    """Route an editor-state query or mutation to the Unity C# handler.

    Two diagnostic actions ('telemetry_status', 'telemetry_ping') are
    answered locally without contacting Unity. Everything else is forwarded
    as a 'manage_editor' command; successful replies are unwrapped into
    {"success", "message", "data"}, failures are returned as received.
    """
    ctx.info(f"Processing manage_editor: {action}")

    def _to_bool(raw, fallback=None):
        # Tolerate clients that can only send strings: map common
        # truthy/falsy spellings, otherwise fall back to Python truthiness.
        if raw is None:
            return fallback
        if isinstance(raw, bool):
            return raw
        if isinstance(raw, str):
            lowered = raw.strip().lower()
            if lowered in ("true", "1", "yes", "on"):
                return True
            if lowered in ("false", "0", "no", "off"):
                return False
        return bool(raw)

    wait_for_completion = _to_bool(wait_for_completion)

    try:
        # Telemetry diagnostics are resolved locally.
        if action == "telemetry_status":
            return {"success": True, "telemetry_enabled": is_telemetry_enabled()}

        if action == "telemetry_ping":
            record_tool_usage("diagnostic_ping", True, 1.0, None)
            return {"success": True, "message": "telemetry ping queued"}

        # Build the payload; keys are camelCase to match the C# handler,
        # and None entries are dropped so Unity never sees spurious nulls.
        candidates = {
            "action": action,
            "waitForCompletion": wait_for_completion,
            "toolName": tool_name,
            "tagName": tag_name,
            "layerName": layer_name,
        }
        payload = {key: val for key, val in candidates.items() if val is not None}

        response = send_command_with_retry("manage_editor", payload)

        # Unwrap success into a friendlier shape; keep structured failures intact.
        if isinstance(response, dict) and response.get("success"):
            return {"success": True, "message": response.get("message", "Editor operation successful."), "data": response.get("data")}
        return response if isinstance(response, dict) else {"success": False, "message": str(response)}

    except Exception as e:
        return {"success": False, "message": f"Python error managing editor: {str(e)}"}
|
||||
|
|
@ -0,0 +1,208 @@
|
|||
import json
|
||||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Manage GameObjects. For booleans, send true/false; if your client only sends strings, 'true'/'false' are accepted. Vectors may be [x,y,z] or a string like '[x,y,z]'. For 'get_components', the `data` field contains a dictionary of component names and their serialized properties. For 'get_component', specify 'component_name' to retrieve only that component's serialized data."
)
def manage_gameobject(
    ctx: Context,
    action: Annotated[Literal["create", "modify", "delete", "find", "add_component", "remove_component", "set_component_property", "get_components", "get_component"], "Perform CRUD operations on GameObjects and components."],
    target: Annotated[str,
                      "GameObject identifier by name or path for modify/delete/component actions"] | None = None,
    search_method: Annotated[Literal["by_id", "by_name", "by_path", "by_tag", "by_layer", "by_component"],
                             "How to find objects. Used with 'find' and some 'target' lookups."] | None = None,
    name: Annotated[str,
                    "GameObject name for 'create' (initial name) and 'modify' (rename) actions ONLY. For 'find' action, use 'search_term' instead."] | None = None,
    tag: Annotated[str,
                   "Tag name - used for both 'create' (initial tag) and 'modify' (change tag)"] | None = None,
    parent: Annotated[str,
                      "Parent GameObject reference - used for both 'create' (initial parent) and 'modify' (change parent)"] | None = None,
    position: Annotated[list[float] | str,
                        "Position - [x,y,z] or string '[x,y,z]' for client compatibility"] | None = None,
    rotation: Annotated[list[float] | str,
                        "Rotation - [x,y,z] or string '[x,y,z]' for client compatibility"] | None = None,
    scale: Annotated[list[float] | str,
                     "Scale - [x,y,z] or string '[x,y,z]' for client compatibility"] | None = None,
    components_to_add: Annotated[list[str],
                                 "List of component names to add"] | None = None,
    primitive_type: Annotated[str,
                              "Primitive type for 'create' action"] | None = None,
    save_as_prefab: Annotated[bool | str,
                              "If True, saves the created GameObject as a prefab (accepts true/false or 'true'/'false')"] | None = None,
    prefab_path: Annotated[str, "Path for prefab creation"] | None = None,
    prefab_folder: Annotated[str,
                             "Folder for prefab creation"] | None = None,
    # --- Parameters for 'modify' ---
    set_active: Annotated[bool | str,
                          "If True, sets the GameObject active (accepts true/false or 'true'/'false')"] | None = None,
    layer: Annotated[str, "Layer name"] | None = None,
    components_to_remove: Annotated[list[str],
                                    "List of component names to remove"] | None = None,
    component_properties: Annotated[dict[str, dict[str, Any]] | str,
                                    """Dictionary of component names to their properties to set. For example:
    `{"MyScript": {"otherObject": {"find": "Player", "method": "by_name"}}}` assigns GameObject
    `{"MyScript": {"playerHealth": {"find": "Player", "component": "HealthComponent"}}}` assigns Component
    Example set nested property:
    - Access shared material: `{"MeshRenderer": {"sharedMaterial.color": [1, 0, 0, 1]}}`"""] | None = None,
    # --- Parameters for 'find' ---
    search_term: Annotated[str,
                           "Search term for 'find' action ONLY. Use this (not 'name') when searching for GameObjects."] | None = None,
    find_all: Annotated[bool | str,
                        "If True, finds all GameObjects matching the search term (accepts true/false or 'true'/'false')"] | None = None,
    search_in_children: Annotated[bool | str,
                                  "If True, searches in children of the GameObject (accepts true/false or 'true'/'false')"] | None = None,
    search_inactive: Annotated[bool | str,
                               "If True, searches inactive GameObjects (accepts true/false or 'true'/'false')"] | None = None,
    # -- Component Management Arguments --
    component_name: Annotated[str,
                              "Component name for 'add_component' and 'remove_component' actions"] | None = None,
    # Controls whether serialization of private [SerializeField] fields is included
    includeNonPublicSerialized: Annotated[bool | str,
                                          "Controls whether serialization of private [SerializeField] fields is included (accepts true/false or 'true'/'false')"] | None = None,
) -> dict[str, Any]:
    """Create, modify, delete, find, or inspect GameObjects and components.

    Dispatches a single 'manage_gameobject' command to the Unity C# handler.
    Loosely-typed client input is tolerated: booleans may be strings
    ('true'/'false'), vectors may be '[x,y,z]' strings, and
    `component_properties` may arrive as a JSON string.

    Returns a dict with at least 'success' and 'message'; on success the
    'data' field carries whatever the Unity side returned.
    """
    # NOTE(review): Context.info looks async in the sibling async tools
    # (they `await ctx.info(...)`); this sync call may not emit — confirm.
    ctx.info(f"Processing manage_gameobject: {action}")

    # Coercers to tolerate stringified booleans and vectors
    def _coerce_bool(value, default=None):
        # None -> default; real bools pass through; common truthy/falsy
        # spellings are mapped; anything else falls back to bool(value).
        if value is None:
            return default
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            v = value.strip().lower()
            if v in ("true", "1", "yes", "on"):
                return True
            if v in ("false", "0", "no", "off"):
                return False
        return bool(value)

    def _coerce_vec(value, default=None):
        # Accepts a 3-element list or a "[x,y,z]" / "x,y,z" / "x y z" string;
        # returns `default` for anything unparseable or non-finite.
        if value is None:
            return default
        import math

        def _to_vec3(parts):
            try:
                vec = [float(parts[0]), float(parts[1]), float(parts[2])]
            except (ValueError, TypeError):
                return default
            # Reject NaN/inf so Unity never receives non-finite transforms.
            return vec if all(math.isfinite(n) for n in vec) else default
        if isinstance(value, list) and len(value) == 3:
            return _to_vec3(value)
        if isinstance(value, str):
            s = value.strip()
            # minimal tolerant parse for "[x,y,z]" or "x,y,z"
            if s.startswith("[") and s.endswith("]"):
                s = s[1:-1]
            # support "x,y,z" and "x y z"
            parts = [p.strip() for p in (s.split(",") if "," in s else s.split())]
            if len(parts) == 3:
                return _to_vec3(parts)
        return default

    # default=<original value>: unparseable input is passed through
    # unchanged rather than dropped, so the server can report it.
    position = _coerce_vec(position, default=position)
    rotation = _coerce_vec(rotation, default=rotation)
    scale = _coerce_vec(scale, default=scale)
    save_as_prefab = _coerce_bool(save_as_prefab)
    set_active = _coerce_bool(set_active)
    find_all = _coerce_bool(find_all)
    search_in_children = _coerce_bool(search_in_children)
    search_inactive = _coerce_bool(search_inactive)
    includeNonPublicSerialized = _coerce_bool(includeNonPublicSerialized)

    # Coerce 'component_properties' from JSON string to dict for client compatibility
    if isinstance(component_properties, str):
        try:
            component_properties = json.loads(component_properties)
            ctx.info("manage_gameobject: coerced component_properties from JSON string to dict")
        except json.JSONDecodeError as e:
            return {"success": False, "message": f"Invalid JSON in component_properties: {e}"}
    # Ensure final type is a dict (object) if provided
    if component_properties is not None and not isinstance(component_properties, dict):
        return {"success": False, "message": "component_properties must be a JSON object (dict)."}
    try:
        # Map tag to search_term when search_method is by_tag for backward compatibility
        if action == "find" and search_method == "by_tag" and tag is not None and search_term is None:
            search_term = tag

        # Validate parameter usage to prevent silent failures
        if action == "find":
            if name is not None:
                return {
                    "success": False,
                    "message": "For 'find' action, use 'search_term' parameter, not 'name'. Remove 'name' parameter. Example: search_term='Player', search_method='by_name'"
                }
            if search_term is None:
                return {
                    "success": False,
                    "message": "For 'find' action, 'search_term' parameter is required. Use search_term (not 'name') to specify what to find."
                }

        if action in ["create", "modify"]:
            if search_term is not None:
                return {
                    "success": False,
                    "message": f"For '{action}' action, use 'name' parameter, not 'search_term'."
                }

        # Prepare parameters, removing None values
        # (camelCase keys match the C# handler's expected schema)
        params = {
            "action": action,
            "target": target,
            "searchMethod": search_method,
            "name": name,
            "tag": tag,
            "parent": parent,
            "position": position,
            "rotation": rotation,
            "scale": scale,
            "componentsToAdd": components_to_add,
            "primitiveType": primitive_type,
            "saveAsPrefab": save_as_prefab,
            "prefabPath": prefab_path,
            "prefabFolder": prefab_folder,
            "setActive": set_active,
            "layer": layer,
            "componentsToRemove": components_to_remove,
            "componentProperties": component_properties,
            "searchTerm": search_term,
            "findAll": find_all,
            "searchInChildren": search_in_children,
            "searchInactive": search_inactive,
            "componentName": component_name,
            "includeNonPublicSerialized": includeNonPublicSerialized
        }
        params = {k: v for k, v in params.items() if v is not None}

        # --- Handle Prefab Path Logic ---
        # Check if 'saveAsPrefab' is explicitly True in params
        if action == "create" and params.get("saveAsPrefab"):
            if "prefabPath" not in params:
                if "name" not in params or not params["name"]:
                    return {"success": False, "message": "Cannot create default prefab path: 'name' parameter is missing."}
                # Use the provided prefab_folder (which has a default) and the name to construct the path
                # NOTE(review): prefab_folder defaults to None here, so omitting
                # it yields a "None/<name>.prefab" path — confirm intended default.
                constructed_path = f"{prefab_folder}/{params['name']}.prefab"
                # Ensure clean path separators (Unity prefers '/')
                params["prefabPath"] = constructed_path.replace("\\", "/")
            elif not params["prefabPath"].lower().endswith(".prefab"):
                return {"success": False, "message": f"Invalid prefab_path: '{params['prefabPath']}' must end with .prefab"}
            # Ensure prefabFolder itself isn't sent if prefabPath was constructed or provided
            # The C# side only needs the final prefabPath
            params.pop("prefabFolder", None)
        # --------------------------------

        # Use centralized retry helper
        response = send_command_with_retry("manage_gameobject", params)

        # Check if the response indicates success
        # If the response is not successful, raise an exception with the error message
        if isinstance(response, dict) and response.get("success"):
            return {"success": True, "message": response.get("message", "GameObject operation successful."), "data": response.get("data")}
        return response if isinstance(response, dict) else {"success": False, "message": str(response)}

    except Exception as e:
        return {"success": False, "message": f"Python error managing GameObject: {str(e)}"}
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Bridge for prefab management commands (stage control and creation)."
)
def manage_prefabs(
    ctx: Context,
    action: Annotated[Literal[
        "open_stage",
        "close_stage",
        "save_open_stage",
        "create_from_gameobject",
    ], "Manage prefabs (stage control and creation)."],
    prefab_path: Annotated[str,
                           "Prefab asset path relative to Assets e.g. Assets/Prefabs/favorite.prefab"] | None = None,
    mode: Annotated[str,
                    "Optional prefab stage mode (only 'InIsolation' is currently supported)"] | None = None,
    save_before_close: Annotated[bool,
                                 "When true, `close_stage` will save the prefab before exiting the stage."] | None = None,
    target: Annotated[str,
                      "Scene GameObject name required for create_from_gameobject"] | None = None,
    allow_overwrite: Annotated[bool,
                               "Allow replacing an existing prefab at the same path"] | None = None,
    search_inactive: Annotated[bool,
                               "Include inactive objects when resolving the target name"] | None = None,
) -> dict[str, Any]:
    """Forward a prefab stage/creation command to the Unity C# handler.

    Builds a 'manage_prefabs' payload from the supplied arguments (omitting
    unset ones), then unwraps a successful reply into
    {"success", "message", "data"}; failures are returned as received.
    """
    ctx.info(f"Processing manage_prefabs: {action}")
    try:
        payload: dict[str, Any] = {"action": action}

        # Each entry: (wire key, value, needs bool coercion). Order matches
        # the handler's expected schema. String-ish params are included only
        # when truthy; boolean params are tri-state (None means "omit").
        spec = (
            ("prefabPath", prefab_path, False),
            ("mode", mode, False),
            ("saveBeforeClose", save_before_close, True),
            ("target", target, False),
            ("allowOverwrite", allow_overwrite, True),
            ("searchInactive", search_inactive, True),
        )
        for key, value, is_flag in spec:
            if is_flag:
                if value is not None:
                    payload[key] = bool(value)
            elif value:
                payload[key] = value

        reply = send_command_with_retry("manage_prefabs", payload)

        if isinstance(reply, dict) and reply.get("success"):
            return {
                "success": True,
                "message": reply.get("message", "Prefab operation successful."),
                "data": reply.get("data"),
            }
        return reply if isinstance(reply, dict) else {"success": False, "message": str(reply)}
    except Exception as exc:
        return {"success": False, "message": f"Python error managing prefabs: {exc}"}
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
from typing import Annotated, Literal, Any
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description="Manage Unity scenes. Tip: For broad client compatibility, pass build_index as a quoted string (e.g., '0').")
def manage_scene(
    ctx: Context,
    action: Annotated[Literal["create", "load", "save", "get_hierarchy", "get_active", "get_build_settings"], "Perform CRUD operations on Unity scenes."],
    name: Annotated[str,
                    "Scene name. Not required get_active/get_build_settings"] | None = None,
    path: Annotated[str,
                    "Asset path for scene operations (default: 'Assets/')"] | None = None,
    build_index: Annotated[int | str,
                           "Build index for load/build settings actions (accepts int or string, e.g., 0 or '0')"] | None = None,
) -> dict[str, Any]:
    """Forward a scene operation to the Unity C# 'manage_scene' handler.

    `build_index` is coerced defensively (ints, numeric strings, or
    "3.0"-style strings all work; bools and junk become "unset"). Successful
    replies are unwrapped into {"success", "message", "data"}; failures are
    returned as received.
    """
    ctx.info(f"Processing manage_scene: {action}")
    try:
        def _as_int(raw, fallback=None):
            # bool is an int subclass — exclude it explicitly.
            if raw is None or isinstance(raw, bool):
                return fallback
            try:
                if isinstance(raw, int):
                    return int(raw)
                text = str(raw).strip()
                if text.lower() in ("", "none", "null"):
                    return fallback
                return int(float(text))
            except Exception:
                return fallback

        idx = _as_int(build_index, fallback=None)

        # Build the payload, skipping unset values.
        request = {"action": action}
        if name:
            request["name"] = name
        if path:
            request["path"] = path
        if idx is not None:
            request["buildIndex"] = idx

        reply = send_command_with_retry("manage_scene", request)

        # Preserve structured failure data; unwrap success into a friendlier shape
        if isinstance(reply, dict) and reply.get("success"):
            return {"success": True, "message": reply.get("message", "Scene operation successful."), "data": reply.get("data")}
        return reply if isinstance(reply, dict) else {"success": False, "message": str(reply)}

    except Exception as e:
        return {"success": False, "message": f"Python error managing scene: {str(e)}"}
|
||||
|
|
@ -0,0 +1,552 @@
|
|||
import base64
|
||||
import os
|
||||
from typing import Annotated, Any, Literal
|
||||
from urllib.parse import urlparse, unquote
|
||||
|
||||
from fastmcp import FastMCP, Context
|
||||
|
||||
from registry import mcp_for_unity_tool
|
||||
import unity_connection
|
||||
|
||||
|
||||
def _split_uri(uri: str) -> tuple[str, str]:
|
||||
"""Split an incoming URI or path into (name, directory) suitable for Unity.
|
||||
|
||||
Rules:
|
||||
- unity://path/Assets/... → keep as Assets-relative (after decode/normalize)
|
||||
- file://... → percent-decode, normalize, strip host and leading slashes,
|
||||
then, if any 'Assets' segment exists, return path relative to that 'Assets' root.
|
||||
Otherwise, fall back to original name/dir behavior.
|
||||
- plain paths → decode/normalize separators; if they contain an 'Assets' segment,
|
||||
return relative to 'Assets'.
|
||||
"""
|
||||
raw_path: str
|
||||
if uri.startswith("unity://path/"):
|
||||
raw_path = uri[len("unity://path/"):]
|
||||
elif uri.startswith("file://"):
|
||||
parsed = urlparse(uri)
|
||||
host = (parsed.netloc or "").strip()
|
||||
p = parsed.path or ""
|
||||
# UNC: file://server/share/... -> //server/share/...
|
||||
if host and host.lower() != "localhost":
|
||||
p = f"//{host}{p}"
|
||||
# Use percent-decoded path, preserving leading slashes
|
||||
raw_path = unquote(p)
|
||||
else:
|
||||
raw_path = uri
|
||||
|
||||
# Percent-decode any residual encodings and normalize separators
|
||||
raw_path = unquote(raw_path).replace("\\", "/")
|
||||
# Strip leading slash only for Windows drive-letter forms like "/C:/..."
|
||||
if os.name == "nt" and len(raw_path) >= 3 and raw_path[0] == "/" and raw_path[2] == ":":
|
||||
raw_path = raw_path[1:]
|
||||
|
||||
# Normalize path (collapse ../, ./)
|
||||
norm = os.path.normpath(raw_path).replace("\\", "/")
|
||||
|
||||
# If an 'Assets' segment exists, compute path relative to it (case-insensitive)
|
||||
parts = [p for p in norm.split("/") if p not in ("", ".")]
|
||||
idx = next((i for i, seg in enumerate(parts)
|
||||
if seg.lower() == "assets"), None)
|
||||
assets_rel = "/".join(parts[idx:]) if idx is not None else None
|
||||
|
||||
effective_path = assets_rel if assets_rel else norm
|
||||
# For POSIX absolute paths outside Assets, drop the leading '/'
|
||||
# to return a clean relative-like directory (e.g., '/tmp' -> 'tmp').
|
||||
if effective_path.startswith("/"):
|
||||
effective_path = effective_path[1:]
|
||||
|
||||
name = os.path.splitext(os.path.basename(effective_path))[0]
|
||||
directory = os.path.dirname(effective_path)
|
||||
return name, directory
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=(
|
||||
"""Apply small text edits to a C# script identified by URI.
|
||||
IMPORTANT: This tool replaces EXACT character positions. Always verify content at target lines/columns BEFORE editing!
|
||||
RECOMMENDED WORKFLOW:
|
||||
1. First call resources/read with start_line/line_count to verify exact content
|
||||
2. Count columns carefully (or use find_in_file to locate patterns)
|
||||
3. Apply your edit with precise coordinates
|
||||
4. Consider script_apply_edits with anchors for safer pattern-based replacements
|
||||
Notes:
|
||||
- For method/class operations, use script_apply_edits (safer, structured edits)
|
||||
- For pattern-based replacements, consider anchor operations in script_apply_edits
|
||||
- Lines, columns are 1-indexed
|
||||
- Tabs count as 1 column"""
|
||||
))
|
||||
def apply_text_edits(
|
||||
ctx: Context,
|
||||
uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."],
|
||||
edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script, i.e. a list of {startLine,startCol,endLine,endCol,newText} (1-indexed!)"],
|
||||
precondition_sha256: Annotated[str,
|
||||
"Optional SHA256 of the script to edit, used to prevent concurrent edits"] | None = None,
|
||||
strict: Annotated[bool,
|
||||
"Optional strict flag, used to enforce strict mode"] | None = None,
|
||||
options: Annotated[dict[str, Any],
|
||||
"Optional options, used to pass additional options to the script editor"] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
ctx.info(f"Processing apply_text_edits: {uri}")
|
||||
name, directory = _split_uri(uri)
|
||||
|
||||
# Normalize common aliases/misuses for resilience:
|
||||
# - Accept LSP-style range objects: {range:{start:{line,character}, end:{...}}, newText|text}
|
||||
# - Accept index ranges as a 2-int array: {range:[startIndex,endIndex], text}
|
||||
# If normalization is required, read current contents to map indices -> 1-based line/col.
|
||||
def _needs_normalization(arr: list[dict[str, Any]]) -> bool:
|
||||
for e in arr or []:
|
||||
if ("startLine" not in e) or ("startCol" not in e) or ("endLine" not in e) or ("endCol" not in e) or ("newText" not in e and "text" in e):
|
||||
return True
|
||||
return False
|
||||
|
||||
normalized_edits: list[dict[str, Any]] = []
|
||||
warnings: list[str] = []
|
||||
if _needs_normalization(edits):
|
||||
# Read file to support index->line/col conversion when needed
|
||||
read_resp = unity_connection.send_command_with_retry("manage_script", {
|
||||
"action": "read",
|
||||
"name": name,
|
||||
"path": directory,
|
||||
})
|
||||
if not (isinstance(read_resp, dict) and read_resp.get("success")):
|
||||
return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)}
|
||||
data = read_resp.get("data", {})
|
||||
contents = data.get("contents")
|
||||
if not contents and data.get("contentsEncoded"):
|
||||
try:
|
||||
contents = base64.b64decode(data.get("encodedContents", "").encode(
|
||||
"utf-8")).decode("utf-8", "replace")
|
||||
except Exception:
|
||||
contents = contents or ""
|
||||
|
||||
# Helper to map 0-based character index to 1-based line/col
|
||||
def line_col_from_index(idx: int) -> tuple[int, int]:
|
||||
if idx <= 0:
|
||||
return 1, 1
|
||||
# Count lines up to idx and position within line
|
||||
nl_count = contents.count("\n", 0, idx)
|
||||
line = nl_count + 1
|
||||
last_nl = contents.rfind("\n", 0, idx)
|
||||
col = (idx - (last_nl + 1)) + 1 if last_nl >= 0 else idx + 1
|
||||
return line, col
|
||||
|
||||
for e in edits or []:
|
||||
e2 = dict(e)
|
||||
# Map text->newText if needed
|
||||
if "newText" not in e2 and "text" in e2:
|
||||
e2["newText"] = e2.pop("text")
|
||||
|
||||
if "startLine" in e2 and "startCol" in e2 and "endLine" in e2 and "endCol" in e2:
|
||||
# Guard: explicit fields must be 1-based.
|
||||
zero_based = False
|
||||
for k in ("startLine", "startCol", "endLine", "endCol"):
|
||||
try:
|
||||
if int(e2.get(k, 1)) < 1:
|
||||
zero_based = True
|
||||
except Exception:
|
||||
pass
|
||||
if zero_based:
|
||||
if strict:
|
||||
return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": normalized_edits}}
|
||||
# Normalize by clamping to 1 and warn
|
||||
for k in ("startLine", "startCol", "endLine", "endCol"):
|
||||
try:
|
||||
if int(e2.get(k, 1)) < 1:
|
||||
e2[k] = 1
|
||||
except Exception:
|
||||
pass
|
||||
warnings.append(
|
||||
"zero_based_explicit_fields_normalized")
|
||||
normalized_edits.append(e2)
|
||||
continue
|
||||
|
||||
rng = e2.get("range")
|
||||
if isinstance(rng, dict):
|
||||
# LSP style: 0-based
|
||||
s = rng.get("start", {})
|
||||
t = rng.get("end", {})
|
||||
e2["startLine"] = int(s.get("line", 0)) + 1
|
||||
e2["startCol"] = int(s.get("character", 0)) + 1
|
||||
e2["endLine"] = int(t.get("line", 0)) + 1
|
||||
e2["endCol"] = int(t.get("character", 0)) + 1
|
||||
e2.pop("range", None)
|
||||
normalized_edits.append(e2)
|
||||
continue
|
||||
if isinstance(rng, (list, tuple)) and len(rng) == 2:
|
||||
try:
|
||||
a = int(rng[0])
|
||||
b = int(rng[1])
|
||||
if b < a:
|
||||
a, b = b, a
|
||||
sl, sc = line_col_from_index(a)
|
||||
el, ec = line_col_from_index(b)
|
||||
e2["startLine"] = sl
|
||||
e2["startCol"] = sc
|
||||
e2["endLine"] = el
|
||||
e2["endCol"] = ec
|
||||
e2.pop("range", None)
|
||||
normalized_edits.append(e2)
|
||||
continue
|
||||
except Exception:
|
||||
pass
|
||||
# Could not normalize this edit
|
||||
return {
|
||||
"success": False,
|
||||
"code": "missing_field",
|
||||
"message": "apply_text_edits requires startLine/startCol/endLine/endCol/newText or a normalizable 'range'",
|
||||
"data": {"expected": ["startLine", "startCol", "endLine", "endCol", "newText"], "got": e}
|
||||
}
|
||||
else:
|
||||
# Even when edits appear already in explicit form, validate 1-based coordinates.
|
||||
normalized_edits = []
|
||||
for e in edits or []:
|
||||
e2 = dict(e)
|
||||
has_all = all(k in e2 for k in (
|
||||
"startLine", "startCol", "endLine", "endCol"))
|
||||
if has_all:
|
||||
zero_based = False
|
||||
for k in ("startLine", "startCol", "endLine", "endCol"):
|
||||
try:
|
||||
if int(e2.get(k, 1)) < 1:
|
||||
zero_based = True
|
||||
except Exception:
|
||||
pass
|
||||
if zero_based:
|
||||
if strict:
|
||||
return {"success": False, "code": "zero_based_explicit_fields", "message": "Explicit line/col fields are 1-based; received zero-based.", "data": {"normalizedEdits": [e2]}}
|
||||
for k in ("startLine", "startCol", "endLine", "endCol"):
|
||||
try:
|
||||
if int(e2.get(k, 1)) < 1:
|
||||
e2[k] = 1
|
||||
except Exception:
|
||||
pass
|
||||
if "zero_based_explicit_fields_normalized" not in warnings:
|
||||
warnings.append(
|
||||
"zero_based_explicit_fields_normalized")
|
||||
normalized_edits.append(e2)
|
||||
|
||||
# Preflight: detect overlapping ranges among normalized line/col spans
|
||||
def _pos_tuple(e: dict[str, Any], key_start: bool) -> tuple[int, int]:
|
||||
return (
|
||||
int(e.get("startLine", 1)) if key_start else int(
|
||||
e.get("endLine", 1)),
|
||||
int(e.get("startCol", 1)) if key_start else int(
|
||||
e.get("endCol", 1)),
|
||||
)
|
||||
|
||||
def _le(a: tuple[int, int], b: tuple[int, int]) -> bool:
|
||||
return a[0] < b[0] or (a[0] == b[0] and a[1] <= b[1])
|
||||
|
||||
# Consider only true replace ranges (non-zero length). Pure insertions (zero-width) don't overlap.
|
||||
spans = []
|
||||
for e in normalized_edits or []:
|
||||
try:
|
||||
s = _pos_tuple(e, True)
|
||||
t = _pos_tuple(e, False)
|
||||
if s != t:
|
||||
spans.append((s, t))
|
||||
except Exception:
|
||||
# If coordinates missing or invalid, let the server validate later
|
||||
pass
|
||||
|
||||
if spans:
|
||||
spans_sorted = sorted(spans, key=lambda p: (p[0][0], p[0][1]))
|
||||
for i in range(1, len(spans_sorted)):
|
||||
prev_end = spans_sorted[i-1][1]
|
||||
curr_start = spans_sorted[i][0]
|
||||
# Overlap if prev_end > curr_start (strict), i.e., not prev_end <= curr_start
|
||||
if not _le(prev_end, curr_start):
|
||||
conflicts = [{
|
||||
"startA": {"line": spans_sorted[i-1][0][0], "col": spans_sorted[i-1][0][1]},
|
||||
"endA": {"line": spans_sorted[i-1][1][0], "col": spans_sorted[i-1][1][1]},
|
||||
"startB": {"line": spans_sorted[i][0][0], "col": spans_sorted[i][0][1]},
|
||||
"endB": {"line": spans_sorted[i][1][0], "col": spans_sorted[i][1][1]},
|
||||
}]
|
||||
return {"success": False, "code": "overlap", "data": {"status": "overlap", "conflicts": conflicts}}
|
||||
|
||||
# Note: Do not auto-compute precondition if missing; callers should supply it
|
||||
# via mcp__unity__get_sha or a prior read. This avoids hidden extra calls and
|
||||
# preserves existing call-count expectations in clients/tests.
|
||||
|
||||
# Default options: for multi-span batches, prefer atomic to avoid mid-apply imbalance
|
||||
opts: dict[str, Any] = dict(options or {})
|
||||
try:
|
||||
if len(normalized_edits) > 1 and "applyMode" not in opts:
|
||||
opts["applyMode"] = "atomic"
|
||||
except Exception:
|
||||
pass
|
||||
# Support optional debug preview for span-by-span simulation without write
|
||||
if opts.get("debug_preview"):
|
||||
try:
|
||||
import difflib
|
||||
# Apply locally to preview final result
|
||||
lines = []
|
||||
# Build an indexable original from a read if we normalized from read; otherwise skip
|
||||
prev = ""
|
||||
# We cannot guarantee file contents here without a read; return normalized spans only
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Preview only (no write)",
|
||||
"data": {
|
||||
"normalizedEdits": normalized_edits,
|
||||
"preview": True
|
||||
}
|
||||
}
|
||||
except Exception as e:
|
||||
return {"success": False, "code": "preview_failed", "message": f"debug_preview failed: {e}", "data": {"normalizedEdits": normalized_edits}}
|
||||
|
||||
params = {
|
||||
"action": "apply_text_edits",
|
||||
"name": name,
|
||||
"path": directory,
|
||||
"edits": normalized_edits,
|
||||
"precondition_sha256": precondition_sha256,
|
||||
"options": opts,
|
||||
}
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
resp = unity_connection.send_command_with_retry("manage_script", params)
|
||||
if isinstance(resp, dict):
|
||||
data = resp.setdefault("data", {})
|
||||
data.setdefault("normalizedEdits", normalized_edits)
|
||||
if warnings:
|
||||
data.setdefault("warnings", warnings)
|
||||
if resp.get("success") and (options or {}).get("force_sentinel_reload"):
|
||||
# Optional: flip sentinel via menu if explicitly requested
|
||||
try:
|
||||
import threading
|
||||
import time
|
||||
import json
|
||||
import glob
|
||||
import os
|
||||
|
||||
def _latest_status() -> dict | None:
|
||||
try:
|
||||
files = sorted(glob.glob(os.path.expanduser(
|
||||
"~/.unity-mcp/unity-mcp-status-*.json")), key=os.path.getmtime, reverse=True)
|
||||
if not files:
|
||||
return None
|
||||
with open(files[0], "r") as f:
|
||||
return json.loads(f.read())
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def _flip_async():
|
||||
try:
|
||||
time.sleep(0.1)
|
||||
st = _latest_status()
|
||||
if st and st.get("reloading"):
|
||||
return
|
||||
unity_connection.send_command_with_retry(
|
||||
"execute_menu_item",
|
||||
{"menuPath": "MCP/Flip Reload Sentinel"},
|
||||
max_retries=0,
|
||||
retry_ms=0,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
threading.Thread(target=_flip_async, daemon=True).start()
|
||||
except Exception:
|
||||
pass
|
||||
return resp
|
||||
return resp
|
||||
return {"success": False, "message": str(resp)}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("Create a new C# script at the given project path."))
def create_script(
    ctx: Context,
    path: Annotated[str, "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"],
    contents: Annotated[str, "Contents of the script to create. Note, this is Base64 encoded over transport."],
    script_type: Annotated[str, "Script type (e.g., 'C#')"] | None = None,
    namespace: Annotated[str, "Namespace for the script"] | None = None,
) -> dict[str, Any]:
    """Create a new C# script under Assets/ via Unity's manage_script handler.

    Performs local validation (path must live under 'Assets/', contain no
    traversal, and end in '.cs') before any round-trip to Unity, and ships
    `contents` base64-encoded to avoid JSON escaping issues.

    Returns the Unity response dict, or a {"success": False, ...} error dict
    when validation fails or the reply is not a dict.
    """
    ctx.info(f"Processing create_script: {path}")
    # Local validation to avoid round-trips on obviously bad input.
    # Normalize separators FIRST so Windows-style back-slash paths are split
    # the same way they are validated: previously name/directory came from the
    # raw path via os.path, which does not split on '\\' when running on
    # POSIX, so 'Assets\\Scripts\\X.cs' could pass validation with a wrong
    # name/directory. Deriving both from norm_path keeps them consistent.
    norm_path = os.path.normpath(
        (path or "").replace("\\", "/")).replace("\\", "/")
    name = os.path.splitext(os.path.basename(norm_path))[0]
    directory = os.path.dirname(norm_path)
    if not directory or directory.split("/")[0].lower() != "assets":
        return {"success": False, "code": "path_outside_assets", "message": f"path must be under 'Assets/'; got '{path}'."}
    if ".." in norm_path.split("/") or norm_path.startswith("/"):
        return {"success": False, "code": "bad_path", "message": "path must not contain traversal or be absolute."}
    if not name:
        return {"success": False, "code": "bad_path", "message": "path must include a script file name."}
    if not norm_path.lower().endswith(".cs"):
        return {"success": False, "code": "bad_extension", "message": "script file must end with .cs."}
    params: dict[str, Any] = {
        "action": "create",
        "name": name,
        "path": directory,
        "namespace": namespace,
        "scriptType": script_type,
    }
    if contents:
        # Base64 keeps arbitrary source text safe inside the JSON transport.
        params["encodedContents"] = base64.b64encode(
            contents.encode("utf-8")).decode("utf-8")
        params["contentsEncoded"] = True
    params = {k: v for k, v in params.items() if v is not None}
    resp = unity_connection.send_command_with_retry("manage_script", params)
    return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("Delete a C# script by URI or Assets-relative path."))
def delete_script(
    ctx: Context,
    uri: Annotated[str, "URI of the script to delete under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."]
) -> dict[str, Any]:
    """Resolve *uri* to an Assets-relative script and ask Unity to delete it."""
    ctx.info(f"Processing delete_script: {uri}")
    script_name, script_dir = _split_uri(uri)
    # Refuse anything that does not resolve into the Assets/ tree.
    in_assets = bool(script_dir) and script_dir.split("/")[0].lower() == "assets"
    if not in_assets:
        return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."}
    response = unity_connection.send_command_with_retry(
        "manage_script",
        {"action": "delete", "name": script_name, "path": script_dir},
    )
    if isinstance(response, dict):
        return response
    return {"success": False, "message": str(response)}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("Validate a C# script and return diagnostics."))
def validate_script(
    ctx: Context,
    uri: Annotated[str, "URI of the script to validate under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."],
    level: Annotated[Literal['basic', 'standard'],
                     "Validation level"] = "basic",
    include_diagnostics: Annotated[bool,
                                   "Include full diagnostics and summary"] = False
) -> dict[str, Any]:
    """Run Unity-side validation on a script and summarize its diagnostics.

    By default only warning/error counts are returned; set
    include_diagnostics=True for the full diagnostic list plus a summary.
    """
    ctx.info(f"Processing validate_script: {uri}")
    script_name, script_dir = _split_uri(uri)
    if not script_dir or script_dir.split("/")[0].lower() != "assets":
        return {"success": False, "code": "path_outside_assets", "message": "URI must resolve under 'Assets/'."}
    # Defensive re-check in case a caller bypasses the Literal annotation.
    if level not in ("basic", "standard"):
        return {"success": False, "code": "bad_level", "message": "level must be 'basic' or 'standard'."}
    response = unity_connection.send_command_with_retry("manage_script", {
        "action": "validate",
        "name": script_name,
        "path": script_dir,
        "level": level,
    })
    if not isinstance(response, dict):
        return {"success": False, "message": str(response)}
    if not response.get("success"):
        return response
    diagnostics = response.get("data", {}).get("diagnostics", []) or []
    severities = [str(d.get("severity", "")).lower() for d in diagnostics]
    warning_total = severities.count("warning")
    error_total = sum(1 for s in severities if s in ("error", "fatal"))
    if include_diagnostics:
        return {"success": True, "data": {"diagnostics": diagnostics, "summary": {"warnings": warning_total, "errors": error_total}}}
    return {"success": True, "data": {"warnings": warning_total, "errors": error_total}}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("Compatibility router for legacy script operations. Prefer apply_text_edits (ranges) or script_apply_edits (structured) for edits."))
def manage_script(
    ctx: Context,
    action: Annotated[Literal['create', 'read', 'delete'], "Perform CRUD operations on C# scripts."],
    name: Annotated[str, "Script name (no .cs extension)", "Name of the script to create"],
    path: Annotated[str, "Asset path (default: 'Assets/')", "Path under Assets/ to create the script at, e.g., 'Assets/Scripts/My.cs'"],
    contents: Annotated[str, "Contents of the script to create",
                        "C# code for 'create'/'update'"] | None = None,
    script_type: Annotated[str, "Script type (e.g., 'C#')",
                           "Type hint (e.g., 'MonoBehaviour')"] | None = None,
    namespace: Annotated[str, "Namespace for the script"] | None = None,
) -> dict[str, Any]:
    """Legacy CRUD router for C# scripts; forwards to Unity's manage_script handler."""
    ctx.info(f"Processing manage_script: {action}")
    try:
        payload: dict[str, Any] = {
            "action": action,
            "name": name,
            "path": path,
            "namespace": namespace,
            "scriptType": script_type,
        }
        # Base64-encode bodies for 'create' so JSON escaping cannot corrupt them.
        if contents:
            if action == 'create':
                payload["encodedContents"] = base64.b64encode(
                    contents.encode('utf-8')).decode('utf-8')
                payload["contentsEncoded"] = True
            else:
                payload["contents"] = contents

        # Never send explicit nulls to Unity.
        payload = {key: val for key, val in payload.items() if val is not None}

        response = unity_connection.send_command_with_retry("manage_script", payload)

        if not isinstance(response, dict):
            return {"success": False, "message": str(response)}
        if not response.get("success"):
            return response

        # Decode any base64 payload Unity sent back before handing it on.
        data = response.get("data", {})
        if data.get("contentsEncoded"):
            data["contents"] = base64.b64decode(
                data["encodedContents"]).decode('utf-8')
            del data["encodedContents"]
            del data["contentsEncoded"]

        return {
            "success": True,
            "message": response.get("message", "Operation successful."),
            "data": response.get("data"),
        }

    except Exception as e:
        return {
            "success": False,
            "message": f"Python error managing script: {str(e)}",
        }
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=(
    """Get manage_script capabilities (supported ops, limits, and guards).
    Returns:
    - ops: list of supported structured ops
    - text_ops: list of supported text ops
    - max_edit_payload_bytes: server edit payload cap
    - guards: header/using guard enabled flag"""
))
def manage_script_capabilities(ctx: Context) -> dict[str, Any]:
    """Report the edit operations, payload limits, and guards this server supports."""
    ctx.info("Processing manage_script_capabilities")
    try:
        # Keep in sync with server/Editor ManageScript implementation
        capabilities: dict[str, Any] = {
            "ops": [
                "replace_class", "delete_class", "replace_method", "delete_method",
                "insert_method", "anchor_insert", "anchor_delete", "anchor_replace",
            ],
            "text_ops": ["replace_range", "regex_replace", "prepend", "append"],
            # Match ManageScript.MaxEditPayloadBytes if exposed; hardcode a sensible default fallback
            "max_edit_payload_bytes": 256 * 1024,
            "guards": {"using_guard": True},
            "extras": {"get_sha": True},
        }
        return {"success": True, "data": capabilities}
    except Exception as e:
        return {"success": False, "error": f"capabilities error: {e}"}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description="Get SHA256 and basic metadata for a Unity C# script without returning file contents")
def get_sha(
    ctx: Context,
    uri: Annotated[str, "URI of the script to edit under Assets/ directory, unity://path/Assets/... or file://... or Assets/..."]
) -> dict[str, Any]:
    """Fetch a script's content hash and byte length without transferring its text."""
    ctx.info(f"Processing get_sha: {uri}")
    try:
        script_name, script_dir = _split_uri(uri)
        response = unity_connection.send_command_with_retry(
            "manage_script",
            {"action": "get_sha", "name": script_name, "path": script_dir},
        )
        if isinstance(response, dict) and response.get("success"):
            payload = response.get("data", {})
            # Trim the reply down to hash + size only.
            minimal = {
                "sha256": payload.get("sha256"),
                "lengthBytes": payload.get("lengthBytes"),
            }
            return {"success": True, "data": minimal}
        return response if isinstance(response, dict) else {"success": False, "message": str(response)}
    except Exception as e:
        return {"success": False, "message": f"get_sha error: {e}"}
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
import base64
|
||||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Manages shader scripts in Unity (create, read, update, delete)."
)
def manage_shader(
    ctx: Context,
    action: Annotated[Literal['create', 'read', 'update', 'delete'], "Perform CRUD operations on shader scripts."],
    name: Annotated[str, "Shader name (no .cs extension)"],
    path: Annotated[str, "Asset path (default: \"Assets/\")"],
    contents: Annotated[str,
                        "Shader code for 'create'/'update'"] | None = None,
) -> dict[str, Any]:
    """CRUD entry point for Unity shader assets; mirrors manage_script's transport."""
    ctx.info(f"Processing manage_shader: {action}")
    try:
        payload: dict[str, Any] = {"action": action, "name": name, "path": path}

        # Shader text rides as base64 so JSON escaping cannot mangle it.
        if contents is not None:
            if action in ['create', 'update']:
                payload["encodedContents"] = base64.b64encode(
                    contents.encode('utf-8')).decode('utf-8')
                payload["contentsEncoded"] = True
            else:
                payload["contents"] = contents

        # Drop unset keys so Unity never receives explicit nulls.
        payload = {key: val for key, val in payload.items() if val is not None}

        # Send command via centralized retry helper
        response = send_command_with_retry("manage_shader", payload)

        if isinstance(response, dict) and response.get("success"):
            data = response.get("data", {})
            # Decode any base64-encoded shader text before returning it.
            if data.get("contentsEncoded"):
                data["contents"] = base64.b64decode(
                    data["encodedContents"]).decode('utf-8')
                del data["encodedContents"]
                del data["contentsEncoded"]
            return {"success": True, "message": response.get("message", "Operation successful."), "data": response.get("data")}
        return response if isinstance(response, dict) else {"success": False, "message": str(response)}

    except Exception as e:
        # Handle Python-side errors (e.g., connection issues)
        return {"success": False, "message": f"Python error managing shader: {str(e)}"}
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
"""
|
||||
Defines the read_console tool for accessing Unity Editor console messages.
|
||||
"""
|
||||
from typing import Annotated, Any, Literal
|
||||
|
||||
from fastmcp import Context
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
@mcp_for_unity_tool(
    description="Gets messages from or clears the Unity Editor console. Note: For maximum client compatibility, pass count as a quoted string (e.g., '5')."
)
def read_console(
    ctx: Context,
    action: Annotated[Literal['get', 'clear'], "Get or clear the Unity Editor console."] | None = None,
    types: Annotated[list[Literal['error', 'warning',
                                  'log', 'all']], "Message types to get"] | None = None,
    count: Annotated[int | str, "Max messages to return (accepts int or string, e.g., 5 or '5')"] | None = None,
    filter_text: Annotated[str, "Text filter for messages"] | None = None,
    since_timestamp: Annotated[str,
                               "Get messages after this timestamp (ISO 8601)"] | None = None,
    format: Annotated[Literal['plain', 'detailed',
                              'json'], "Output format"] | None = None,
    include_stacktrace: Annotated[bool | str,
                                  "Include stack traces in output (accepts true/false or 'true'/'false')"] | None = None
) -> dict[str, Any]:
    """Fetch (or clear) Unity Editor console messages.

    Optional inputs are coerced defensively because some MCP clients send
    numbers and booleans as strings. Returns the Unity response dict; when
    the reply is not a dict, a {"success": False, ...} wrapper instead.
    """
    ctx.info(f"Processing read_console: {action}")
    # Set defaults if values are None
    action = action if action is not None else 'get'
    types = types if types is not None else ['error', 'warning', 'log']
    format = format if format is not None else 'detailed'

    # Coerce booleans defensively (strings like 'true'/'false')
    def _coerce_bool(value, default=None):
        # Recognized truthy/falsy spellings; anything else falls back to bool().
        if value is None:
            return default
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            v = value.strip().lower()
            if v in ("true", "1", "yes", "on"):
                return True
            if v in ("false", "0", "no", "off"):
                return False
        return bool(value)

    # Stack traces are included by default when the caller says nothing.
    include_stacktrace = _coerce_bool(include_stacktrace, True)

    # Normalize action if it's a string
    if isinstance(action, str):
        action = action.lower()

    # Coerce count defensively (string/float -> int)
    def _coerce_int(value, default=None):
        # Returns default for None, booleans, blank/'none'/'null' strings,
        # and anything unparsable; accepts "10.0"-style strings via float.
        if value is None:
            return default
        try:
            if isinstance(value, bool):
                return default
            if isinstance(value, int):
                return int(value)
            s = str(value).strip()
            if s.lower() in ("", "none", "null"):
                return default
            return int(float(s))
        except Exception:
            return default

    count = _coerce_int(count)

    # Prepare parameters for the C# handler
    params_dict = {
        "action": action,
        "types": types,
        "count": count,
        "filterText": filter_text,
        "sinceTimestamp": since_timestamp,
        "format": format.lower() if isinstance(format, str) else format,
        "includeStacktrace": include_stacktrace
    }

    # Remove None values unless it's 'count' (as None might mean 'all')
    params_dict = {k: v for k, v in params_dict.items()
                   if v is not None or k == 'count'}

    # Add count back if it was None, explicitly sending null might be important for C# logic
    # NOTE(review): the filter above already keeps 'count' even when it is
    # None, so this branch looks unreachable — confirm before removing.
    if 'count' not in params_dict:
        params_dict['count'] = None

    # Use centralized retry helper
    resp = send_command_with_retry("read_console", params_dict)
    if isinstance(resp, dict) and resp.get("success") and not include_stacktrace:
        # Strip stacktrace fields from returned lines if present
        try:
            lines = resp.get("data", {}).get("lines", [])
            for line in lines:
                if isinstance(line, dict) and "stacktrace" in line:
                    line.pop("stacktrace", None)
        except Exception:
            pass
    return resp if isinstance(resp, dict) else {"success": False, "message": str(resp)}
|
||||
|
|
@ -0,0 +1,406 @@
|
|||
"""
|
||||
Resource wrapper tools so clients that do not expose MCP resources primitives
|
||||
can still list and read files via normal tools. These call into the same
|
||||
safe path logic (re-implemented here to avoid importing server.py).
|
||||
"""
|
||||
import fnmatch
|
||||
import hashlib
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Annotated, Any
|
||||
from urllib.parse import urlparse, unquote
|
||||
|
||||
from fastmcp import Context
|
||||
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
def _coerce_int(value: Any, default: int | None = None, minimum: int | None = None) -> int | None:
|
||||
"""Safely coerce various inputs (str/float/etc.) to an int.
|
||||
Returns default on failure; clamps to minimum when provided.
|
||||
"""
|
||||
if value is None:
|
||||
return default
|
||||
try:
|
||||
# Avoid treating booleans as ints implicitly
|
||||
if isinstance(value, bool):
|
||||
return default
|
||||
if isinstance(value, int):
|
||||
result = int(value)
|
||||
else:
|
||||
s = str(value).strip()
|
||||
if s.lower() in ("", "none", "null"):
|
||||
return default
|
||||
# Allow "10.0" or similar inputs
|
||||
result = int(float(s))
|
||||
if minimum is not None and result < minimum:
|
||||
return minimum
|
||||
return result
|
||||
except Exception:
|
||||
return default
|
||||
|
||||
|
||||
def _resolve_project_root(override: str | None) -> Path:
    """Locate the Unity project root directory.

    Tries, in order: the explicit *override* argument, the
    UNITY_PROJECT_ROOT environment variable, asking the Unity Editor
    itself, walking up from the CWD, a shallow downward search from the
    CWD, and finally the CWD itself. A candidate is accepted when it
    contains an Assets/ directory (steps 4-5 also require
    ProjectSettings/). The ordering is the contract — callers rely on
    explicit configuration beating discovery.
    """
    # 1) Explicit override
    if override:
        pr = Path(override).expanduser().resolve()
        if (pr / "Assets").exists():
            return pr
    # 2) Environment
    env = os.environ.get("UNITY_PROJECT_ROOT")
    if env:
        env_path = Path(env).expanduser()
        # If UNITY_PROJECT_ROOT is relative, resolve against repo root (cwd's repo) instead of src dir
        pr = (Path.cwd(
        ) / env_path).resolve() if not env_path.is_absolute() else env_path.resolve()
        if (pr / "Assets").exists():
            return pr
    # 3) Ask Unity via manage_editor.get_project_root
    try:
        resp = send_command_with_retry(
            "manage_editor", {"action": "get_project_root"})
        if isinstance(resp, dict) and resp.get("success"):
            pr = Path(resp.get("data", {}).get(
                "projectRoot", "")).expanduser().resolve()
            # NOTE(review): a Path is always truthy, so the `pr and` guard is
            # effectively just the Assets-existence check — confirm intent.
            if pr and (pr / "Assets").exists():
                return pr
    except Exception:
        # Best-effort: Unity may not be reachable; fall through to local search.
        pass

    # 4) Walk up from CWD to find a Unity project (Assets + ProjectSettings)
    cur = Path.cwd().resolve()
    for _ in range(6):
        if (cur / "Assets").exists() and (cur / "ProjectSettings").exists():
            return cur
        if cur.parent == cur:
            break
        cur = cur.parent
    # 5) Search downwards (shallow) from repo root for first folder with Assets + ProjectSettings
    try:
        import os as _os
        root = Path.cwd().resolve()
        max_depth = 3
        for dirpath, dirnames, _ in _os.walk(root):
            rel = Path(dirpath).resolve()
            try:
                depth = len(rel.relative_to(root).parts)
            except Exception:
                # Unrelated mount/permission edge; skip deeper traversal
                dirnames[:] = []
                continue
            if depth > max_depth:
                # Prune deeper traversal
                dirnames[:] = []
                continue
            if (rel / "Assets").exists() and (rel / "ProjectSettings").exists():
                return rel
    except Exception:
        pass
    # 6) Fallback: CWD
    return Path.cwd().resolve()
|
||||
|
||||
|
||||
def _resolve_safe_path_from_uri(uri: str, project: Path) -> Path | None:
|
||||
raw: str | None = None
|
||||
if uri.startswith("unity://path/"):
|
||||
raw = uri[len("unity://path/"):]
|
||||
elif uri.startswith("file://"):
|
||||
parsed = urlparse(uri)
|
||||
raw = unquote(parsed.path or "")
|
||||
# On Windows, urlparse('file:///C:/x') -> path='/C:/x'. Strip the leading slash for drive letters.
|
||||
try:
|
||||
import os as _os
|
||||
if _os.name == "nt" and raw.startswith("/") and re.match(r"^/[A-Za-z]:/", raw):
|
||||
raw = raw[1:]
|
||||
# UNC paths: file://server/share -> netloc='server', path='/share'. Treat as \\\\server/share
|
||||
if _os.name == "nt" and parsed.netloc:
|
||||
raw = f"//{parsed.netloc}{raw}"
|
||||
except Exception:
|
||||
pass
|
||||
elif uri.startswith("Assets/"):
|
||||
raw = uri
|
||||
if raw is None:
|
||||
return None
|
||||
# Normalize separators early
|
||||
raw = raw.replace("\\", "/")
|
||||
p = (project / raw).resolve()
|
||||
try:
|
||||
p.relative_to(project)
|
||||
except ValueError:
|
||||
return None
|
||||
return p
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("List project URIs (unity://path/...) under a folder (default: Assets). Only .cs files are returned by default; always appends unity://spec/script-edits.\n"))
async def list_resources(
    ctx: Context,
    pattern: Annotated[str, "Glob, default is *.cs"] | None = "*.cs",
    under: Annotated[str,
                     "Folder under project root, default is Assets"] = "Assets",
    limit: Annotated[int, "Page limit"] = 200,
    project_root: Annotated[str, "Project path"] | None = None,
) -> dict[str, Any]:
    """Enumerate .cs files below *under* as unity://path/ URIs, capped at *limit*."""
    ctx.info(f"Processing list_resources: {pattern}")
    try:
        project = _resolve_project_root(project_root)
        base = (project / under).resolve()
        try:
            base.relative_to(project)
        except ValueError:
            return {"success": False, "error": "Base path must be under project root"}
        # Enforce listing only under Assets
        assets_root = project / "Assets"
        try:
            base.relative_to(assets_root)
        except ValueError:
            return {"success": False, "error": "Listing is restricted to Assets/"}

        page_size = max(1, _coerce_int(limit, default=200, minimum=1))
        uris: list[str] = []
        for candidate in base.rglob("*"):
            if len(uris) >= page_size:
                break
            # Only .cs regular files count, regardless of the provided pattern.
            if not candidate.is_file() or candidate.suffix.lower() != ".cs":
                continue
            # Resolve symlinks and keep only files whose real location is
            # still inside project/Assets.
            try:
                candidate.resolve().relative_to(assets_root)
            except Exception:
                continue
            if pattern and not fnmatch.fnmatch(candidate.name, pattern):
                continue
            rel_posix = candidate.relative_to(project).as_posix()
            uris.append(f"unity://path/{rel_posix}")

        # Always include the canonical spec resource so NL clients can discover it
        if "unity://spec/script-edits" not in uris:
            uris.append("unity://spec/script-edits")

        return {"success": True, "data": {"uris": uris, "count": len(uris)}}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description=("Reads a resource by unity://path/... URI with optional slicing."))
async def read_resource(
    ctx: Context,
    uri: Annotated[str, "The resource URI to read under Assets/"],
    start_line: Annotated[int | float | str,
                          "The starting line number (0-based)"] | None = None,
    line_count: Annotated[int | float | str,
                          "The number of lines to read"] | None = None,
    head_bytes: Annotated[int | float | str,
                          "The number of bytes to read from the start of the file"] | None = None,
    tail_lines: Annotated[int | float | str,
                          "The number of lines to read from the end of the file"] | None = None,
    project_root: Annotated[str,
                            "The project root directory"] | None = None,
    request: Annotated[str, "The request ID"] | None = None,
) -> dict[str, Any]:
    """Read a project resource by URI, optionally returning only a slice of it.

    Three cases, in order:
      1. The canonical script-edit spec URI returns a hard-coded JSON document
         plus its sha256.
      2. A file URI with no windowing arguments returns metadata only
         (sha256 over the full bytes, plus length).
      3. A file URI with windowing arguments (head_bytes, tail_lines, or
         start_line+line_count — in that precedence order) returns the selected
         text slice together with the full-file sha256 and byte length.

    Reads are restricted to files under the project's Assets/ directory.
    All errors are reported as {"success": False, "error": ...}.

    NOTE(review): 'request' is parsed below as natural-language windowing text
    ("last 120 lines", "first 200 lines", ...), not an ID — the annotation
    string "The request ID" looks stale; confirm against callers.
    """
    # NOTE(review): ctx.info is awaited elsewhere in this codebase (run_tests)
    # but not here — confirm whether the un-awaited call actually logs.
    ctx.info(f"Processing read_resource: {uri}")
    try:
        # Serve the canonical spec directly when requested (allow bare or with scheme)
        if uri in ("unity://spec/script-edits", "spec/script-edits", "script-edits"):
            # The spec is a literal JSON string; its sha256 is computed fresh
            # on every request so the text and hash can never drift apart.
            spec_json = (
                '{\n'
                ' "name": "MCP for Unity - Script Edits v1",\n'
                ' "target_tool": "script_apply_edits",\n'
                ' "canonical_rules": {\n'
                ' "always_use": ["op","className","methodName","replacement","afterMethodName","beforeMethodName"],\n'
                ' "never_use": ["new_method","anchor_method","content","newText"],\n'
                ' "defaults": {\n'
                ' "className": "\u2190 server will default to \'name\' when omitted",\n'
                ' "position": "end"\n'
                ' }\n'
                ' },\n'
                ' "ops": [\n'
                ' {"op":"replace_method","required":["className","methodName","replacement"],"optional":["returnType","parametersSignature","attributesContains"],"examples":[{"note":"match overload by signature","parametersSignature":"(int a, string b)"},{"note":"ensure attributes retained","attributesContains":"ContextMenu"}]},\n'
                ' {"op":"insert_method","required":["className","replacement"],"position":{"enum":["start","end","after","before"],"after_requires":"afterMethodName","before_requires":"beforeMethodName"}},\n'
                ' {"op":"delete_method","required":["className","methodName"]},\n'
                ' {"op":"anchor_insert","required":["anchor","text"],"notes":"regex; position=before|after"}\n'
                ' ],\n'
                ' "apply_text_edits_recipe": {\n'
                ' "step1_read": { "tool": "resources/read", "args": {"uri": "unity://path/Assets/Scripts/Interaction/SmartReach.cs"} },\n'
                ' "step2_apply": {\n'
                ' "tool": "manage_script",\n'
                ' "args": {\n'
                ' "action": "apply_text_edits",\n'
                ' "name": "SmartReach", "path": "Assets/Scripts/Interaction",\n'
                ' "edits": [{"startLine": 42, "startCol": 1, "endLine": 42, "endCol": 1, "newText": "[MyAttr]\\n"}],\n'
                ' "precondition_sha256": "<sha-from-step1>",\n'
                ' "options": {"refresh": "immediate", "validate": "standard"}\n'
                ' }\n'
                ' },\n'
                ' "note": "newText is for apply_text_edits ranges only; use replacement in script_apply_edits ops."\n'
                ' },\n'
                ' "examples": [\n'
                ' {\n'
                ' "title": "Replace a method",\n'
                ' "args": {\n'
                ' "name": "SmartReach",\n'
                ' "path": "Assets/Scripts/Interaction",\n'
                ' "edits": [\n'
                ' {"op":"replace_method","className":"SmartReach","methodName":"HasTarget","replacement":"public bool HasTarget() { return currentTarget != null; }"}\n'
                ' ],\n'
                ' "options": { "validate": "standard", "refresh": "immediate" }\n'
                ' }\n'
                ' },\n'
                ' {\n'
                ' "title": "Insert a method after another",\n'
                ' "args": {\n'
                ' "name": "SmartReach",\n'
                ' "path": "Assets/Scripts/Interaction",\n'
                ' "edits": [\n'
                ' {"op":"insert_method","className":"SmartReach","replacement":"public void PrintSeries() { Debug.Log(seriesName); }","position":"after","afterMethodName":"GetCurrentTarget"}\n'
                ' ]\n'
                ' }\n'
                ' }\n'
                ' ]\n'
                '}\n'
            )
            sha = hashlib.sha256(spec_json.encode("utf-8")).hexdigest()
            return {"success": True, "data": {"text": spec_json, "metadata": {"sha256": sha}}}

        project = _resolve_project_root(project_root)
        p = _resolve_safe_path_from_uri(uri, project)
        if not p or not p.exists() or not p.is_file():
            return {"success": False, "error": f"Resource not found: {uri}"}
        # Reject anything that resolved outside Assets/ (raises ValueError below).
        try:
            p.relative_to(project / "Assets")
        except ValueError:
            return {"success": False, "error": "Read restricted to Assets/"}
        # Natural-language convenience: request like "last 120 lines", "first 200 lines",
        # "show 40 lines around MethodName", etc.
        if request:
            req = request.strip().lower()
            m = re.search(r"last\s+(\d+)\s+lines", req)
            if m:
                tail_lines = int(m.group(1))
            m = re.search(r"first\s+(\d+)\s+lines", req)
            if m:
                start_line = 1
                line_count = int(m.group(1))
            m = re.search(r"first\s+(\d+)\s*bytes", req)
            if m:
                head_bytes = int(m.group(1))
            m = re.search(
                r"show\s+(\d+)\s+lines\s+around\s+([A-Za-z_][A-Za-z0-9_]*)", req)
            if m:
                window = int(m.group(1))
                method = m.group(2)
                # naive search for method header to get a line number
                text_all = p.read_text(encoding="utf-8")
                lines_all = text_all.splitlines()
                # C#-style method signature: optional attributes, then a
                # modifier keyword, then the method name followed by "(".
                pat = re.compile(
                    rf"^\s*(?:\[[^\]]+\]\s*)*(?:public|private|protected|internal|static|virtual|override|sealed|async|extern|unsafe|new|partial).*?\b{re.escape(method)}\s*\(", re.MULTILINE)
                hit_line = None
                for i, line in enumerate(lines_all, start=1):
                    if pat.search(line):
                        hit_line = i
                        break
                if hit_line:
                    # Center the requested window on the matched signature line.
                    half = max(1, window // 2)
                    start_line = max(1, hit_line - half)
                    line_count = window

        # Coerce numeric inputs defensively (string/float -> int)
        start_line = _coerce_int(start_line)
        line_count = _coerce_int(line_count)
        head_bytes = _coerce_int(head_bytes, minimum=1)
        tail_lines = _coerce_int(tail_lines, minimum=1)

        # Compute SHA over full file contents (metadata-only default)
        full_bytes = p.read_bytes()
        full_sha = hashlib.sha256(full_bytes).hexdigest()

        # Selection only when explicitly requested via windowing args or request text hints
        selection_requested = bool(head_bytes or tail_lines or (
            start_line is not None and line_count is not None) or request)
        if selection_requested:
            # Mutually exclusive windowing options precedence:
            # 1) head_bytes, 2) tail_lines, 3) start_line+line_count, else full text
            if head_bytes and head_bytes > 0:
                raw = full_bytes[: head_bytes]
                text = raw.decode("utf-8", errors="replace")
            else:
                text = full_bytes.decode("utf-8", errors="replace")
                if tail_lines is not None and tail_lines > 0:
                    lines = text.splitlines()
                    n = max(0, tail_lines)
                    text = "\n".join(lines[-n:])
                elif start_line is not None and line_count is not None and line_count >= 0:
                    lines = text.splitlines()
                    # start_line is 1-based here; clamp to file bounds.
                    s = max(0, start_line - 1)
                    e = min(len(lines), s + line_count)
                    text = "\n".join(lines[s:e])
            return {"success": True, "data": {"text": text, "metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}}
        else:
            # Default: metadata only
            return {"success": True, "data": {"metadata": {"sha256": full_sha, "lengthBytes": len(full_bytes)}}}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description="Searches a file with a regex pattern and returns line numbers and excerpts.")
async def find_in_file(
    ctx: Context,
    uri: Annotated[str, "The resource URI to search under Assets/ or file path form supported by read_resource"],
    pattern: Annotated[str, "The regex pattern to search for"],
    ignore_case: Annotated[bool | str, "Case-insensitive search (accepts true/false or 'true'/'false')"] | None = True,
    project_root: Annotated[str,
                            "The project root directory"] | None = None,
    max_results: Annotated[int,
                           "Cap results to avoid huge payloads"] = 200,
) -> dict[str, Any]:
    """Scan a project file line by line and report regex match positions.

    Returns {"success": True, "data": {"matches": [...], "count": n}} where
    each match carries 1-based startLine/startCol/endLine/endCol (endCol is
    exclusive). At most one match is reported per line (the first on that
    line), and the list is capped at max_results. Failures come back as
    {"success": False, "error": ...}.

    NOTE(review): the tool description mentions "excerpts" but only positions
    are returned — confirm whether clients expect line text here.
    """
    ctx.info(f"Processing find_in_file: {uri}")
    try:
        project = _resolve_project_root(project_root)
        target = _resolve_safe_path_from_uri(uri, project)
        if not target or not target.exists() or not target.is_file():
            return {"success": False, "error": f"Resource not found: {uri}"}

        contents = target.read_text(encoding="utf-8")

        def _coerce_bool(val, default=None):
            # Tolerant boolean coercion for clients that stringify booleans.
            if val is None:
                return default
            if isinstance(val, bool):
                return val
            if isinstance(val, str):
                token = val.strip().lower()
                if token in ("true", "1", "yes", "on"):
                    return True
                if token in ("false", "0", "no", "off"):
                    return False
            return bool(val)

        case_insensitive = _coerce_bool(ignore_case, default=True)
        regex = re.compile(
            pattern, re.MULTILINE | (re.IGNORECASE if case_insensitive else 0))

        cap = _coerce_int(max_results, default=200, minimum=1)
        found: list[dict[str, Any]] = []
        for lineno, line in enumerate(contents.splitlines(), start=1):
            hit = regex.search(line)
            if hit is None:
                continue
            found.append({
                "startLine": lineno,
                "startCol": hit.start() + 1,  # 1-based
                "endLine": lineno,
                "endCol": hit.end() + 1,      # 1-based, end exclusive
            })
            if cap and len(found) >= cap:
                break

        return {"success": True, "data": {"matches": found, "count": len(found)}}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
"""Tool for executing Unity Test Runner suites."""
|
||||
from typing import Annotated, Literal, Any
|
||||
|
||||
from fastmcp import Context
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from models import MCPResponse
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import async_send_command_with_retry
|
||||
|
||||
|
||||
class RunTestsSummary(BaseModel):
    """Aggregate counts for one Unity Test Runner invocation.

    Field names are camelCase to mirror the payload produced by the Unity
    (C#) side of the bridge.
    """

    # Test counts for the run.
    total: int
    passed: int
    failed: int
    skipped: int
    # Duration of the whole run, in seconds.
    durationSeconds: float
    # Unity's overall result-state string for the run — exact values come
    # from the Unity side; confirm against the C# payload.
    resultState: str
|
||||
|
||||
|
||||
class RunTestsTestResult(BaseModel):
    """Outcome of a single test within a Unity test run."""

    # Test identifiers as reported by the Unity side.
    name: str
    fullName: str
    # Per-test result-state string from Unity.
    state: str
    # Duration of this single test, in seconds.
    durationSeconds: float
    # Failure/diagnostic details; None when the runner supplied none.
    message: str | None = None
    stackTrace: str | None = None
    output: str | None = None
|
||||
|
||||
|
||||
class RunTestsResult(BaseModel):
    """Complete payload of a finished test run: mode, summary, per-test results."""

    # Test mode that was executed (presumably "edit" or "play" — matches the
    # run_tests tool's mode argument; confirm against the C# sender).
    mode: str
    summary: RunTestsSummary
    results: list[RunTestsTestResult]
|
||||
|
||||
|
||||
class RunTestsResponse(MCPResponse):
    """MCP response envelope for run_tests; optional result payload in `data`."""

    # Full run result; None when no payload is present.
    data: RunTestsResult | None = None
|
||||
|
||||
|
||||
@mcp_for_unity_tool(description="Runs Unity tests for the specified mode")
async def run_tests(
    ctx: Context,
    mode: Annotated[Literal["edit", "play"], Field(
        description="Unity test mode to run")] = "edit",
    timeout_seconds: Annotated[str, Field(
        description="Optional timeout in seconds for the Unity test run (string, e.g. '30')")] | None = None,
) -> RunTestsResponse:
    """Trigger a Unity Test Runner pass and wrap the reply in RunTestsResponse.

    `mode` selects edit-mode or play-mode tests. `timeout_seconds`, when it
    parses as a number, is forwarded to Unity as "timeoutSeconds"; otherwise
    it is silently omitted.
    """
    await ctx.info(f"Processing run_tests: mode={mode}")

    def _coerce_int(value, default=None):
        # Defensive conversion: bools and unparseable values fall back to default.
        if value is None or isinstance(value, bool):
            return default
        if isinstance(value, int):
            return int(value)
        try:
            token = str(value).strip()
            if token.lower() in ("", "none", "null"):
                return default
            return int(float(token))
        except Exception:
            return default

    params: dict[str, Any] = {"mode": mode}
    timeout = _coerce_int(timeout_seconds)
    if timeout is not None:
        params["timeoutSeconds"] = timeout

    response = await async_send_command_with_retry("run_tests", params)
    await ctx.info(f'Response {response}')
    # Dict replies are validated into the typed response; anything else
    # (e.g. an error object from the transport) passes through unchanged.
    return RunTestsResponse(**response) if isinstance(response, dict) else response
|
||||
|
|
@ -0,0 +1,966 @@
|
|||
import base64
|
||||
import hashlib
|
||||
import re
|
||||
from typing import Annotated, Any
|
||||
|
||||
from fastmcp import Context
|
||||
|
||||
from registry import mcp_for_unity_tool
|
||||
from unity_connection import send_command_with_retry
|
||||
|
||||
|
||||
def _apply_edits_locally(original_text: str, edits: list[dict[str, Any]]) -> str:
|
||||
text = original_text
|
||||
for edit in edits or []:
|
||||
op = (
|
||||
(edit.get("op")
|
||||
or edit.get("operation")
|
||||
or edit.get("type")
|
||||
or edit.get("mode")
|
||||
or "")
|
||||
.strip()
|
||||
.lower()
|
||||
)
|
||||
|
||||
if not op:
|
||||
allowed = "anchor_insert, prepend, append, replace_range, regex_replace"
|
||||
raise RuntimeError(
|
||||
f"op is required; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation)."
|
||||
)
|
||||
|
||||
if op == "prepend":
|
||||
prepend_text = edit.get("text", "")
|
||||
text = (prepend_text if prepend_text.endswith(
|
||||
"\n") else prepend_text + "\n") + text
|
||||
elif op == "append":
|
||||
append_text = edit.get("text", "")
|
||||
if not text.endswith("\n"):
|
||||
text += "\n"
|
||||
text += append_text
|
||||
if not text.endswith("\n"):
|
||||
text += "\n"
|
||||
elif op == "anchor_insert":
|
||||
anchor = edit.get("anchor", "")
|
||||
position = (edit.get("position") or "before").lower()
|
||||
insert_text = edit.get("text", "")
|
||||
flags = re.MULTILINE | (
|
||||
re.IGNORECASE if edit.get("ignore_case") else 0)
|
||||
|
||||
# Find the best match using improved heuristics
|
||||
match = _find_best_anchor_match(
|
||||
anchor, text, flags, bool(edit.get("prefer_last", True)))
|
||||
if not match:
|
||||
if edit.get("allow_noop", True):
|
||||
continue
|
||||
raise RuntimeError(f"anchor not found: {anchor}")
|
||||
idx = match.start() if position == "before" else match.end()
|
||||
text = text[:idx] + insert_text + text[idx:]
|
||||
elif op == "replace_range":
|
||||
start_line = int(edit.get("startLine", 1))
|
||||
start_col = int(edit.get("startCol", 1))
|
||||
end_line = int(edit.get("endLine", start_line))
|
||||
end_col = int(edit.get("endCol", 1))
|
||||
replacement = edit.get("text", "")
|
||||
lines = text.splitlines(keepends=True)
|
||||
max_line = len(lines) + 1 # 1-based, exclusive end
|
||||
if (start_line < 1 or end_line < start_line or end_line > max_line
|
||||
or start_col < 1 or end_col < 1):
|
||||
raise RuntimeError("replace_range out of bounds")
|
||||
|
||||
def index_of(line: int, col: int) -> int:
|
||||
if line <= len(lines):
|
||||
return sum(len(l) for l in lines[: line - 1]) + (col - 1)
|
||||
return sum(len(l) for l in lines)
|
||||
a = index_of(start_line, start_col)
|
||||
b = index_of(end_line, end_col)
|
||||
text = text[:a] + replacement + text[b:]
|
||||
elif op == "regex_replace":
|
||||
pattern = edit.get("pattern", "")
|
||||
repl = edit.get("replacement", "")
|
||||
# Translate $n backrefs (our input) to Python \g<n>
|
||||
repl_py = re.sub(r"\$(\d+)", r"\\g<\1>", repl)
|
||||
count = int(edit.get("count", 0)) # 0 = replace all
|
||||
flags = re.MULTILINE
|
||||
if edit.get("ignore_case"):
|
||||
flags |= re.IGNORECASE
|
||||
text = re.sub(pattern, repl_py, text, count=count, flags=flags)
|
||||
else:
|
||||
allowed = "anchor_insert, prepend, append, replace_range, regex_replace"
|
||||
raise RuntimeError(
|
||||
f"unknown edit op: {op}; allowed: {allowed}. Use 'op' (aliases accepted: type/mode/operation).")
|
||||
return text
|
||||
|
||||
|
||||
def _find_best_anchor_match(pattern: str, text: str, flags: int, prefer_last: bool = True):
|
||||
"""
|
||||
Find the best anchor match using improved heuristics.
|
||||
|
||||
For patterns like \\s*}\\s*$ that are meant to find class-ending braces,
|
||||
this function uses heuristics to choose the most semantically appropriate match:
|
||||
|
||||
1. If prefer_last=True, prefer the last match (common for class-end insertions)
|
||||
2. Use indentation levels to distinguish class vs method braces
|
||||
3. Consider context to avoid matches inside strings/comments
|
||||
|
||||
Args:
|
||||
pattern: Regex pattern to search for
|
||||
text: Text to search in
|
||||
flags: Regex flags
|
||||
prefer_last: If True, prefer the last match over the first
|
||||
|
||||
Returns:
|
||||
Match object of the best match, or None if no match found
|
||||
"""
|
||||
|
||||
# Find all matches
|
||||
matches = list(re.finditer(pattern, text, flags))
|
||||
if not matches:
|
||||
return None
|
||||
|
||||
# If only one match, return it
|
||||
if len(matches) == 1:
|
||||
return matches[0]
|
||||
|
||||
# For patterns that look like they're trying to match closing braces at end of lines
|
||||
is_closing_brace_pattern = '}' in pattern and (
|
||||
'$' in pattern or pattern.endswith(r'\s*'))
|
||||
|
||||
if is_closing_brace_pattern and prefer_last:
|
||||
# Use heuristics to find the best closing brace match
|
||||
return _find_best_closing_brace_match(matches, text)
|
||||
|
||||
# Default behavior: use last match if prefer_last, otherwise first match
|
||||
return matches[-1] if prefer_last else matches[0]
|
||||
|
||||
|
||||
def _find_best_closing_brace_match(matches, text: str):
|
||||
"""
|
||||
Find the best closing brace match using C# structure heuristics.
|
||||
|
||||
Enhanced heuristics for scope-aware matching:
|
||||
1. Prefer matches with lower indentation (likely class-level)
|
||||
2. Prefer matches closer to end of file
|
||||
3. Avoid matches that seem to be inside method bodies
|
||||
4. For #endregion patterns, ensure class-level context
|
||||
5. Validate insertion point is at appropriate scope
|
||||
|
||||
Args:
|
||||
matches: List of regex match objects
|
||||
text: The full text being searched
|
||||
|
||||
Returns:
|
||||
The best match object
|
||||
"""
|
||||
if not matches:
|
||||
return None
|
||||
|
||||
scored_matches = []
|
||||
lines = text.splitlines()
|
||||
|
||||
for match in matches:
|
||||
score = 0
|
||||
start_pos = match.start()
|
||||
|
||||
# Find which line this match is on
|
||||
lines_before = text[:start_pos].count('\n')
|
||||
line_num = lines_before
|
||||
|
||||
if line_num < len(lines):
|
||||
line_content = lines[line_num]
|
||||
|
||||
# Calculate indentation level (lower is better for class braces)
|
||||
indentation = len(line_content) - len(line_content.lstrip())
|
||||
|
||||
# Prefer lower indentation (class braces are typically less indented than method braces)
|
||||
# Max 20 points for indentation=0
|
||||
score += max(0, 20 - indentation)
|
||||
|
||||
# Prefer matches closer to end of file (class closing braces are typically at the end)
|
||||
distance_from_end = len(lines) - line_num
|
||||
# More points for being closer to end
|
||||
score += max(0, 10 - distance_from_end)
|
||||
|
||||
# Look at surrounding context to avoid method braces
|
||||
context_start = max(0, line_num - 3)
|
||||
context_end = min(len(lines), line_num + 2)
|
||||
context_lines = lines[context_start:context_end]
|
||||
|
||||
# Penalize if this looks like it's inside a method (has method-like patterns above)
|
||||
for context_line in context_lines:
|
||||
if re.search(r'\b(void|public|private|protected)\s+\w+\s*\(', context_line):
|
||||
score -= 5 # Penalty for being near method signatures
|
||||
|
||||
# Bonus if this looks like a class-ending brace (very minimal indentation and near EOF)
|
||||
if indentation <= 4 and distance_from_end <= 3:
|
||||
score += 15 # Bonus for likely class-ending brace
|
||||
|
||||
scored_matches.append((score, match))
|
||||
|
||||
# Return the match with the highest score
|
||||
scored_matches.sort(key=lambda x: x[0], reverse=True)
|
||||
best_match = scored_matches[0][1]
|
||||
|
||||
return best_match
|
||||
|
||||
|
||||
def _infer_class_name(script_name: str) -> str:
|
||||
# Default to script name as class name (common Unity pattern)
|
||||
return (script_name or "").strip()
|
||||
|
||||
|
||||
def _extract_code_after(keyword: str, request: str) -> str:
|
||||
# Deprecated with NL removal; retained as no-op for compatibility
|
||||
idx = request.lower().find(keyword)
|
||||
if idx >= 0:
|
||||
return request[idx + len(keyword):].strip()
|
||||
return ""
|
||||
# Removed _is_structurally_balanced - validation now handled by C# side using Unity's compiler services
|
||||
|
||||
|
||||
def _normalize_script_locator(name: str, path: str) -> tuple[str, str]:
|
||||
"""Best-effort normalization of script "name" and "path".
|
||||
|
||||
Accepts any of:
|
||||
- name = "SmartReach", path = "Assets/Scripts/Interaction"
|
||||
- name = "SmartReach.cs", path = "Assets/Scripts/Interaction"
|
||||
- name = "Assets/Scripts/Interaction/SmartReach.cs", path = ""
|
||||
- path = "Assets/Scripts/Interaction/SmartReach.cs" (name empty)
|
||||
- name or path using uri prefixes: unity://path/..., file://...
|
||||
- accidental duplicates like "Assets/.../SmartReach.cs/SmartReach.cs"
|
||||
|
||||
Returns (name_without_extension, directory_path_under_Assets).
|
||||
"""
|
||||
n = (name or "").strip()
|
||||
p = (path or "").strip()
|
||||
|
||||
def strip_prefix(s: str) -> str:
|
||||
if s.startswith("unity://path/"):
|
||||
return s[len("unity://path/"):]
|
||||
if s.startswith("file://"):
|
||||
return s[len("file://"):]
|
||||
return s
|
||||
|
||||
def collapse_duplicate_tail(s: str) -> str:
|
||||
# Collapse trailing "/X.cs/X.cs" to "/X.cs"
|
||||
parts = s.split("/")
|
||||
if len(parts) >= 2 and parts[-1] == parts[-2]:
|
||||
parts = parts[:-1]
|
||||
return "/".join(parts)
|
||||
|
||||
# Prefer a full path if provided in either field
|
||||
candidate = ""
|
||||
for v in (n, p):
|
||||
v2 = strip_prefix(v)
|
||||
if v2.endswith(".cs") or v2.startswith("Assets/"):
|
||||
candidate = v2
|
||||
break
|
||||
|
||||
if candidate:
|
||||
candidate = collapse_duplicate_tail(candidate)
|
||||
# If a directory was passed in path and file in name, join them
|
||||
if not candidate.endswith(".cs") and n.endswith(".cs"):
|
||||
v2 = strip_prefix(n)
|
||||
candidate = (candidate.rstrip("/") + "/" + v2.split("/")[-1])
|
||||
if candidate.endswith(".cs"):
|
||||
parts = candidate.split("/")
|
||||
file_name = parts[-1]
|
||||
dir_path = "/".join(parts[:-1]) if len(parts) > 1 else "Assets"
|
||||
base = file_name[:-
|
||||
3] if file_name.lower().endswith(".cs") else file_name
|
||||
return base, dir_path
|
||||
|
||||
# Fall back: remove extension from name if present and return given path
|
||||
base_name = n[:-3] if n.lower().endswith(".cs") else n
|
||||
return base_name, (p or "Assets")
|
||||
|
||||
|
||||
def _with_norm(resp: dict[str, Any] | Any, edits: list[dict[str, Any]], routing: str | None = None) -> dict[str, Any] | Any:
|
||||
if not isinstance(resp, dict):
|
||||
return resp
|
||||
data = resp.setdefault("data", {})
|
||||
data.setdefault("normalizedEdits", edits)
|
||||
if routing:
|
||||
data["routing"] = routing
|
||||
return resp
|
||||
|
||||
|
||||
def _err(code: str, message: str, *, expected: dict[str, Any] | None = None, rewrite: dict[str, Any] | None = None,
|
||||
normalized: list[dict[str, Any]] | None = None, routing: str | None = None, extra: dict[str, Any] | None = None) -> dict[str, Any]:
|
||||
payload: dict[str, Any] = {"success": False,
|
||||
"code": code, "message": message}
|
||||
data: dict[str, Any] = {}
|
||||
if expected:
|
||||
data["expected"] = expected
|
||||
if rewrite:
|
||||
data["rewrite_suggestion"] = rewrite
|
||||
if normalized is not None:
|
||||
data["normalizedEdits"] = normalized
|
||||
if routing:
|
||||
data["routing"] = routing
|
||||
if extra:
|
||||
data.update(extra)
|
||||
if data:
|
||||
payload["data"] = data
|
||||
return payload
|
||||
|
||||
# Natural-language parsing removed; clients should send structured edits.
|
||||
|
||||
|
||||
@mcp_for_unity_tool(name="script_apply_edits", description=(
|
||||
"""Structured C# edits (methods/classes) with safer boundaries - prefer this over raw text.
|
||||
Best practices:
|
||||
- Prefer anchor_* ops for pattern-based insert/replace near stable markers
|
||||
- Use replace_method/delete_method for whole-method changes (keeps signatures balanced)
|
||||
- Avoid whole-file regex deletes; validators will guard unbalanced braces
|
||||
- For tail insertions, prefer anchor/regex_replace on final brace (class closing)
|
||||
- Pass options.validate='standard' for structural checks; 'relaxed' for interior-only edits
|
||||
Canonical fields (use these exact keys):
|
||||
- op: replace_method | insert_method | delete_method | anchor_insert | anchor_delete | anchor_replace
|
||||
- className: string (defaults to 'name' if omitted on method/class ops)
|
||||
- methodName: string (required for replace_method, delete_method)
|
||||
- replacement: string (required for replace_method, insert_method)
|
||||
- position: start | end | after | before (insert_method only)
|
||||
- afterMethodName / beforeMethodName: string (required when position='after'/'before')
|
||||
- anchor: regex string (for anchor_* ops)
|
||||
- text: string (for anchor_insert/anchor_replace)
|
||||
Examples:
|
||||
1) Replace a method:
|
||||
{
|
||||
"name": "SmartReach",
|
||||
"path": "Assets/Scripts/Interaction",
|
||||
"edits": [
|
||||
{
|
||||
"op": "replace_method",
|
||||
"className": "SmartReach",
|
||||
"methodName": "HasTarget",
|
||||
"replacement": "public bool HasTarget(){ return currentTarget!=null; }"
|
||||
}
|
||||
],
|
||||
"options": {"validate": "standard", "refresh": "immediate"}
|
||||
}
|
||||
"2) Insert a method after another:
|
||||
{
|
||||
"name": "SmartReach",
|
||||
"path": "Assets/Scripts/Interaction",
|
||||
"edits": [
|
||||
{
|
||||
"op": "insert_method",
|
||||
"className": "SmartReach",
|
||||
"replacement": "public void PrintSeries(){ Debug.Log(seriesName); }",
|
||||
"position": "after",
|
||||
"afterMethodName": "GetCurrentTarget"
|
||||
}
|
||||
],
|
||||
}
|
||||
]"""
|
||||
))
|
||||
def script_apply_edits(
|
||||
ctx: Context,
|
||||
name: Annotated[str, "Name of the script to edit"],
|
||||
path: Annotated[str, "Path to the script to edit under Assets/ directory"],
|
||||
edits: Annotated[list[dict[str, Any]], "List of edits to apply to the script"],
|
||||
options: Annotated[dict[str, Any],
|
||||
"Options for the script edit"] | None = None,
|
||||
script_type: Annotated[str,
|
||||
"Type of the script to edit"] = "MonoBehaviour",
|
||||
namespace: Annotated[str,
|
||||
"Namespace of the script to edit"] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
ctx.info(f"Processing script_apply_edits: {name}")
|
||||
# Normalize locator first so downstream calls target the correct script file.
|
||||
name, path = _normalize_script_locator(name, path)
|
||||
# Normalize unsupported or aliased ops to known structured/text paths
|
||||
|
||||
def _unwrap_and_alias(edit: dict[str, Any]) -> dict[str, Any]:
|
||||
# Unwrap single-key wrappers like {"replace_method": {...}}
|
||||
for wrapper_key in (
|
||||
"replace_method", "insert_method", "delete_method",
|
||||
"replace_class", "delete_class",
|
||||
"anchor_insert", "anchor_replace", "anchor_delete",
|
||||
):
|
||||
if wrapper_key in edit and isinstance(edit[wrapper_key], dict):
|
||||
inner = dict(edit[wrapper_key])
|
||||
inner["op"] = wrapper_key
|
||||
edit = inner
|
||||
break
|
||||
|
||||
e = dict(edit)
|
||||
op = (e.get("op") or e.get("operation") or e.get(
|
||||
"type") or e.get("mode") or "").strip().lower()
|
||||
if op:
|
||||
e["op"] = op
|
||||
|
||||
# Common field aliases
|
||||
if "class_name" in e and "className" not in e:
|
||||
e["className"] = e.pop("class_name")
|
||||
if "class" in e and "className" not in e:
|
||||
e["className"] = e.pop("class")
|
||||
if "method_name" in e and "methodName" not in e:
|
||||
e["methodName"] = e.pop("method_name")
|
||||
# Some clients use a generic 'target' for method name
|
||||
if "target" in e and "methodName" not in e:
|
||||
e["methodName"] = e.pop("target")
|
||||
if "method" in e and "methodName" not in e:
|
||||
e["methodName"] = e.pop("method")
|
||||
if "new_content" in e and "replacement" not in e:
|
||||
e["replacement"] = e.pop("new_content")
|
||||
if "newMethod" in e and "replacement" not in e:
|
||||
e["replacement"] = e.pop("newMethod")
|
||||
if "new_method" in e and "replacement" not in e:
|
||||
e["replacement"] = e.pop("new_method")
|
||||
if "content" in e and "replacement" not in e:
|
||||
e["replacement"] = e.pop("content")
|
||||
if "after" in e and "afterMethodName" not in e:
|
||||
e["afterMethodName"] = e.pop("after")
|
||||
if "after_method" in e and "afterMethodName" not in e:
|
||||
e["afterMethodName"] = e.pop("after_method")
|
||||
if "before" in e and "beforeMethodName" not in e:
|
||||
e["beforeMethodName"] = e.pop("before")
|
||||
if "before_method" in e and "beforeMethodName" not in e:
|
||||
e["beforeMethodName"] = e.pop("before_method")
|
||||
# anchor_method → before/after based on position (default after)
|
||||
if "anchor_method" in e:
|
||||
anchor = e.pop("anchor_method")
|
||||
pos = (e.get("position") or "after").strip().lower()
|
||||
if pos == "before" and "beforeMethodName" not in e:
|
||||
e["beforeMethodName"] = anchor
|
||||
elif "afterMethodName" not in e:
|
||||
e["afterMethodName"] = anchor
|
||||
if "anchorText" in e and "anchor" not in e:
|
||||
e["anchor"] = e.pop("anchorText")
|
||||
if "pattern" in e and "anchor" not in e and e.get("op") and e["op"].startswith("anchor_"):
|
||||
e["anchor"] = e.pop("pattern")
|
||||
if "newText" in e and "text" not in e:
|
||||
e["text"] = e.pop("newText")
|
||||
|
||||
# CI compatibility (T‑A/T‑E):
|
||||
# Accept method-anchored anchor_insert and upgrade to insert_method
|
||||
# Example incoming shape:
|
||||
# {"op":"anchor_insert","afterMethodName":"GetCurrentTarget","text":"..."}
|
||||
if (
|
||||
e.get("op") == "anchor_insert"
|
||||
and not e.get("anchor")
|
||||
and (e.get("afterMethodName") or e.get("beforeMethodName"))
|
||||
):
|
||||
e["op"] = "insert_method"
|
||||
if "replacement" not in e:
|
||||
e["replacement"] = e.get("text", "")
|
||||
|
||||
# LSP-like range edit -> replace_range
|
||||
if "range" in e and isinstance(e["range"], dict):
|
||||
rng = e.pop("range")
|
||||
start = rng.get("start", {})
|
||||
end = rng.get("end", {})
|
||||
# Convert 0-based to 1-based line/col
|
||||
e["op"] = "replace_range"
|
||||
e["startLine"] = int(start.get("line", 0)) + 1
|
||||
e["startCol"] = int(start.get("character", 0)) + 1
|
||||
e["endLine"] = int(end.get("line", 0)) + 1
|
||||
e["endCol"] = int(end.get("character", 0)) + 1
|
||||
if "newText" in edit and "text" not in e:
|
||||
e["text"] = edit.get("newText", "")
|
||||
return e
|
||||
|
||||
normalized_edits: list[dict[str, Any]] = []
|
||||
for raw in edits or []:
|
||||
e = _unwrap_and_alias(raw)
|
||||
op = (e.get("op") or e.get("operation") or e.get(
|
||||
"type") or e.get("mode") or "").strip().lower()
|
||||
|
||||
# Default className to script name if missing on structured method/class ops
|
||||
if op in ("replace_class", "delete_class", "replace_method", "delete_method", "insert_method") and not e.get("className"):
|
||||
e["className"] = name
|
||||
|
||||
# Map common aliases for text ops
|
||||
if op in ("text_replace",):
|
||||
e["op"] = "replace_range"
|
||||
normalized_edits.append(e)
|
||||
continue
|
||||
if op in ("regex_delete",):
|
||||
e["op"] = "regex_replace"
|
||||
e.setdefault("text", "")
|
||||
normalized_edits.append(e)
|
||||
continue
|
||||
if op == "regex_replace" and ("replacement" not in e):
|
||||
if "text" in e:
|
||||
e["replacement"] = e.get("text", "")
|
||||
elif "insert" in e or "content" in e:
|
||||
e["replacement"] = e.get(
|
||||
"insert") or e.get("content") or ""
|
||||
if op == "anchor_insert" and not (e.get("text") or e.get("insert") or e.get("content") or e.get("replacement")):
|
||||
e["op"] = "anchor_delete"
|
||||
normalized_edits.append(e)
|
||||
continue
|
||||
normalized_edits.append(e)
|
||||
|
||||
edits = normalized_edits
|
||||
normalized_for_echo = edits
|
||||
|
||||
# Validate required fields and produce machine-parsable hints
|
||||
def error_with_hint(message: str, expected: dict[str, Any], suggestion: dict[str, Any]) -> dict[str, Any]:
|
||||
return _err("missing_field", message, expected=expected, rewrite=suggestion, normalized=normalized_for_echo)
|
||||
|
||||
for e in edits or []:
|
||||
op = e.get("op", "")
|
||||
if op == "replace_method":
|
||||
if not e.get("methodName"):
|
||||
return error_with_hint(
|
||||
"replace_method requires 'methodName'.",
|
||||
{"op": "replace_method", "required": [
|
||||
"className", "methodName", "replacement"]},
|
||||
{"edits[0].methodName": "HasTarget"}
|
||||
)
|
||||
if not (e.get("replacement") or e.get("text")):
|
||||
return error_with_hint(
|
||||
"replace_method requires 'replacement' (inline or base64).",
|
||||
{"op": "replace_method", "required": [
|
||||
"className", "methodName", "replacement"]},
|
||||
{"edits[0].replacement": "public bool X(){ return true; }"}
|
||||
)
|
||||
elif op == "insert_method":
|
||||
if not (e.get("replacement") or e.get("text")):
|
||||
return error_with_hint(
|
||||
"insert_method requires a non-empty 'replacement'.",
|
||||
{"op": "insert_method", "required": ["className", "replacement"], "position": {
|
||||
"after_requires": "afterMethodName", "before_requires": "beforeMethodName"}},
|
||||
{"edits[0].replacement": "public void PrintSeries(){ Debug.Log(\"1,2,3\"); }"}
|
||||
)
|
||||
pos = (e.get("position") or "").lower()
|
||||
if pos == "after" and not e.get("afterMethodName"):
|
||||
return error_with_hint(
|
||||
"insert_method with position='after' requires 'afterMethodName'.",
|
||||
{"op": "insert_method", "position": {
|
||||
"after_requires": "afterMethodName"}},
|
||||
{"edits[0].afterMethodName": "GetCurrentTarget"}
|
||||
)
|
||||
if pos == "before" and not e.get("beforeMethodName"):
|
||||
return error_with_hint(
|
||||
"insert_method with position='before' requires 'beforeMethodName'.",
|
||||
{"op": "insert_method", "position": {
|
||||
"before_requires": "beforeMethodName"}},
|
||||
{"edits[0].beforeMethodName": "GetCurrentTarget"}
|
||||
)
|
||||
elif op == "delete_method":
|
||||
if not e.get("methodName"):
|
||||
return error_with_hint(
|
||||
"delete_method requires 'methodName'.",
|
||||
{"op": "delete_method", "required": [
|
||||
"className", "methodName"]},
|
||||
{"edits[0].methodName": "PrintSeries"}
|
||||
)
|
||||
elif op in ("anchor_insert", "anchor_replace", "anchor_delete"):
|
||||
if not e.get("anchor"):
|
||||
return error_with_hint(
|
||||
f"{op} requires 'anchor' (regex).",
|
||||
{"op": op, "required": ["anchor"]},
|
||||
{"edits[0].anchor": "(?m)^\\s*public\\s+bool\\s+HasTarget\\s*\\("}
|
||||
)
|
||||
if op in ("anchor_insert", "anchor_replace") and not (e.get("text") or e.get("replacement")):
|
||||
return error_with_hint(
|
||||
f"{op} requires 'text'.",
|
||||
{"op": op, "required": ["anchor", "text"]},
|
||||
{"edits[0].text": "/* comment */\n"}
|
||||
)
|
||||
|
||||
# Decide routing: structured vs text vs mixed
|
||||
STRUCT = {"replace_class", "delete_class", "replace_method", "delete_method",
|
||||
"insert_method", "anchor_delete", "anchor_replace", "anchor_insert"}
|
||||
TEXT = {"prepend", "append", "replace_range", "regex_replace"}
|
||||
ops_set = {(e.get("op") or "").lower() for e in edits or []}
|
||||
all_struct = ops_set.issubset(STRUCT)
|
||||
all_text = ops_set.issubset(TEXT)
|
||||
mixed = not (all_struct or all_text)
|
||||
|
||||
# If everything is structured (method/class/anchor ops), forward directly to Unity's structured editor.
|
||||
if all_struct:
|
||||
opts2 = dict(options or {})
|
||||
# For structured edits, prefer immediate refresh to avoid missed reloads when Editor is unfocused
|
||||
opts2.setdefault("refresh", "immediate")
|
||||
params_struct: dict[str, Any] = {
|
||||
"action": "edit",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
"edits": edits,
|
||||
"options": opts2,
|
||||
}
|
||||
resp_struct = send_command_with_retry(
|
||||
"manage_script", params_struct)
|
||||
if isinstance(resp_struct, dict) and resp_struct.get("success"):
|
||||
pass # Optional sentinel reload removed (deprecated)
|
||||
return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="structured")
|
||||
|
||||
# 1) read from Unity
|
||||
read_resp = send_command_with_retry("manage_script", {
|
||||
"action": "read",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
})
|
||||
if not isinstance(read_resp, dict) or not read_resp.get("success"):
|
||||
return read_resp if isinstance(read_resp, dict) else {"success": False, "message": str(read_resp)}
|
||||
|
||||
data = read_resp.get("data") or read_resp.get(
|
||||
"result", {}).get("data") or {}
|
||||
contents = data.get("contents")
|
||||
if contents is None and data.get("contentsEncoded") and data.get("encodedContents"):
|
||||
contents = base64.b64decode(
|
||||
data["encodedContents"]).decode("utf-8")
|
||||
if contents is None:
|
||||
return {"success": False, "message": "No contents returned from Unity read."}
|
||||
|
||||
# Optional preview/dry-run: apply locally and return diff without writing
|
||||
preview = bool((options or {}).get("preview"))
|
||||
|
||||
# If we have a mixed batch (TEXT + STRUCT), apply text first with precondition, then structured
|
||||
if mixed:
|
||||
text_edits = [e for e in edits or [] if (
|
||||
e.get("op") or "").lower() in TEXT]
|
||||
struct_edits = [e for e in edits or [] if (
|
||||
e.get("op") or "").lower() in STRUCT]
|
||||
try:
|
||||
base_text = contents
|
||||
|
||||
def line_col_from_index(idx: int) -> tuple[int, int]:
|
||||
line = base_text.count("\n", 0, idx) + 1
|
||||
last_nl = base_text.rfind("\n", 0, idx)
|
||||
col = (idx - (last_nl + 1)) + \
|
||||
1 if last_nl >= 0 else idx + 1
|
||||
return line, col
|
||||
|
||||
at_edits: list[dict[str, Any]] = []
|
||||
for e in text_edits:
|
||||
opx = (e.get("op") or e.get("operation") or e.get(
|
||||
"type") or e.get("mode") or "").strip().lower()
|
||||
text_field = e.get("text") or e.get("insert") or e.get(
|
||||
"content") or e.get("replacement") or ""
|
||||
if opx == "anchor_insert":
|
||||
anchor = e.get("anchor") or ""
|
||||
position = (e.get("position") or "after").lower()
|
||||
flags = re.MULTILINE | (
|
||||
re.IGNORECASE if e.get("ignore_case") else 0)
|
||||
try:
|
||||
# Use improved anchor matching logic
|
||||
m = _find_best_anchor_match(
|
||||
anchor, base_text, flags, prefer_last=True)
|
||||
except Exception as ex:
|
||||
return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="mixed/text-first")
|
||||
if not m:
|
||||
return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="mixed/text-first")
|
||||
idx = m.start() if position == "before" else m.end()
|
||||
# Normalize insertion to avoid jammed methods
|
||||
text_field_norm = text_field
|
||||
if not text_field_norm.startswith("\n"):
|
||||
text_field_norm = "\n" + text_field_norm
|
||||
if not text_field_norm.endswith("\n"):
|
||||
text_field_norm = text_field_norm + "\n"
|
||||
sl, sc = line_col_from_index(idx)
|
||||
at_edits.append(
|
||||
{"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field_norm})
|
||||
# do not mutate base_text when building atomic spans
|
||||
elif opx == "replace_range":
|
||||
if all(k in e for k in ("startLine", "startCol", "endLine", "endCol")):
|
||||
at_edits.append({
|
||||
"startLine": int(e.get("startLine", 1)),
|
||||
"startCol": int(e.get("startCol", 1)),
|
||||
"endLine": int(e.get("endLine", 1)),
|
||||
"endCol": int(e.get("endCol", 1)),
|
||||
"newText": text_field
|
||||
})
|
||||
else:
|
||||
return _with_norm(_err("missing_field", "replace_range requires startLine/startCol/endLine/endCol", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first")
|
||||
elif opx == "regex_replace":
|
||||
pattern = e.get("pattern") or ""
|
||||
try:
|
||||
regex_obj = re.compile(pattern, re.MULTILINE | (
|
||||
re.IGNORECASE if e.get("ignore_case") else 0))
|
||||
except Exception as ex:
|
||||
return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="mixed/text-first", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="mixed/text-first")
|
||||
m = regex_obj.search(base_text)
|
||||
if not m:
|
||||
continue
|
||||
# Expand $1, $2... in replacement using this match
|
||||
|
||||
def _expand_dollars(rep: str, _m=m) -> str:
|
||||
return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep)
|
||||
repl = _expand_dollars(text_field)
|
||||
sl, sc = line_col_from_index(m.start())
|
||||
el, ec = line_col_from_index(m.end())
|
||||
at_edits.append(
|
||||
{"startLine": sl, "startCol": sc, "endLine": el, "endCol": ec, "newText": repl})
|
||||
# do not mutate base_text when building atomic spans
|
||||
elif opx in ("prepend", "append"):
|
||||
if opx == "prepend":
|
||||
sl, sc = 1, 1
|
||||
at_edits.append(
|
||||
{"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": text_field})
|
||||
# prepend can be applied atomically without local mutation
|
||||
else:
|
||||
# Insert at true EOF position (handles both \n and \r\n correctly)
|
||||
eof_idx = len(base_text)
|
||||
sl, sc = line_col_from_index(eof_idx)
|
||||
new_text = ("\n" if not base_text.endswith(
|
||||
"\n") else "") + text_field
|
||||
at_edits.append(
|
||||
{"startLine": sl, "startCol": sc, "endLine": sl, "endCol": sc, "newText": new_text})
|
||||
# do not mutate base_text when building atomic spans
|
||||
else:
|
||||
return _with_norm(_err("unknown_op", f"Unsupported text edit op: {opx}", normalized=normalized_for_echo, routing="mixed/text-first"), normalized_for_echo, routing="mixed/text-first")
|
||||
|
||||
sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest()
|
||||
if at_edits:
|
||||
params_text: dict[str, Any] = {
|
||||
"action": "apply_text_edits",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
"edits": at_edits,
|
||||
"precondition_sha256": sha,
|
||||
"options": {"refresh": (options or {}).get("refresh", "debounced"), "validate": (options or {}).get("validate", "standard"), "applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential"))}
|
||||
}
|
||||
resp_text = send_command_with_retry(
|
||||
"manage_script", params_text)
|
||||
if not (isinstance(resp_text, dict) and resp_text.get("success")):
|
||||
return _with_norm(resp_text if isinstance(resp_text, dict) else {"success": False, "message": str(resp_text)}, normalized_for_echo, routing="mixed/text-first")
|
||||
# Optional sentinel reload removed (deprecated)
|
||||
except Exception as e:
|
||||
return _with_norm({"success": False, "message": f"Text edit conversion failed: {e}"}, normalized_for_echo, routing="mixed/text-first")
|
||||
|
||||
if struct_edits:
|
||||
opts2 = dict(options or {})
|
||||
# Prefer debounced background refresh unless explicitly overridden
|
||||
opts2.setdefault("refresh", "debounced")
|
||||
params_struct: dict[str, Any] = {
|
||||
"action": "edit",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
"edits": struct_edits,
|
||||
"options": opts2
|
||||
}
|
||||
resp_struct = send_command_with_retry(
|
||||
"manage_script", params_struct)
|
||||
if isinstance(resp_struct, dict) and resp_struct.get("success"):
|
||||
pass # Optional sentinel reload removed (deprecated)
|
||||
return _with_norm(resp_struct if isinstance(resp_struct, dict) else {"success": False, "message": str(resp_struct)}, normalized_for_echo, routing="mixed/text-first")
|
||||
|
||||
return _with_norm({"success": True, "message": "Applied text edits (no structured ops)"}, normalized_for_echo, routing="mixed/text-first")
|
||||
|
||||
# If the edits are text-ops, prefer sending them to Unity's apply_text_edits with precondition
|
||||
# so header guards and validation run on the C# side.
|
||||
# Supported conversions: anchor_insert, replace_range, regex_replace (first match only).
|
||||
text_ops = {(e.get("op") or e.get("operation") or e.get("type") or e.get(
|
||||
"mode") or "").strip().lower() for e in (edits or [])}
|
||||
structured_kinds = {"replace_class", "delete_class",
|
||||
"replace_method", "delete_method", "insert_method", "anchor_insert"}
|
||||
if not text_ops.issubset(structured_kinds):
|
||||
# Convert to apply_text_edits payload
|
||||
try:
|
||||
base_text = contents
|
||||
|
||||
def line_col_from_index(idx: int) -> tuple[int, int]:
|
||||
# 1-based line/col against base buffer
|
||||
line = base_text.count("\n", 0, idx) + 1
|
||||
last_nl = base_text.rfind("\n", 0, idx)
|
||||
col = (idx - (last_nl + 1)) + \
|
||||
1 if last_nl >= 0 else idx + 1
|
||||
return line, col
|
||||
|
||||
at_edits: list[dict[str, Any]] = []
|
||||
import re as _re
|
||||
for e in edits or []:
|
||||
op = (e.get("op") or e.get("operation") or e.get(
|
||||
"type") or e.get("mode") or "").strip().lower()
|
||||
# aliasing for text field
|
||||
text_field = e.get("text") or e.get(
|
||||
"insert") or e.get("content") or ""
|
||||
if op == "anchor_insert":
|
||||
anchor = e.get("anchor") or ""
|
||||
position = (e.get("position") or "after").lower()
|
||||
# Use improved anchor matching logic with helpful errors, honoring ignore_case
|
||||
try:
|
||||
flags = re.MULTILINE | (
|
||||
re.IGNORECASE if e.get("ignore_case") else 0)
|
||||
m = _find_best_anchor_match(
|
||||
anchor, base_text, flags, prefer_last=True)
|
||||
except Exception as ex:
|
||||
return _with_norm(_err("bad_regex", f"Invalid anchor regex: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape parentheses/braces or use a simpler anchor."}), normalized_for_echo, routing="text")
|
||||
if not m:
|
||||
return _with_norm({"success": False, "code": "anchor_not_found", "message": f"anchor not found: {anchor}"}, normalized_for_echo, routing="text")
|
||||
idx = m.start() if position == "before" else m.end()
|
||||
# Normalize insertion newlines
|
||||
if text_field and not text_field.startswith("\n"):
|
||||
text_field = "\n" + text_field
|
||||
if text_field and not text_field.endswith("\n"):
|
||||
text_field = text_field + "\n"
|
||||
sl, sc = line_col_from_index(idx)
|
||||
at_edits.append({
|
||||
"startLine": sl,
|
||||
"startCol": sc,
|
||||
"endLine": sl,
|
||||
"endCol": sc,
|
||||
"newText": text_field or ""
|
||||
})
|
||||
# Do not mutate base buffer when building an atomic batch
|
||||
elif op == "replace_range":
|
||||
# Directly forward if already in line/col form
|
||||
if "startLine" in e:
|
||||
at_edits.append({
|
||||
"startLine": int(e.get("startLine", 1)),
|
||||
"startCol": int(e.get("startCol", 1)),
|
||||
"endLine": int(e.get("endLine", 1)),
|
||||
"endCol": int(e.get("endCol", 1)),
|
||||
"newText": text_field
|
||||
})
|
||||
else:
|
||||
# If only indices provided, skip (we don't support index-based here)
|
||||
return _with_norm({"success": False, "code": "missing_field", "message": "replace_range requires startLine/startCol/endLine/endCol"}, normalized_for_echo, routing="text")
|
||||
elif op == "regex_replace":
|
||||
pattern = e.get("pattern") or ""
|
||||
repl = text_field
|
||||
flags = re.MULTILINE | (
|
||||
re.IGNORECASE if e.get("ignore_case") else 0)
|
||||
# Early compile for clearer error messages
|
||||
try:
|
||||
regex_obj = re.compile(pattern, flags)
|
||||
except Exception as ex:
|
||||
return _with_norm(_err("bad_regex", f"Invalid regex pattern: {ex}", normalized=normalized_for_echo, routing="text", extra={"hint": "Escape special chars or prefer structured delete for methods."}), normalized_for_echo, routing="text")
|
||||
# Use smart anchor matching for consistent behavior with anchor_insert
|
||||
m = _find_best_anchor_match(
|
||||
pattern, base_text, flags, prefer_last=True)
|
||||
if not m:
|
||||
continue
|
||||
# Expand $1, $2... backrefs in replacement using the first match (consistent with mixed-path behavior)
|
||||
|
||||
def _expand_dollars(rep: str, _m=m) -> str:
|
||||
return re.sub(r"\$(\d+)", lambda g: _m.group(int(g.group(1))) or "", rep)
|
||||
repl_expanded = _expand_dollars(repl)
|
||||
# Let C# side handle validation using Unity's built-in compiler services
|
||||
sl, sc = line_col_from_index(m.start())
|
||||
el, ec = line_col_from_index(m.end())
|
||||
at_edits.append({
|
||||
"startLine": sl,
|
||||
"startCol": sc,
|
||||
"endLine": el,
|
||||
"endCol": ec,
|
||||
"newText": repl_expanded
|
||||
})
|
||||
# Do not mutate base buffer when building an atomic batch
|
||||
else:
|
||||
return _with_norm({"success": False, "code": "unsupported_op", "message": f"Unsupported text edit op for server-side apply_text_edits: {op}"}, normalized_for_echo, routing="text")
|
||||
|
||||
if not at_edits:
|
||||
return _with_norm({"success": False, "code": "no_spans", "message": "No applicable text edit spans computed (anchor not found or zero-length)."}, normalized_for_echo, routing="text")
|
||||
|
||||
sha = hashlib.sha256(base_text.encode("utf-8")).hexdigest()
|
||||
params: dict[str, Any] = {
|
||||
"action": "apply_text_edits",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
"edits": at_edits,
|
||||
"precondition_sha256": sha,
|
||||
"options": {
|
||||
"refresh": (options or {}).get("refresh", "debounced"),
|
||||
"validate": (options or {}).get("validate", "standard"),
|
||||
"applyMode": ("atomic" if len(at_edits) > 1 else (options or {}).get("applyMode", "sequential"))
|
||||
}
|
||||
}
|
||||
resp = send_command_with_retry("manage_script", params)
|
||||
if isinstance(resp, dict) and resp.get("success"):
|
||||
pass # Optional sentinel reload removed (deprecated)
|
||||
return _with_norm(
|
||||
resp if isinstance(resp, dict) else {
|
||||
"success": False, "message": str(resp)},
|
||||
normalized_for_echo,
|
||||
routing="text"
|
||||
)
|
||||
except Exception as e:
|
||||
return _with_norm({"success": False, "code": "conversion_failed", "message": f"Edit conversion failed: {e}"}, normalized_for_echo, routing="text")
|
||||
|
||||
# For regex_replace, honor preview consistently: if preview=true, always return diff without writing.
|
||||
# If confirm=false (default) and preview not requested, return diff and instruct confirm=true to apply.
|
||||
if "regex_replace" in text_ops and (preview or not (options or {}).get("confirm")):
|
||||
try:
|
||||
preview_text = _apply_edits_locally(contents, edits)
|
||||
import difflib
|
||||
diff = list(difflib.unified_diff(contents.splitlines(
|
||||
), preview_text.splitlines(), fromfile="before", tofile="after", n=2))
|
||||
if len(diff) > 800:
|
||||
diff = diff[:800] + ["... (diff truncated) ..."]
|
||||
if preview:
|
||||
return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}}
|
||||
return _with_norm({"success": False, "message": "Preview diff; set options.confirm=true to apply.", "data": {"diff": "\n".join(diff)}}, normalized_for_echo, routing="text")
|
||||
except Exception as e:
|
||||
return _with_norm({"success": False, "code": "preview_failed", "message": f"Preview failed: {e}"}, normalized_for_echo, routing="text")
|
||||
# 2) apply edits locally (only if not text-ops)
|
||||
try:
|
||||
new_contents = _apply_edits_locally(contents, edits)
|
||||
except Exception as e:
|
||||
return {"success": False, "message": f"Edit application failed: {e}"}
|
||||
|
||||
# Short-circuit no-op edits to avoid false "applied" reports downstream
|
||||
if new_contents == contents:
|
||||
return _with_norm({
|
||||
"success": True,
|
||||
"message": "No-op: contents unchanged",
|
||||
"data": {"no_op": True, "evidence": {"reason": "identical_content"}}
|
||||
}, normalized_for_echo, routing="text")
|
||||
|
||||
if preview:
|
||||
# Produce a compact unified diff limited to small context
|
||||
import difflib
|
||||
a = contents.splitlines()
|
||||
b = new_contents.splitlines()
|
||||
diff = list(difflib.unified_diff(
|
||||
a, b, fromfile="before", tofile="after", n=3))
|
||||
# Limit diff size to keep responses small
|
||||
if len(diff) > 2000:
|
||||
diff = diff[:2000] + ["... (diff truncated) ..."]
|
||||
return {"success": True, "message": "Preview only (no write)", "data": {"diff": "\n".join(diff), "normalizedEdits": normalized_for_echo}}
|
||||
|
||||
# 3) update to Unity
|
||||
# Default refresh/validate for natural usage on text path as well
|
||||
options = dict(options or {})
|
||||
options.setdefault("validate", "standard")
|
||||
options.setdefault("refresh", "debounced")
|
||||
|
||||
# Compute the SHA of the current file contents for the precondition
|
||||
old_lines = contents.splitlines(keepends=True)
|
||||
end_line = len(old_lines) + 1 # 1-based exclusive end
|
||||
sha = hashlib.sha256(contents.encode("utf-8")).hexdigest()
|
||||
|
||||
# Apply a whole-file text edit rather than the deprecated 'update' action
|
||||
params = {
|
||||
"action": "apply_text_edits",
|
||||
"name": name,
|
||||
"path": path,
|
||||
"namespace": namespace,
|
||||
"scriptType": script_type,
|
||||
"edits": [
|
||||
{
|
||||
"startLine": 1,
|
||||
"startCol": 1,
|
||||
"endLine": end_line,
|
||||
"endCol": 1,
|
||||
"newText": new_contents,
|
||||
}
|
||||
],
|
||||
"precondition_sha256": sha,
|
||||
"options": options or {"validate": "standard", "refresh": "debounced"},
|
||||
}
|
||||
|
||||
write_resp = send_command_with_retry("manage_script", params)
|
||||
if isinstance(write_resp, dict) and write_resp.get("success"):
|
||||
pass # Optional sentinel reload removed (deprecated)
|
||||
return _with_norm(
|
||||
write_resp if isinstance(write_resp, dict)
|
||||
else {"success": False, "message": str(write_resp)},
|
||||
normalized_for_echo,
|
||||
routing="text",
|
||||
)
|
||||
|
|
@ -0,0 +1,451 @@
|
|||
from config import config
|
||||
import contextlib
|
||||
from dataclasses import dataclass
|
||||
import errno
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from port_discovery import PortDiscovery
|
||||
import random
|
||||
import socket
|
||||
import struct
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Dict
|
||||
|
||||
from models import MCPResponse
|
||||
|
||||
|
||||
# Logging is driven entirely by the shared config module.
logging.basicConfig(
    format=config.log_format,
    level=getattr(logging, config.log_level),
)
logger = logging.getLogger("mcp-for-unity-server")

# Serializes global connection initialization across threads.
_connection_lock = threading.Lock()

# Largest framed payload accepted from Unity (64 MiB).
FRAMED_MAX = 64 << 20
||||
|
||||
@dataclass
|
||||
class UnityConnection:
|
||||
"""Manages the socket connection to the Unity Editor."""
|
||||
host: str = config.unity_host
|
||||
port: int = None # Will be set dynamically
|
||||
sock: socket.socket = None # Socket for Unity communication
|
||||
use_framing: bool = False # Negotiated per-connection
|
||||
|
||||
def __post_init__(self):
    """Finish dataclass setup: create the per-instance locks and, when no
    explicit port was given, resolve one via port discovery."""
    # One lock serializes socket I/O, the other guards (re)connects.
    self._io_lock = threading.Lock()
    self._conn_lock = threading.Lock()
    if self.port is None:
        self.port = PortDiscovery.discover_unity_port()
|
||||
def connect(self) -> bool:
    """Establish a connection to the Unity Editor.

    Returns:
        bool: True when a socket is connected and the handshake succeeded
        (or legacy mode is permitted by configuration); False on any
        failure. Never raises — failures are logged and the socket reset.
    """
    # Double-checked locking: fast path without the lock, re-check inside.
    if self.sock:
        return True
    with self._conn_lock:
        if self.sock:
            return True
        try:
            # Bounded connect to avoid indefinite blocking; falls back from
            # connect_timeout -> connection_timeout -> 1.0s.
            connect_timeout = float(
                getattr(config, "connect_timeout", getattr(config, "connection_timeout", 1.0)))
            self.sock = socket.create_connection(
                (self.host, self.port), connect_timeout)
            # Disable Nagle's algorithm to reduce small RPC latency
            with contextlib.suppress(Exception):
                self.sock.setsockopt(
                    socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            logger.debug(f"Connected to Unity at {self.host}:{self.port}")

            # Strict handshake: require FRAMING=1 banner from the peer.
            try:
                require_framing = getattr(config, "require_framing", True)
                timeout = float(getattr(config, "handshake_timeout", 1.0))
                self.sock.settimeout(timeout)
                # Read up to 512 bytes or until a newline, bounded by the
                # handshake deadline; a recv timeout simply ends the loop.
                buf = bytearray()
                deadline = time.monotonic() + timeout
                while time.monotonic() < deadline and len(buf) < 512:
                    try:
                        chunk = self.sock.recv(256)
                        if not chunk:
                            break
                        buf.extend(chunk)
                        if b"\n" in buf:
                            break
                    except socket.timeout:
                        break
                text = bytes(buf).decode('ascii', errors='ignore').strip()

                if 'FRAMING=1' in text:
                    self.use_framing = True
                    logger.debug(
                        'MCP for Unity handshake received: FRAMING=1 (strict)')
                else:
                    if require_framing:
                        # Best-effort plain-text advisory for legacy peers
                        with contextlib.suppress(Exception):
                            self.sock.sendall(
                                b'MCP for Unity requires FRAMING=1\n')
                        # Propagates to the outer except -> logged, False.
                        raise ConnectionError(
                            f'MCP for Unity requires FRAMING=1, got: {text!r}')
                    else:
                        self.use_framing = False
                        logger.warning(
                            'MCP for Unity handshake missing FRAMING=1; proceeding in legacy mode by configuration')
            finally:
                # Restore the steady-state timeout whether or not the
                # handshake succeeded.
                self.sock.settimeout(config.connection_timeout)
            return True
        except Exception as e:
            logger.error(f"Failed to connect to Unity: {str(e)}")
            # Best-effort close, then clear so a later call can retry.
            try:
                if self.sock:
                    self.sock.close()
            except Exception:
                pass
            self.sock = None
            return False
|
||||
def disconnect(self):
    """Tear down the Unity Editor socket; close-time errors are logged only."""
    sock = self.sock
    if not sock:
        return
    try:
        sock.close()
    except Exception as e:
        logger.error(f"Error disconnecting from Unity: {str(e)}")
    finally:
        # Always drop the reference so a later connect() starts fresh.
        self.sock = None
|
||||
def _read_exact(self, sock: socket.socket, count: int) -> bytes:
    """Read exactly ``count`` bytes from ``sock``.

    Raises:
        ConnectionError: if the peer closes before ``count`` bytes arrive.
    """
    buf = bytearray()
    remaining = count
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise ConnectionError(
                "Connection closed before reading expected bytes")
        buf += chunk
        remaining -= len(chunk)
    return bytes(buf)
|
||||
def receive_full_response(self, sock, buffer_size=config.buffer_size) -> bytes:
    """Receive a complete response from Unity, handling chunked data.

    Framed mode: reads 8-byte big-endian length headers, skipping
    zero-length heartbeat frames up to a count/deadline threshold.
    Legacy mode: accumulates chunks until the buffer parses as JSON.
    """
    if self.use_framing:
        try:
            # Consume heartbeats, but do not hang indefinitely if only zero-length frames arrive
            heartbeat_count = 0
            deadline = time.monotonic() + getattr(config, 'framed_receive_timeout', 2.0)
            while True:
                header = self._read_exact(sock, 8)
                # '>Q' = big-endian unsigned 64-bit payload length.
                payload_len = struct.unpack('>Q', header)[0]
                if payload_len == 0:
                    # Heartbeat/no-op frame: consume and continue waiting for a data frame
                    logger.debug("Received heartbeat frame (length=0)")
                    heartbeat_count += 1
                    if heartbeat_count >= getattr(config, 'max_heartbeat_frames', 16) or time.monotonic() > deadline:
                        # Treat as empty successful response to match C# server behavior
                        logger.debug(
                            "Heartbeat threshold reached; returning empty response")
                        return b""
                    continue
                if payload_len > FRAMED_MAX:
                    # Guard against corrupt headers requesting huge reads.
                    raise ValueError(
                        f"Invalid framed length: {payload_len}")
                payload = self._read_exact(sock, payload_len)
                logger.debug(
                    f"Received framed response ({len(payload)} bytes)")
                return payload
        except socket.timeout as e:
            logger.warning("Socket timeout during framed receive")
            raise TimeoutError("Timeout receiving Unity response") from e
        except Exception as e:
            logger.error(f"Error during framed receive: {str(e)}")
            raise

    # ---- Legacy (unframed) path below ----
    chunks = []
    # Respect the socket's currently configured timeout
    try:
        while True:
            chunk = sock.recv(buffer_size)
            if not chunk:
                if not chunks:
                    raise Exception(
                        "Connection closed before receiving data")
                break
            chunks.append(chunk)

            # Process the data received so far
            data = b''.join(chunks)
            decoded_data = data.decode('utf-8')

            # Check if we've received a complete response
            try:
                # Special case for ping-pong
                if decoded_data.strip().startswith('{"status":"success","result":{"message":"pong"'):
                    logger.debug("Received ping response")
                    return data

                # Handle escaped quotes in the content
                if '"content":' in decoded_data:
                    # Find the content field and its value
                    # NOTE(review): '"content":' is 10 chars, so +9 points
                    # at the ':' rather than past it — looks like an
                    # off-by-one; confirm whether this offset is intended.
                    content_start = decoded_data.find('"content":') + 9
                    content_end = decoded_data.rfind('"', content_start)
                    if content_end > content_start:
                        # Replace escaped quotes in content with regular quotes
                        content = decoded_data[content_start:content_end]
                        content = content.replace('\\"', '"')
                        decoded_data = decoded_data[:content_start] + \
                            content + decoded_data[content_end:]

                # Validate JSON format; note the original bytes (`data`)
                # are returned, not the rewritten `decoded_data`.
                json.loads(decoded_data)

                # If we get here, we have valid JSON
                logger.info(
                    f"Received complete response ({len(data)} bytes)")
                return data
            except json.JSONDecodeError:
                # We haven't received a complete valid JSON response yet
                continue
            except Exception as e:
                logger.warning(
                    f"Error processing response chunk: {str(e)}")
                # Continue reading more chunks as this might not be the complete response
                continue
    except socket.timeout:
        logger.warning("Socket timeout during receive")
        # NOTE(review): legacy path raises bare Exception here while the
        # framed path raises TimeoutError — inconsistent, but callers may
        # depend on it; left as-is.
        raise Exception("Timeout receiving Unity response")
    except Exception as e:
        # Also re-logs/re-raises the "Connection closed" Exception above.
        logger.error(f"Error during receive: {str(e)}")
        raise
|
||||
def send_command(self, command_type: str, params: Dict[str, Any] | None = None) -> Dict[str, Any] | MCPResponse:
    """Send a command to Unity with retry/backoff and port rediscovery.

    Pings are only sent when explicitly requested via ``command_type='ping'``.

    Args:
        command_type: Unity command name. The literal ``'ping'`` is sent as a
            raw ``b'ping'`` payload instead of a JSON command envelope.
        params: Command parameters. ``None`` is treated as a client
            placeholder invocation and rejected with a structured failure.

    Returns:
        The ``result`` payload of a successful Unity reply (dict), the dict
        ``{"message": "pong"}`` for a successful ping, or an ``MCPResponse``
        failure for placeholder calls and the reload preflight.

    Raises:
        ValueError: if ``command_type`` is empty.
        Exception: the last communication error once all attempts fail.
    """
    # Defensive guard: catch empty/placeholder invocations early.
    if not command_type:
        raise ValueError("MCP call missing command_type")
    if params is None:
        return MCPResponse(success=False, error="MCP call received with no parameters (client placeholder?)")
    # Always allow at least 5 retries regardless of configuration.
    attempts = max(config.max_retries, 5)

    def read_status_file() -> dict | None:
        # Best-effort read of the most recent Unity status heartbeat file;
        # returns None on any failure (missing dir, malformed JSON, ...).
        try:
            status_files = sorted(Path.home().joinpath(
                '.unity-mcp').glob('unity-mcp-status-*.json'), key=lambda p: p.stat().st_mtime, reverse=True)
            if not status_files:
                return None
            latest = status_files[0]
            with latest.open('r') as f:
                return json.load(f)
        except Exception:
            return None

    # Preflight: if Unity reports reloading, return a structured hint so
    # clients can retry politely instead of hammering the socket.
    try:
        status = read_status_file()
        if status and (status.get('reloading') or status.get('reason') == 'reloading'):
            return MCPResponse(
                success=False,
                error="Unity domain reload in progress, please try again shortly",
                data={"state": "reloading", "retry_after_ms": int(
                    config.reload_retry_ms)}
            )
    except Exception:
        pass

    for attempt in range(attempts + 1):
        try:
            # Ensure connected (handshake occurs within connect()).
            if not self.sock and not self.connect():
                raise Exception("Could not connect to Unity")

            # Build payload: raw bytes for ping, UTF-8 JSON otherwise.
            if command_type == 'ping':
                payload = b'ping'
            else:
                command = {"type": command_type, "params": params or {}}
                payload = json.dumps(
                    command, ensure_ascii=False).encode('utf-8')

            # Send/receive are serialized to protect the shared socket.
            with self._io_lock:
                mode = 'framed' if self.use_framing else 'legacy'
                with contextlib.suppress(Exception):
                    logger.debug(
                        "send %d bytes; mode=%s; head=%s",
                        len(payload),
                        mode,
                        (payload[:32]).decode('utf-8', 'ignore'),
                    )
                if self.use_framing:
                    # Framed mode: 8-byte big-endian length prefix, then payload.
                    header = struct.pack('>Q', len(payload))
                    self.sock.sendall(header)
                    self.sock.sendall(payload)
                else:
                    self.sock.sendall(payload)

                # During retry bursts use a short receive timeout, and make
                # sure the original timeout is restored afterwards.
                restore_timeout = None
                if attempt > 0:
                    restore_timeout = self.sock.gettimeout()
                    self.sock.settimeout(1.0)
                try:
                    response_data = self.receive_full_response(self.sock)
                    with contextlib.suppress(Exception):
                        logger.debug("recv %d bytes; mode=%s",
                                     len(response_data), mode)
                finally:
                    if restore_timeout is not None:
                        self.sock.settimeout(restore_timeout)

            # Parse the reply. Ping replies are validated for the pong marker.
            if command_type == 'ping':
                resp = json.loads(response_data.decode('utf-8'))
                if resp.get('status') == 'success' and resp.get('result', {}).get('message') == 'pong':
                    return {"message": "pong"}
                raise Exception("Ping unsuccessful")

            resp = json.loads(response_data.decode('utf-8'))
            if resp.get('status') == 'error':
                err = resp.get('error') or resp.get(
                    'message', 'Unknown Unity error')
                raise Exception(err)
            return resp.get('result', {})
        except Exception as e:
            logger.warning(
                f"Unity communication attempt {attempt+1} failed: {e}")
            # Drop the (possibly broken) socket so the next attempt reconnects.
            try:
                if self.sock:
                    self.sock.close()
            finally:
                self.sock = None

            # Re-discover the port each time; Unity may have restarted on a
            # different one.
            try:
                new_port = PortDiscovery.discover_unity_port()
                if new_port != self.port:
                    logger.info(
                        f"Unity port changed {self.port} -> {new_port}")
                    self.port = new_port
            except Exception as de:
                logger.debug(f"Port discovery failed: {de}")

            if attempt < attempts:
                # Heartbeat-aware, jittered backoff.
                status = read_status_file()
                # Decorrelated jitter multiplier, scaled exponentially below.
                jitter = random.uniform(0.1, 0.3)

                # Fast-retry for transient socket failures.
                fast_error = isinstance(
                    e, (ConnectionRefusedError, ConnectionResetError, TimeoutError))
                if not fast_error:
                    try:
                        err_no = getattr(e, 'errno', None)
                        fast_error = err_no in (
                            errno.ECONNREFUSED, errno.ECONNRESET, errno.ETIMEDOUT)
                    except Exception:
                        pass

                # Cap the backoff depending on state: stay snappy while Unity
                # is reloading or the failure looks transient, otherwise allow
                # longer waits.
                if status and status.get('reloading'):
                    cap = 0.8
                elif fast_error:
                    cap = 0.25
                else:
                    cap = 3.0

                sleep_s = min(cap, jitter * (2 ** attempt))
                time.sleep(sleep_s)
                continue
            raise
|
||||
|
||||
|
||||
# Global Unity connection
|
||||
_unity_connection = None
|
||||
|
||||
|
||||
def get_unity_connection() -> UnityConnection:
    """Retrieve or establish a persistent Unity connection.

    Note: Do NOT ping on every retrieval to avoid connection storms. Rely on
    send_command() exceptions to detect broken sockets and reconnect there.

    Raises:
        ConnectionError: if no connection to Unity could be established.
    """
    global _unity_connection
    # Lock-free fast path: once published, the connection is reused as-is.
    if _unity_connection is not None:
        return _unity_connection

    # Double-checked locking to avoid concurrent socket creation
    with _connection_lock:
        if _unity_connection is not None:
            return _unity_connection
        logger.info("Creating new Unity connection")
        # Connect a local instance first and publish to the global only after
        # connect() succeeds, so the lock-free fast path above can never
        # observe a half-initialized (unconnected) connection, and a raising
        # connect() never leaves a broken instance published.
        conn = UnityConnection()
        try:
            connected = conn.connect()
        except Exception as exc:
            raise ConnectionError(
                "Could not connect to Unity. Ensure the Unity Editor and MCP Bridge are running.") from exc
        if not connected:
            raise ConnectionError(
                "Could not connect to Unity. Ensure the Unity Editor and MCP Bridge are running.")
        _unity_connection = conn
        logger.info("Connected to Unity on startup")
        return _unity_connection
|
||||
|
||||
|
||||
# -----------------------------
|
||||
# Centralized retry helpers
|
||||
# -----------------------------
|
||||
|
||||
def _is_reloading_response(resp: dict) -> bool:
|
||||
"""Return True if the Unity response indicates the editor is reloading."""
|
||||
if not isinstance(resp, dict):
|
||||
return False
|
||||
if resp.get("state") == "reloading":
|
||||
return True
|
||||
message_text = (resp.get("message") or resp.get("error") or "").lower()
|
||||
return "reload" in message_text
|
||||
|
||||
|
||||
def send_command_with_retry(command_type: str, params: Dict[str, Any], *, max_retries: int | None = None, retry_ms: int | None = None) -> Dict[str, Any]:
    """Send a command via the shared connection, waiting politely through Unity reloads.

    Defaults come from ``config.reload_max_retries`` / ``config.reload_retry_ms``.
    If the retry budget runs out, the last structured failure is returned as-is.
    """
    conn = get_unity_connection()
    allowed_retries = max_retries if max_retries is not None else getattr(
        config, "reload_max_retries", 40)
    default_delay_ms = retry_ms if retry_ms is not None else getattr(
        config, "reload_retry_ms", 250)

    response = conn.send_command(command_type, params)
    attempts_used = 0
    while attempts_used < allowed_retries and _is_reloading_response(response):
        # Honor a server-suggested retry delay when present.
        if isinstance(response, dict):
            delay_ms = int(response.get("retry_after_ms", default_delay_ms))
        else:
            delay_ms = default_delay_ms
        time.sleep(max(0.0, delay_ms / 1000.0))
        attempts_used += 1
        response = conn.send_command(command_type, params)
    return response
|
||||
|
||||
|
||||
async def async_send_command_with_retry(command_type: str, params: dict[str, Any], *, loop=None, max_retries: int | None = None, retry_ms: int | None = None) -> dict[str, Any] | MCPResponse:
    """Run the blocking retry helper in a thread pool without blocking the event loop.

    Any exception raised while scheduling or executing the blocking call is
    converted into a failed ``MCPResponse`` rather than propagated.
    """
    try:
        # Imported lazily so purely synchronous callers never need asyncio.
        import asyncio

        event_loop = loop if loop is not None else asyncio.get_running_loop()

        def _blocking_call():
            return send_command_with_retry(
                command_type, params, max_retries=max_retries, retry_ms=retry_ms)

        return await event_loop.run_in_executor(None, _blocking_call)
    except Exception as exc:
        return MCPResponse(success=False, error=str(exc))
|
||||
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue