Commit db8c0b5

feat: move dev dependencies to group (#44)

1 parent: 7765280

7 files changed (+90, -595 lines)


.github/workflows/deploy.yml

Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ jobs:

       - name: Install dependencies
         run: |
-          uv sync
+          uv sync --group dev

       - name: Setup site structure
         run: |

CONTRIBUTING.md

Lines changed: 1 addition & 1 deletion

@@ -33,7 +33,7 @@ Your server JSON file must include the following required fields:
       "required": false
     }
   },
-  "commands": {
+  "installations": {
     "npm": {
       "type": "npm",
       "command": "npx",

README.md

Lines changed: 1 addition & 1 deletion

@@ -109,7 +109,7 @@ This repository contains the CLI and service components for MCP Manager, built w

 ### Development Requirements

-- Python 3.8+
+- Python 3.10+
 - uv (for virtual environment and dependency management)
 - Click framework for CLI
 - Rich for enhanced console output

pyproject.toml

Lines changed: 2 additions & 4 deletions

@@ -24,12 +24,8 @@ dependencies = [
     "rich>=12.0.0",
     "requests>=2.28.0",
     "pydantic>=2.5.1",
-    "jsonschema>=4.17.0",
     "mcp>=1.6.0",
-    "loguru>=0.7.3",
     "ruamel-yaml>=0.18.10",
-    "openai>=1.72.0",
-    "aiohttp>=3.11.16",
     "watchfiles>=1.0.4",
     "duckdb>=1.2.2",
 ]
@@ -63,4 +59,6 @@ dev = [
     "pytest>=8.3.5",
     "pytest-asyncio>=0.26.0",
     "ruff>=0.11.4",
+    "jsonschema>=4.23.0",
+    "openai>=1.72.0",
 ]
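
A practical consequence of this move: a plain `uv sync` (without `--group dev`) no longer installs jsonschema or openai, so any runtime code that still imported them would fail at import time. A minimal sketch of a guarded import is shown below; the guard and its error message are illustrative, not part of this commit.

# Illustrative only: fail with a clear hint when a dev-group package is missing.
try:
    from openai import OpenAI  # now a dev-group dependency
except ImportError as exc:
    raise SystemExit(
        "The 'openai' package is only installed with the dev group; "
        "run `uv sync --group dev` first."
    ) from exc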

scripts/categorization.py

Lines changed: 31 additions & 41 deletions

@@ -1,15 +1,18 @@
+import asyncio
+import json
+import logging
+import os
 from dataclasses import dataclass
-from typing import List, Dict, Any, Optional
 from enum import Enum
-import os
-import json
-import asyncio
-from openai import OpenAI
-from loguru import logger
+from typing import Any, Dict, List, Optional

 import dotenv
+from openai import OpenAI
+
 dotenv.load_dotenv()

+logger = logging.getLogger(__name__)
+

 class MCPCategory(Enum):
     DATABASES = "Databases"
@@ -34,6 +37,7 @@ class LLMModel:
 @dataclass
 class CategorizationWorkflowState:
     """Holds the state for the categorization workflow"""
+
     server_name: str = ""
     server_description: str = ""
     selected_category: Optional[MCPCategory] = None
@@ -42,6 +46,7 @@ class CategorizationWorkflowState:
 @dataclass
 class CategorizationAgentBuildPromptTemplateArgs:
     """Arguments for building the prompt template"""
+
     include_examples: bool = False


@@ -57,8 +62,7 @@ def __init__(self):

     def build_system_prompt(self) -> str:
         """Build the system prompt for the categorization agent"""
-        return (
-            """You are an expert at categorizing MCP (Model Context Protocol) servers.
+        return """You are an expert at categorizing MCP (Model Context Protocol) servers.
 Your task is to categorize each server into exactly one of the following categories:
 ## 1. Databases
 Systems that connect LLMs to structured data repositories, enabling querying, analysis, and management of various types of databases including relational databases (PostgreSQL, MySQL, MSSQL), NoSQL databases (MongoDB, Redis, ArangoDB), vector databases (Pinecone, Chroma), cloud data warehouses (Snowflake, BigQuery), and search engines (Elasticsearch, Typesense).
@@ -90,7 +94,6 @@ def build_system_prompt(self) -> str:
 Choose the MOST appropriate category based on the server's primary function.
 Not that the server itself is an MCP server, so only select MCP Tools when the server is a meta-tool that manages other MCP servers.
 Only select ONE category per server."""
-        )

     def build_user_prompt(self, server_name: str, server_description: str, include_examples: bool = False) -> str:
         """Build the user prompt for categorization"""
@@ -114,15 +117,15 @@ def build_user_prompt(self, server_name: str, server_description: str, include_e

         return base_prompt

-    async def execute(self, server_name: str, server_description: str, include_examples: bool = False) -> Dict[str, Any]:
+    async def execute(
+        self, server_name: str, server_description: str, include_examples: bool = False
+    ) -> Dict[str, Any]:
         """Execute the categorization workflow"""
         try:
             # Build system and user prompts
             system_prompt = self.build_system_prompt()
             user_prompt = self.build_user_prompt(
-                server_name=server_name,
-                server_description=server_description,
-                include_examples=include_examples
+                server_name=server_name, server_description=server_description, include_examples=include_examples
             )

             # Define the function schema
@@ -136,14 +139,14 @@ async def execute(self, server_name: str, server_description: str, include_examp
                         "category": {
                             "type": "string",
                             "enum": [cat.value for cat in MCPCategory],
-                            "description": "Selected category for the server"
+                            "description": "Selected category for the server",
                         },
                         "explanation": {
                             "type": "string",
-                            "description": "Brief explanation of why this category was chosen"
-                        }
-                    }
-                }
+                            "description": "Brief explanation of why this category was chosen",
+                        },
+                    },
+                },
             }

             # Call OpenAI API with the categorization tool
@@ -154,19 +157,9 @@ async def execute(self, server_name: str, server_description: str, include_examp
                     "X-Title": "MCPM",
                 },
                 model=LLMModel.CLAUDE_3_SONNET,
-                messages=[
-                    {
-                        "role": "system",
-                        "content": system_prompt
-                    },
-                    {
-                        "role": "user",
-                        "content": user_prompt
-                    }
-                ],
+                messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
                 tools=[{"type": "function", "function": function_schema}],
-                tool_choice={"type": "function", "function": {
-                    "name": "categorize_server"}}
+                tool_choice={"type": "function", "function": {"name": "categorize_server"}},
             )

             # Process the tool response
@@ -176,25 +169,24 @@ async def execute(self, server_name: str, server_description: str, include_examp

                 result = {
                     "category": tool_args.get("category", "Unknown"),
-                    "explanation": tool_args.get("explanation", "No explanation provided.")
+                    "explanation": tool_args.get("explanation", "No explanation provided."),
                 }
-                logger.info(
-                    f"Categorization result: {result['category']} - {result['explanation'][:30]}...")
+                logger.info(f"Categorization result: {result['category']} - {result['explanation'][:30]}...")
                 return result
             else:
                 logger.error("No tool calls found in the response")
                 return {
                     "server_name": server_name,
                     "category": "Unknown",
-                    "explanation": "Failed to categorize: No tool use in response."
+                    "explanation": "Failed to categorize: No tool use in response.",
                 }

         except Exception as e:
             logger.error(f"Error during categorization: {str(e)}")
             return {
                 "server_name": server_name,
                 "category": "Error",
-                "explanation": f"Error during categorization: {str(e)}"
+                "explanation": f"Error during categorization: {str(e)}",
             }


@@ -206,9 +198,7 @@ async def categorize_servers(servers: List[Dict[str, str]]) -> List[Dict[str, An

     for server in servers:
         result = await agent.execute(
-            server_name=server["name"],
-            server_description=server["description"],
-            include_examples=True
+            server_name=server["name"], server_description=server["description"], include_examples=True
         )
         result["server_name"] = server["name"]
         results.append(result)
@@ -222,7 +212,7 @@ async def categorize_servers(servers: List[Dict[str, str]]) -> List[Dict[str, An
     {"name": "GitHub", "description": "Repository management and code hosting"},
     {"name": "Notion", "description": "Collaborative workspace and knowledge management"},
     {"name": "EverArt", "description": "AI image generation using various models"},
-    {"name": "MCP Installer", "description": "Installs other MCP servers automatically"}
+    {"name": "MCP Installer", "description": "Installs other MCP servers automatically"},
 ]

 # Run the categorization
@@ -231,8 +221,8 @@ async def categorize_servers(servers: List[Dict[str, str]]) -> List[Dict[str, An
 async def main():
     results = await categorize_servers(sample_servers)
     for result in results:
-        print(
-            f"{result['server_name']}{result['category']} ({result['explanation'][:50]}...)")
+        print(f"{result['server_name']}{result['category']} ({result['explanation'][:50]}...)")
+

 if __name__ == "__main__":
     asyncio.run(main())
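
With loguru replaced by the standard library's logging module, the module-level `logger = logging.getLogger(__name__)` produces no visible output until the root logger is configured. The snippet below is one way to wire that up when running the script directly; the `basicConfig` call and format string are suggestions, not part of this commit.

import logging

# Configure the root logger so logger.info/logger.error calls in
# scripts/categorization.py are printed to the console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)

# The existing entry point then works unchanged:
#   asyncio.run(main())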

Comments (0)