MCP踩坑日记

关于MCP的一些坑

入门指南 - Anthropic

实战解析MCP-使用本地的Qwen-2.5模型-AI协议的未来?_qwen mcp-CSDN博客

使用 mcp-agent 框架和百炼通义千问大模型构建基于 MCP 协议的网页总结智能代理 (agent) - ShanSan

MCP圣经:从入门到精通,从精通到放弃,理论 + 实践吃透 大火的 MCP 协议-CSDN博客

MCP 实战:调用DeepSeek实现MCP客户端和服务端快速搭建_人工智能_Maslow503-DeepSeek技术社区

MCP是Anthropic公司发布的,该公司发布了claude。所以官方给的例子基本都是基于claude Desktop为例的,或者基于Anthropic库,需要claude的KEY。虽然MCP支持几乎所有的大模型,但是claude的API和openai的API格式不一样!response的字段存在一些细微差别,github上有一些库在做兼容性的适配。所以直接拿官方的例子去兼容openai,需要改动!参考了上面一些文章,这里直接给出改动以后的代码。

server.py

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
# server.py
from mcp.server.fastmcp import FastMCP

# Create an MCP server instance; "Demo" is the server name shown to clients.
mcp = FastMCP("Demo")


# Addition tool exposed to the model.
@mcp.tool()
def add(a: int, b: int) -> int:
    """Add two numbers"""
    total = a + b
    return total


# Dynamic greeting resource, parameterized by {name} in the URI template.
@mcp.resource("greeting://{name}")
def get_greeting(name: str) -> str:
    """Get a personalized greeting"""
    return "Hello, " + name + "!"


if __name__ == "__main__":
    # Initialize and run the server over the stdio transport.
    mcp.run(transport="stdio")

稍微复杂一点的

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import sys
from typing import Any

import httpx
from mcp.server.fastmcp import FastMCP

# Initialize the FastMCP server; "weather" is the server name shown to clients.
mcp = FastMCP("weather")

# Constants: base URL of the US National Weather Service API, and the
# User-Agent string sent with every request.
NWS_API_BASE = "https://api.weather.gov"
USER_AGENT = "weather-app/1.0"


async def make_nws_request(url: str) -> dict[str, Any] | None:
    """Send a GET request to the NWS API and return the parsed JSON body.

    Best-effort: any request, HTTP-status, or decoding failure collapses
    to None rather than raising.
    """
    request_headers = {
        "User-Agent": USER_AGENT,
        "Accept": "application/geo+json",
    }
    async with httpx.AsyncClient() as http:
        try:
            resp = await http.get(url, headers=request_headers, timeout=30.0)
            resp.raise_for_status()
            return resp.json()
        except Exception:
            return None


def format_alert(feature: dict) -> str:
    """Format a single NWS alert feature into a readable multi-line string."""
    props = feature["properties"]
    # Each (label, fallback) pair renders as "Label: value" on its own line.
    fields = [
        ("Event", props.get("event", "Unknown")),
        ("Area", props.get("areaDesc", "Unknown")),
        ("Severity", props.get("severity", "Unknown")),
        ("Description", props.get("description", "No description available")),
        ("Instructions", props.get("instruction", "No specific instructions provided")),
    ]
    body = "\n".join(f"{label}: {value}" for label, value in fields)
    # Leading and trailing newlines match the original triple-quoted layout.
    return "\n" + body + "\n"


@mcp.tool()
async def get_alerts(state: str) -> str:
    """Fetch active weather alerts for a US state (two-letter code, e.g. CA/NY)."""
    data = await make_nws_request(f"{NWS_API_BASE}/alerts/active/area/{state}")

    # Distinguish "request failed / malformed payload" from "no alerts".
    if not data or "features" not in data:
        return "无法获取警报或未找到警报。"

    features = data["features"]
    if not features:
        return "该州没有活动警报。"

    return "\n---\n".join(format_alert(feature) for feature in features)


@mcp.tool()
def get_weather(city: str) -> str:
    """Return a (stubbed) weather report for the given city."""
    # BUG FIX: with transport="stdio" the server's stdout carries the MCP
    # JSON-RPC stream, so printing debug text to stdout corrupts the protocol.
    # Route diagnostics to stderr instead.
    print(f"[debug] getting weather for {city}", file=sys.stderr)
    return f"The weather in {city} is sunny."

@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get the weather forecast for a location.

    Args:
        latitude: Latitude of the location.
        longitude: Longitude of the location.
    """
    # Step 1: resolve the lat/lon pair to its forecast grid endpoint.
    points_data = await make_nws_request(f"{NWS_API_BASE}/points/{latitude},{longitude}")
    if not points_data:
        return "无法为此位置获取预报数据。"

    # Step 2: the points response carries the concrete forecast URL.
    forecast_data = await make_nws_request(points_data["properties"]["forecast"])
    if not forecast_data:
        return "无法获取详细预报。"

    # Step 3: render only the next 5 forecast periods as readable text.
    rendered = []
    for period in forecast_data["properties"]["periods"][:5]:
        rendered.append(
            "\n"
            f"{period['name']}:\n"
            f"Temperature: {period['temperature']}°{period['temperatureUnit']}\n"
            f"Wind: {period['windSpeed']} {period['windDirection']}\n"
            f"Forecast: {period['detailedForecast']}\n"
        )
    return "\n---\n".join(rendered)


if __name__ == "__main__":
    # Initialize and run the server over the stdio transport.
    mcp.run(transport="stdio")

client.py

  • 注意需要使用await self.client.chat.completions.create,一定得搭配AsyncOpenAI
  • openai格式的response里面,Message里面的content和tool_calls一般只会有一个,choices一般也只有一个,所以一般都是response.choices[0].message,除非你调用的时候指定了参数n

下面给出两个响应的格式

  1. 询问你是谁
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
response = ChatCompletion(
id="chatcmpl-45b85001-e64c-98bb-830c-5d6267e83889",
choices=[
Choice(
finish_reason="stop",
index=0,
logprobs=None,
message=ChatCompletionMessage(
content="我是阿里云开发的超大规模语言模型,我的中文名字叫通义千问,英文名字叫Qwen。我可以回答问题、创作文字,比如写故事、写公文、写邮件、写剧本、逻辑推理、编程等等,还能表达观点,玩游戏等。如果你有任何问题或需要帮助,请随时告诉我!",
refusal=None,
role="assistant",
annotations=None,
audio=None,
function_call=None,
tool_calls=None,
),
)
],
created=1744787090,
model="qwen-plus",
object="chat.completion",
service_tier=None,
system_fingerprint=None,
usage=CompletionUsage(
completion_tokens=69,
prompt_tokens=187,
total_tokens=256,
completion_tokens_details=None,
prompt_tokens_details=PromptTokensDetails(audio_tokens=None, cached_tokens=0),
),
)
  2. 询问两个数相加
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
response = ChatCompletion(
id="chatcmpl-8ab31b9c-99ad-97e8-805f-e77bdd535436",
choices=[
Choice(
finish_reason="tool_calls",
index=0,
logprobs=None,
message=ChatCompletionMessage(
content="",
refusal=None,
role="assistant",
annotations=None,
audio=None,
function_call=None,
tool_calls=[
ChatCompletionMessageToolCall(
id="call_dee4a8444cab44beacee2b",
function=Function(
arguments='{"a": 100, "b": 3300}', name="add"
),
type="function",
index=0,
)
],
),
)
],
created=1744774816,
model="qwen-plus",
object="chat.completion",
service_tier=None,
system_fingerprint=None,
usage=CompletionUsage(
completion_tokens=27,
prompt_tokens=192,
total_tokens=219,
completion_tokens_details=None,
prompt_tokens_details=PromptTokensDetails(audio_tokens=None, cached_tokens=0),
),
)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
import asyncio
import json
from typing import Optional
from contextlib import AsyncExitStack

import httpx

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

from openai import AsyncOpenAI
from dotenv import load_dotenv
import os

load_dotenv()  # load environment variables from .env

class MCPClient:
    """Bridges an OpenAI-compatible LLM endpoint (here Qwen via DashScope's
    compatible-mode API) to tools exposed by an MCP server over stdio.

    Lifecycle: connect_to_server() -> chat_loop() -> cleanup().
    """

    def __init__(self):
        # Initialize session and client objects.  The MCP session is created
        # lazily in connect_to_server(); until then it is None.
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()
        # AsyncOpenAI is required because process_query() awaits
        # chat.completions.create().
        # NOTE(review): the API key is hardcoded here and verify=False disables
        # TLS certificate verification — load the key from the environment and
        # drop verify=False before real use.
        self.client = AsyncOpenAI(
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
            api_key="sk-xxxxx",
            http_client=httpx.AsyncClient(verify=False)
        )

    async def connect_to_server(self, server_script_path: str):
        """Connect to an MCP server

        Args:
            server_script_path: Path to the server script (.py or .js)
        """
        # The interpreter is chosen from the file extension only.
        is_python = server_script_path.endswith('.py')
        is_js = server_script_path.endswith('.js')
        if not (is_python or is_js):
            raise ValueError("Server script must be a .py or .js file")

        command = "python" if is_python else "node"
        server_params = StdioServerParameters(
            command=command,
            args=[server_script_path],
            env=None
        )

        # The exit stack owns both the stdio transport and the session, so
        # cleanup() can close them in reverse order.
        stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.stdio, self.write = stdio_transport
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))

        await self.session.initialize()

        # List available tools
        response = await self.session.list_tools()
        tools = response.tools
        print("\nConnected to server with tools:", [tool.name for tool in tools])

    async def process_query(self, query: str) -> str:
        """Process a query with the LLM, using tools provided by the MCP server."""
        messages = [
            {
                "role": "user",
                "content": query
            }
        ]

        # Re-shape MCP tool metadata into the OpenAI function-calling schema.
        response = await self.session.list_tools()
        available_tools = [{
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.inputSchema
            }
        } for tool in response.tools]

        # Initial LLM API call
        response = await self.client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            tools=available_tools
        )


        final_text = []
        # OpenAI-format responses carry a single choice unless n > 1 was
        # requested, hence choices[0].
        message = response.choices[0].message
        final_text.append(message.content or "")

        # Keep looping as long as the model requests tool calls; each round
        # executes the tools, appends the results, and re-queries the model.
        while message.tool_calls:
            # Process each tool call in this round.
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                tool_args = json.loads(tool_call.function.arguments)

                # Execute the tool call through the MCP session.
                result = await self.session.call_tool(tool_name, tool_args)
                final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")

                # Record the assistant's tool request and the tool result in
                # the message history, as the OpenAI tool protocol requires.
                messages.append({
                    "role": "assistant",
                    "tool_calls": [
                        {
                            "id": tool_call.id,
                            "type": "function",
                            "function": {
                                "name": tool_name,
                                "arguments": json.dumps(tool_args)
                            }
                        }
                    ]
                })
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": str(result.content)
                })

            # Hand the tool results back to the LLM for the next turn.
            response = await self.client.chat.completions.create(
                model="qwen-plus",
                messages=messages,
                tools=available_tools
            )

            message = response.choices[0].message
            if message.content:
                final_text.append(message.content)

        return "\n".join(final_text)

    async def chat_loop(self):
        """Run an interactive chat loop"""
        print("\nMCP Client Started!")
        print("Type your queries or 'quit' to exit.")

        while True:
            try:
                query = input("\nQuery: ").strip()

                if query.lower() == 'quit':
                    break

                response = await self.process_query(query)
                print("\n" + response)

            except Exception as e:
                # Keep the REPL alive on any per-query failure.
                print(f"\nError: {str(e)}")

    async def cleanup(self):
        """Clean up resources"""
        # Closes the MCP session and stdio transport in reverse entry order.
        await self.exit_stack.aclose()

async def main():
    """Entry point: connect to the MCP server given on argv and run the REPL."""
    # BUG FIX: the original relied on the `import sys` performed inside the
    # __main__ guard below; calling main() any other way raised NameError.
    # Import locally so the function is self-contained.
    import sys

    if len(sys.argv) < 2:
        print("Usage: python client.py <path_to_server_script>")
        sys.exit(1)

    client = MCPClient()
    try:
        await client.connect_to_server(sys.argv[1])
        await client.chat_loop()
    finally:
        # Always release the stdio transport/session, even on error.
        await client.cleanup()

if __name__ == "__main__":
    import sys  # makes sys available module-wide before main() runs
    asyncio.run(main())

实际使用中,server一般直接拿已有的,client可能需要我们自己去开发或者适配