Skip to content

Commit c560a89

Browse files
v30.0.0 (#114)
* v30 * fix ver
1 parent 67ac756 commit c560a89

File tree

7 files changed

+60
-148
lines changed

7 files changed

+60
-148
lines changed

README.md

Lines changed: 1 addition & 48 deletions
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,6 @@ Build your AI agents in three lines of code!
1818
* Three lines of code setup
1919
* Simple Agent Definition
2020
* Fast Responses
21-
* Multi-Vendor Support
2221
* Solana Integration
2322
* Multi-Agent Swarm
2423
* Multi-Modal (Images & Audio & Text)
@@ -44,7 +43,6 @@ Build your AI agents in three lines of code!
4443
* Easy three lines of code setup
4544
* Simple agent definition using JSON
4645
* Fast AI responses
47-
* Multi-vendor support including OpenAI, Grok, and Gemini AI services
4846
* Solana Integration
4947
* MCP tool usage with first-class support for [Zapier](https://zapier.com/mcp)
5048
* Integrated observability and tracing via [Pydantic Logfire](https://pydantic.dev/logfire)
@@ -79,25 +77,10 @@ Build your AI agents in three lines of code!
7977
### AI Models Used
8078

8179
**OpenAI**
82-
* [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
83-
* [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router - can be overridden)
80+
* [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (agent & router)
8481
* [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
8582
* [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
8683
* [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
87-
* [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
88-
* [gpt-4o-mini-search-preview](https://platform.openai.com/docs/models/gpt-4o-mini-search-preview) (Internet search)
89-
90-
**Grok**
91-
* [grok-3-fast](https://x.ai/api#pricing) (agent - optional)
92-
* [grok-3-mini-fast](https://x.ai/api#pricing) (router - optional)
93-
* [grok-2-image](https://x.ai/api#pricing) (image generation - optional)
94-
95-
**Gemini**
96-
* [gemini-2.5-flash-preview-04-17](https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview) (agent & router - optional)
97-
* [imagen-3.0-generate-002](https://ai.google.dev/gemini-api/docs/models#imagen-3) (image generation - optional)
98-
99-
**Ollama**
100-
* [gemma:4b-it-qat](https://ollama.com/library/gemma3) - (agent & router - optional)
10184

10285
## Installation
10386

@@ -432,36 +415,6 @@ config = {
432415
}
433416
```
434417

435-
### Grok
436-
437-
```python
438-
config = {
439-
"grok": {
440-
"api_key": "your-grok-api-key",
441-
},
442-
}
443-
```
444-
445-
### Gemini
446-
447-
```python
448-
config = {
449-
"gemini": {
450-
"api_key": "your-gemini-api-key",
451-
},
452-
}
453-
```
454-
455-
### Ollama
456-
457-
```python
458-
config = {
459-
"ollama": {
460-
"api_key": "use-this-key-1010"
461-
},
462-
}
463-
```
464-
465418
### Knowledge Base
466419

467420
The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.

docs/index.rst

Lines changed: 0 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -366,31 +366,6 @@ Observability and Tracing - Optional
366366
},
367367
}
368368
369-
370-
Grok - Optional
371-
~~~~~~~~~~~~~~~~
372-
373-
.. code-block:: python
374-
375-
config = {
376-
"grok": {
377-
"api_key": "your-grok-api-key",
378-
},
379-
}
380-
381-
382-
Gemini - Optional
383-
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
384-
385-
.. code-block:: python
386-
387-
config = {
388-
"gemini": {
389-
"api_key": "your-gemini-api-key",
390-
},
391-
}
392-
393-
394369
Knowledge Base - Optional
395370
~~~~~~~~~~~~~~~~~~~~~~~~~~~
396371

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
[tool.poetry]
22
name = "solana-agent"
3-
version = "29.3.0"
3+
version = "30.0.0"
44
description = "AI Agents for Solana"
55
authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
66
license = "MIT"

solana_agent/adapters/openai_adapter.py

Lines changed: 9 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -33,8 +33,8 @@
3333

3434
T = TypeVar("T", bound=BaseModel)
3535

36-
DEFAULT_CHAT_MODEL = "gpt-4.1"
37-
DEFAULT_VISION_MODEL = "gpt-4.1"
36+
DEFAULT_CHAT_MODEL = "gpt-4.1-nano"
37+
DEFAULT_VISION_MODEL = "gpt-4.1-nano"
3838
DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
3939
DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
4040
DEFAULT_EMBEDDING_DIMENSIONS = 3072
@@ -163,9 +163,8 @@ async def generate_text(
163163
api_key: Optional[str] = None,
164164
base_url: Optional[str] = None,
165165
model: Optional[str] = None,
166-
functions: Optional[List[Dict[str, Any]]] = None,
167-
function_call: Optional[Union[str, Dict[str, Any]]] = None,
168-
) -> Any: # pragma: no cover
166+
tools: Optional[List[Dict[str, Any]]] = None,
167+
) -> str: # pragma: no cover
169168
"""Generate text or function call from OpenAI models."""
170169
messages = []
171170
if system_prompt:
@@ -176,10 +175,8 @@ async def generate_text(
176175
"messages": messages,
177176
"model": model or self.text_model,
178177
}
179-
if functions:
180-
request_params["functions"] = functions
181-
if function_call:
182-
request_params["function_call"] = function_call
178+
if tools:
179+
request_params["tools"] = tools
183180

184181
if api_key and base_url:
185182
client = AsyncOpenAI(api_key=api_key, base_url=base_url)
@@ -410,8 +407,7 @@ async def parse_structured_output(
410407
api_key: Optional[str] = None,
411408
base_url: Optional[str] = None,
412409
model: Optional[str] = None,
413-
functions: Optional[List[Dict[str, Any]]] = None,
414-
function_call: Optional[Union[str, Dict[str, Any]]] = None,
410+
tools: Optional[List[Dict[str, Any]]] = None,
415411
) -> T: # pragma: no cover
416412
"""Generate structured output using Pydantic model parsing with Instructor."""
417413

@@ -439,10 +435,8 @@ async def parse_structured_output(
439435
"response_model": model_class,
440436
"max_retries": 2, # Automatically retry on validation errors
441437
}
442-
if functions:
443-
create_args["tools"] = functions
444-
if function_call:
445-
create_args["function_call"] = function_call
438+
if tools:
439+
create_args["tools"] = tools
446440

447441
response = await patched_client.chat.completions.create(**create_args)
448442
return response

solana_agent/factories/agent_factory.py

Lines changed: 0 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -195,27 +195,6 @@ def create_from_config(config: Dict[str, Any]) -> QueryService:
195195
model="gemini-2.5-flash-preview-05-20",
196196
) # pragma: no cover
197197

198-
elif "grok" in config and "api_key" in config["grok"]:
199-
# Create primary services
200-
agent_service = AgentService(
201-
llm_provider=llm_adapter,
202-
business_mission=business_mission,
203-
config=config,
204-
api_key=config["grok"]["api_key"],
205-
base_url="https://api.x.ai/v1",
206-
model="grok-3-fast",
207-
output_guardrails=output_guardrails,
208-
) # pragma: no cover
209-
210-
# Create routing service
211-
routing_service = RoutingService(
212-
llm_provider=llm_adapter,
213-
agent_service=agent_service,
214-
api_key=config["gemini"]["api_key"],
215-
base_url="https://api.x.ai/v1",
216-
model="grok-3-mini-fast",
217-
) # pragma: no cover
218-
219198
elif "ollama" in config and "api_key" in config["ollama"]:
220199
# Create primary services
221200
agent_service = AgentService(

solana_agent/interfaces/providers/llm.py

Lines changed: 3 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -28,8 +28,7 @@ async def generate_text(
2828
api_key: Optional[str] = None,
2929
base_url: Optional[str] = None,
3030
model: Optional[str] = None,
31-
functions: Optional[List[Dict[str, Any]]] = None,
32-
function_call: Optional[Union[str, Dict[str, Any]]] = None,
31+
tools: Optional[List[Dict[str, Any]]] = None,
3332
) -> Any:
3433
"""Generate text from the language model."""
3534
pass
@@ -43,8 +42,7 @@ async def parse_structured_output(
4342
api_key: Optional[str] = None,
4443
base_url: Optional[str] = None,
4544
model: Optional[str] = None,
46-
functions: Optional[List[Dict[str, Any]]] = None,
47-
function_call: Optional[Union[str, Dict[str, Any]]] = None,
45+
tools: Optional[List[Dict[str, Any]]] = None,
4846
) -> T:
4947
"""Generate structured output using a specific model class."""
5048
pass
@@ -106,6 +104,7 @@ async def generate_text_with_images(
106104
images: List[Union[str, bytes]],
107105
system_prompt: str = "",
108106
detail: Literal["low", "high", "auto"] = "auto",
107+
tools: Optional[List[Dict[str, Any]]] = None,
109108
) -> str:
110109
"""Generate text from the language model using images."""
111110
pass

solana_agent/services/agent.py

Lines changed: 46 additions & 34 deletions
Original file line number | Diff line number | Diff line change
@@ -267,11 +267,15 @@ async def generate_response(
267267
full_prompt += f"USER IDENTIFIER: {user_id}"
268268

269269
# Get OpenAI function schemas for this agent's tools
270-
functions = [
270+
tools = [
271271
{
272-
"name": tool["name"],
273-
"description": tool.get("description", ""),
274-
"parameters": tool.get("parameters", {}),
272+
"type": "function",
273+
"function": {
274+
"name": tool["name"],
275+
"description": tool.get("description", ""),
276+
"parameters": tool.get("parameters", {}),
277+
"strict": True,
278+
},
275279
}
276280
for tool in self.get_agent_tools(agent_name)
277281
]
@@ -286,24 +290,33 @@ async def generate_response(
286290
api_key=self.api_key,
287291
base_url=self.base_url,
288292
model=self.model,
289-
functions=functions if functions else None,
290-
function_call="auto" if functions else None,
293+
tools=tools if tools else None,
291294
)
292295
yield model_instance
293296
return
294297

295298
# --- Streaming text/audio with tool support (as before) ---
296299
response_text = ""
297300
while True:
298-
response = await self.llm_provider.generate_text(
299-
prompt=full_prompt,
300-
system_prompt=system_prompt,
301-
functions=functions if functions else None,
302-
function_call="auto" if functions else None,
303-
api_key=self.api_key,
304-
base_url=self.base_url,
305-
model=self.model,
306-
)
301+
if not images:
302+
response = await self.llm_provider.generate_text(
303+
prompt=full_prompt,
304+
system_prompt=system_prompt,
305+
api_key=self.api_key,
306+
base_url=self.base_url,
307+
model=self.model,
308+
tools=tools if tools else None,
309+
)
310+
else:
311+
response = await self.llm_provider.generate_text_with_images(
312+
prompt=full_prompt,
313+
system_prompt=system_prompt,
314+
api_key=self.api_key,
315+
base_url=self.base_url,
316+
model=self.model,
317+
tools=tools if tools else None,
318+
images=images,
319+
)
307320
if (
308321
not response
309322
or not hasattr(response, "choices")
@@ -316,25 +329,24 @@ async def generate_response(
316329
choice = response.choices[0]
317330
message = getattr(choice, "message", choice)
318331

319-
# If the model wants to call a function/tool
320-
if hasattr(message, "function_call") and message.function_call:
321-
function_name = message.function_call.name
322-
arguments = json.loads(message.function_call.arguments)
323-
logger.info(
324-
f"Model requested tool '{function_name}' with args: {arguments}"
325-
)
326-
327-
# Execute the tool (async)
328-
tool_result = await self.execute_tool(
329-
agent_name, function_name, arguments
330-
)
331-
332-
# Add the tool result to the prompt for the next round
333-
full_prompt += (
334-
f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
335-
f"Result: {tool_result}\n"
336-
)
337-
continue # Loop again, LLM will see tool result and may call another tool or finish
332+
if hasattr(message, "tool_calls") and message.tool_calls:
333+
for tool_call in message.tool_calls:
334+
if tool_call.type == "function":
335+
function_name = tool_call.function.name
336+
arguments = json.loads(tool_call.function.arguments)
337+
logger.info(
338+
f"Model requested tool '{function_name}' with args: {arguments}"
339+
)
340+
# Execute the tool (async)
341+
tool_result = await self.execute_tool(
342+
agent_name, function_name, arguments
343+
)
344+
# Add the tool result to the prompt for the next round
345+
full_prompt += (
346+
f"\n\nTool '{function_name}' was called with arguments {arguments}.\n"
347+
f"Result: {tool_result}\n"
348+
)
349+
continue
338350

339351
# Otherwise, it's a normal message (final answer)
340352
response_text = message.content

0 commit comments

Comments (0)