Skip to content

v30.0.0 #109

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 6 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 2 additions & 15 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,6 @@ Build your AI agents in three lines of code!
* [Python](https://python.org) - Programming Language
* [OpenAI](https://openai.com) - AI Model Provider
* [MongoDB](https://mongodb.com) - Conversational History (optional)
* [Zep Cloud](https://getzep.com) - Conversational Memory (optional)
* [Pinecone](https://pinecone.io) - Knowledge Base (optional)
* [AgentiPy](https://agentipy.fun) - Solana Ecosystem (optional)
* [Zapier](https://zapier.com) - App Integrations (optional)
Expand All @@ -82,7 +81,7 @@ Build your AI agents in three lines of code!
**OpenAI**
* [gpt-4.1](https://platform.openai.com/docs/models/gpt-4.1) (agent - can be overridden)
* [gpt-4.1-nano](https://platform.openai.com/docs/models/gpt-4.1-nano) (router)
* [text-embedding-3-large](https://platform.openai.com/docs/models/text-embedding-3-large) (embedding)
* [text-embedding-3-small](https://platform.openai.com/docs/models/text-embedding-3-small) (embedding)
* [tts-1](https://platform.openai.com/docs/models/tts-1) (audio TTS)
* [gpt-4o-mini-transcribe](https://platform.openai.com/docs/models/gpt-4o-mini-transcribe) (audio transcription)
* [gpt-image-1](https://platform.openai.com/docs/models/gpt-image-1) (image generation - can be overridden)
Expand Down Expand Up @@ -377,16 +376,6 @@ config = {
}
```

### Conversational Memory

```python
config = {
"zep": {
"api_key": "your-zep-cloud-api-key",
},
}
```

### Observability and Tracing

```python
Expand Down Expand Up @@ -421,9 +410,7 @@ config = {
}
```

### Knowledge Base

The Knowledge Base (KB) is meant to store text values and/or PDFs (extracts text) - can handle very large PDFs.
### Knowledge Base & Conversational Memory

```python
config = {
Expand Down
13 changes: 1 addition & 12 deletions docs/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -310,17 +310,6 @@ Conversational History - Optional
},
}

Conversational Memory - Optional
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: python

config = {
"zep": {
"api_key": "your-zep-api-key",
},
}

Observability and Tracing - Optional
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Expand Down Expand Up @@ -361,7 +350,7 @@ Gemini - Optional
}


Knowledge Base - Optional
Knowledge Base & Conversational Memory - Optional
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The Knowledge Base (KB) is meant to store text values and/or small PDFs.
Expand Down
33 changes: 8 additions & 25 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 3 additions & 4 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "solana-agent"
version = "29.1.4"
version = "30.0.0"
description = "AI Agents for Solana"
authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
license = "MIT"
Expand All @@ -27,9 +27,8 @@ python = ">=3.12,<4.0"
openai = "1.79.0"
pydantic = ">=2"
pymongo = "4.13.0"
zep-cloud = "2.12.3"
instructor = "1.8.2"
pinecone = "6.0.2"
pinecone = "7.0.0"
llama-index-core = "0.12.37"
llama-index-embeddings-openai = "0.3.1"
pypdf = "5.5.0"
Expand All @@ -50,7 +49,7 @@ sphinx-rtd-theme = "^3.0.2"
myst-parser = "^4.0.1"
sphinx-autobuild = "^2024.10.3"
mongomock = "^4.3.0"
ruff = "^0.11.9"
ruff = "^0.11.10"

[tool.poetry.scripts]
solana-agent = "solana_agent.cli:app"
Expand Down
67 changes: 67 additions & 0 deletions solana_agent/adapters/mongodb_graph_adapter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import uuid
from typing import Dict, Any, List, Optional
from solana_agent.interfaces.providers.graph_storage import GraphStorageProvider
from solana_agent.adapters.mongodb_adapter import MongoDBAdapter


class MongoDBGraphAdapter(GraphStorageProvider):
    """MongoDB-backed implementation of ``GraphStorageProvider``.

    Nodes and edges are stored as plain documents in two collections.
    Every document carries a ``uuid`` field (generated if absent); edges
    reference node UUIDs through their ``source`` and ``target`` fields.
    """

    def __init__(
        self,
        mongo_adapter: MongoDBAdapter,
        node_collection: str = "graph_nodes",
        edge_collection: str = "graph_edges",
    ):
        """Wrap *mongo_adapter* and remember the two collection names.

        Args:
            mongo_adapter: Low-level MongoDB adapter used for all I/O.
            node_collection: Collection holding node documents.
            edge_collection: Collection holding edge documents.
        """
        self.mongo = mongo_adapter
        self.node_collection = node_collection
        self.edge_collection = edge_collection

    async def add_node(self, node: Dict[str, Any]) -> str:
        """Insert a node document and return its UUID.

        The caller's dict is copied, so it is never mutated. A ``uuid``
        is generated when the document does not already carry one.
        """
        doc = dict(node)
        doc["uuid"] = doc.get("uuid", str(uuid.uuid4()))
        self.mongo.insert_one(self.node_collection, doc)
        return doc["uuid"]

    async def add_edge(self, edge: Dict[str, Any]) -> str:
        """Insert an edge document and return its UUID.

        Fixed to return the edge's ``uuid`` (matching ``add_node`` and the
        declared ``-> str`` contract) instead of the raw ``insert_one``
        result.
        """
        doc = dict(edge)
        doc["uuid"] = doc.get("uuid", str(uuid.uuid4()))
        self.mongo.insert_one(self.edge_collection, doc)
        return doc["uuid"]

    async def get_node(self, node_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a single node document by UUID, or ``None`` if absent."""
        return self.mongo.find_one(self.node_collection, {"uuid": node_id})

    async def get_edges(
        self, node_id: str, direction: str = "both"
    ) -> List[Dict[str, Any]]:
        """Return edges touching *node_id*.

        Args:
            node_id: UUID of the node.
            direction: ``"out"`` (node is source), ``"in"`` (node is
                target) or anything else for both directions.
        """
        if direction == "out":
            query = {"source": node_id}
        elif direction == "in":
            query = {"target": node_id}
        else:
            query = {"$or": [{"source": node_id}, {"target": node_id}]}
        return self.mongo.find(self.edge_collection, query)

    async def find_neighbors(
        self, node_id: str, depth: int = 1
    ) -> List[Dict[str, Any]]:
        """Return node documents reachable within *depth* hops.

        Performs a breadth-first expansion over edges in both directions.
        Fixed: the previous implementation expanded only one arbitrary
        node of the frontier per level (``list(current)[0]``), silently
        dropping paths whenever a level had more than one node; it could
        also include ``None`` entries for dangling edge endpoints.
        """
        visited = {node_id}
        frontier = {node_id}
        found: set = set()
        for _ in range(depth):
            next_frontier: set = set()
            # Expand EVERY node in the current frontier, not just one.
            for current_id in frontier:
                for edge in await self.get_edges(current_id):
                    for endpoint in (edge.get("source"), edge.get("target")):
                        if endpoint and endpoint not in visited:
                            next_frontier.add(endpoint)
            if not next_frontier:
                break  # graph exhausted before reaching full depth
            visited |= next_frontier
            found |= next_frontier
            frontier = next_frontier
        # Resolve UUIDs to documents, skipping dangling references.
        neighbors: List[Dict[str, Any]] = []
        for nid in found:
            node = await self.get_node(nid)
            if node is not None:
                neighbors.append(node)
        return neighbors

    async def temporal_query(
        self, node_id: str, start_time: Optional[str], end_time: Optional[str]
    ) -> List[Dict[str, Any]]:
        """Return edges touching *node_id*, optionally filtered by time.

        ``start_time`` / ``end_time`` are compared lexicographically against
        the edge's ``timestamp`` field (inclusive bounds), so they are
        expected to be sortable strings — presumably ISO-8601; confirm
        against callers.
        """
        query: Dict[str, Any] = {
            "$or": [{"source": node_id}, {"target": node_id}]
        }
        if start_time or end_time:
            time_filter: Dict[str, str] = {}
            if start_time:
                time_filter["$gte"] = start_time
            if end_time:
                time_filter["$lte"] = end_time
            query["timestamp"] = time_filter
        return self.mongo.find(self.edge_collection, query)
6 changes: 3 additions & 3 deletions solana_agent/adapters/openai_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,8 +36,8 @@
DEFAULT_CHAT_MODEL = "gpt-4.1"
DEFAULT_VISION_MODEL = "gpt-4.1"
DEFAULT_PARSE_MODEL = "gpt-4.1-nano"
DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large"
DEFAULT_EMBEDDING_DIMENSIONS = 3072
DEFAULT_EMBEDDING_MODEL = "text-embedding-3-small"
DEFAULT_EMBEDDING_DIMENSIONS = 1536
DEFAULT_TRANSCRIPTION_MODEL = "gpt-4o-mini-transcribe"
DEFAULT_TTS_MODEL = "tts-1"

Expand Down Expand Up @@ -526,7 +526,7 @@ async def embed_text(

Args:
text: The text to embed.
model: The embedding model to use (defaults to text-embedding-3-large).
model: The embedding model to use (defaults to text-embedding-3-small).
dimensions: Desired output dimensions for the embedding.

Returns:
Expand Down
6 changes: 3 additions & 3 deletions solana_agent/adapters/pinecone_adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,15 +33,15 @@ def __init__(
self,
api_key: Optional[str] = None,
index_name: Optional[str] = None,
# Default for OpenAI text-embedding-3-large, MUST match external embedder
# Default for OpenAI text-embedding-3-small, MUST match external embedder
embedding_dimensions: int = 3072,
cloud_provider: str = "aws",
region: str = "us-east-1",
metric: str = "cosine",
create_index_if_not_exists: bool = True,
# Reranking Config
use_reranking: bool = False,
rerank_model: Optional[PineconeRerankModel] = None,
rerank_model: Optional[PineconeRerankModel] = "cohere-rerank-3.5",
rerank_top_k: int = 3, # Final number of results after reranking
# Multiplier for initial fetch before rerank
initial_query_top_k_multiplier: int = 5,
Expand Down Expand Up @@ -371,7 +371,7 @@ async def query_and_rerank(
"parameters": rerank_params,
}

rerank_response = await self.pinecone.rerank(**rerank_request)
rerank_response = await self.pinecone.inference.rerank(**rerank_request)

# 4. Process Reranked Results
reranked_results = []
Expand Down
Loading
Loading