Overview

Integrate Valyu’s deep search capabilities directly into your Anthropic Claude applications using our provider system. This enables your AI agents to access real-time information from academic papers, news, financial data, and authoritative sources.

Installation

Install the Valyu and Anthropic packages:
pip install valyu anthropic
Set your API keys as environment variables:
export VALYU_API_KEY="your-valyu-api-key"
export ANTHROPIC_API_KEY="your-anthropic-api-key"

Free Credits

Get your API key from the Valyu Platform — new accounts include $10 of free credit.

Basic Usage

The AnthropicProvider handles tool registration and execution for you, so a complete integration takes only a few lines:
from anthropic import Anthropic
from valyu import AnthropicProvider
from dotenv import load_dotenv

load_dotenv()

# Set up the Anthropic client and the Valyu provider.
anthropic_client = Anthropic()
provider = AnthropicProvider()

# Valyu search, already formatted as Anthropic tools.
tools = provider.get_tools()

# The research request we want Claude to answer.
messages = [
    {
        "role": "user",
        "content": "What are the latest developments in quantum computing? Write a summary of your findings."
    }
]

# Step 1: Ask Claude, making the Valyu tools available.
response = anthropic_client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=1000,
    tools=tools,
    messages=messages,
)

# Step 2: Run any searches Claude requested.
tool_results = provider.handle_tool_calls(response=response)

# Step 3: If searches ran, ask Claude again with the results folded into
# the conversation; otherwise the first response already holds the answer.
if tool_results:
    updated_messages = provider.build_conversation(messages, response, tool_results)
    answer = anthropic_client.messages.create(
        model="claude-3-5-sonnet-20241022",
        max_tokens=2000,
        messages=updated_messages,
    )
else:
    answer = response

# Print every text segment of the chosen response.
for block in answer.content:
    if hasattr(block, "text"):
        print(block.text)

How It Works

The AnthropicProvider handles everything for you:
  1. Tool Registration: Automatically formats Valyu search as an Anthropic tool
  2. Tool Execution: Manages search API calls behind the scenes
  3. Conversation Flow: Builds proper message sequences with tool results
No need to manually define functions or handle complex tool calling logic!

Research Agent Example

Create a simple research agent that can access current information:
from anthropic import Anthropic
from valyu import AnthropicProvider

def create_research_agent():
    """Build and return a research closure backed by Claude + Valyu search."""
    client = Anthropic()
    provider = AnthropicProvider()
    tools = provider.get_tools()

    def research(query: str) -> str:
        """Answer *query*, transparently running Valyu searches if needed."""
        system_prompt = """You are a research assistant with access to real-time information. Always cite your sources."""

        messages = [{"role": "user", "content": query}]

        # First pass: let Claude decide whether it needs to search.
        response = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            tools=tools,
            messages=messages,
            system=system_prompt
        )

        # Run whatever searches Claude requested (no-op if none).
        tool_results = provider.handle_tool_calls(response=response)

        if tool_results:
            # Second pass: answer using the retrieved search data.
            updated_messages = provider.build_conversation(messages, response, tool_results)
            source = client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=2000,
                messages=updated_messages,
                system=system_prompt
            )
        else:
            source = response

        # Concatenate every text segment of the selected response.
        return "".join(
            block.text for block in source.content if hasattr(block, "text")
        )

    return research

# Usage
agent = create_research_agent()
result = agent("Find the price of Bitcoin and Nvidia over the last 2 years, then find news about them both respectively, and write a detailed report on the price, news, and potential asset correlation.")
print(result)

Financial Analysis Example

def create_financial_agent():
    """Build and return a market-analysis closure backed by Claude + Valyu."""
    client = Anthropic()
    provider = AnthropicProvider()
    tools = provider.get_tools()

    def analyze_market(assets: list) -> str:
        """Produce a market analysis report for the given asset names."""
        query = f"Get the latest news and price data for {', '.join(assets)}, then provide a detailed market analysis report"

        system_prompt = "You are a financial analyst. Provide data-driven insights with specific numbers and sources."
        messages = [{"role": "user", "content": query}]

        # First pass: Claude may request news/price searches via the tools.
        response = client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            tools=tools,
            messages=messages,
            system=system_prompt
        )

        tool_results = provider.handle_tool_calls(response=response)

        if tool_results:
            # Second pass: write the report from the retrieved search data.
            updated_messages = provider.build_conversation(messages, response, tool_results)
            source = client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=2500,
                messages=updated_messages,
                system=system_prompt
            )
        else:
            source = response

        # Concatenate every text segment of the selected response.
        return "".join(
            block.text for block in source.content if hasattr(block, "text")
        )

    return analyze_market

# Usage
financial_agent = create_financial_agent()
analysis = financial_agent(["Bitcoin", "Ethereum", "Tesla"])
print(analysis)

Configuration Options

Model Selection

Choose the right Claude model for your use case:
# NOTE: these snippets reuse ``anthropic_client``, ``tools``, and ``messages``
# from the Basic Usage example above.

# For speed and efficiency
response = anthropic_client.messages.create(
    model="claude-3-5-haiku-20241022",  # Fastest
    max_tokens=1000,
    tools=tools,
    messages=messages,
)

# For balanced performance (recommended)
response = anthropic_client.messages.create(
    model="claude-3-5-sonnet-20241022",  # Best balance
    max_tokens=1500,
    tools=tools,
    messages=messages,
)

# For complex reasoning tasks
response = anthropic_client.messages.create(
    model="claude-3-opus-20240229",  # Most capable
    max_tokens=2000,
    tools=tools,
    messages=messages,
)

Search Parameters

Claude can automatically use advanced search parameters based on your query context:
  • max_num_results: Limit results (1-20)
  • included_sources: Search specific domains or datasets
  • excluded_sources: Exclude certain sources
  • category: Guide search to specific topics
  • start_date/end_date: Time-bounded searches
  • relevance_threshold: Filter by relevance (0-1)

Best Practices

1. Use System Prompts

# A reusable system prompt that steers Claude toward cited, dated answers.
system_prompt = """You are a research assistant with access to real-time information.

Guidelines:
- Always cite sources from search results
- Provide specific data points and numbers
- If information is recent, mention the date
"""

# Pass it via ``system=`` alongside the Valyu tools.
response = anthropic_client.messages.create(
    model="claude-3-5-sonnet-20241022",
    max_tokens=2000,
    tools=tools,
    messages=messages,
    system=system_prompt
)

2. Handle Errors Gracefully

def research_with_fallback(anthropic_client, provider, tools, messages):
    """Run a tool-enabled Claude request and return the response text.

    Wraps the whole tool-calling flow in one try/except so that any
    failure (API error, search error) yields a friendly fallback message
    instead of an uncaught exception. (The original snippet used bare
    ``return`` statements at module level, which is not valid Python.)

    Args:
        anthropic_client: an ``Anthropic`` client instance.
        provider: an ``AnthropicProvider`` instance.
        tools: tool definitions from ``provider.get_tools()``.
        messages: the conversation so far (list of role/content dicts).

    Returns:
        The concatenated text of Claude's reply, or an apology string on error.
    """
    try:
        response = anthropic_client.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1500,
            tools=tools,
            messages=messages,
        )

        tool_results = provider.handle_tool_calls(response=response)

        if tool_results:
            updated_messages = provider.build_conversation(messages, response, tool_results)
            source = anthropic_client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=2000,
                messages=updated_messages,
            )
        else:
            source = response

        # Collect every text segment of the chosen response.
        return "".join(c.text for c in source.content if hasattr(c, "text"))

    except Exception as e:
        # Surface the real cause for debugging instead of swallowing it silently.
        print(f"Error: {e}")
        return "I apologize, but I encountered an error while processing your request."

3. Multi-Turn Conversations

For chat applications, maintain conversation history:
class ResearchChat:
    """Multi-turn chat session with Claude plus Valyu search tools.

    Maintains the full message history in ``self.messages`` so Claude
    keeps conversational context across turns.
    """

    def __init__(self, model: str = "claude-3-5-sonnet-20241022"):
        self.client = Anthropic()
        self.provider = AnthropicProvider()
        self.tools = self.provider.get_tools()
        self.model = model
        self.messages = []          # full conversation history
        self.system_prompt = None   # optional system prompt applied to every turn

    def set_system_prompt(self, prompt: str):
        """Set the system prompt used for all subsequent turns."""
        self.system_prompt = prompt

    def chat(self, user_message: str) -> str:
        """Send one user turn and return the assistant's text reply."""
        self.messages.append({"role": "user", "content": user_message})

        kwargs = {
            "model": self.model,
            "max_tokens": 1500,
            "tools": self.tools,
            "messages": self.messages,
        }
        if self.system_prompt:
            kwargs["system"] = self.system_prompt

        response = self.client.messages.create(**kwargs)

        # Execute any searches Claude requested (no-op if none).
        tool_results = self.provider.handle_tool_calls(response=response)

        if tool_results:
            self.messages = self.provider.build_conversation(
                self.messages, response, tool_results
            )
            # BUGFIX: build_conversation returns a *new* list, so the old
            # kwargs["messages"] reference was stale and the follow-up call
            # never saw the tool results. Point the request at the updated
            # history, and drop the tools (matching the single-shot examples)
            # so Claude answers instead of searching again.
            kwargs["messages"] = self.messages
            kwargs.pop("tools", None)
            source = self.client.messages.create(**kwargs)
        else:
            source = response

        assistant_text = "".join(
            c.text for c in source.content if hasattr(c, "text")
        )

        self.messages.append({"role": "assistant", "content": assistant_text})
        return assistant_text

# Usage: each chat() call automatically carries all prior turns as context.
chat = ResearchChat()
chat.set_system_prompt("You are a helpful research assistant.")
response = chat.chat("What's the latest news about renewable energy?")

API Reference

AnthropicProvider

class AnthropicProvider:
    # Reference stub: method signatures and contracts only; the real
    # implementation lives in the ``valyu`` package.
    def __init__(self, valyu_api_key: Optional[str] = None):
        """Initialize provider. API key auto-detected from the VALYU_API_KEY environment variable if not provided."""
    
    def get_tools(self) -> List[Dict]:
        """Get list of Valyu search tools formatted for the Anthropic Messages API."""
    
    def handle_tool_calls(self, response, modifiers=None) -> List[Dict]:
        """Execute any tool calls found in an Anthropic response and return their results; empty if the response requested no tools. (``modifiers`` is not documented here — consult the Valyu provider reference.)"""
    
    def build_conversation(self, input_messages, response, tool_results) -> List[Dict]:
        """Build and return an updated message list that appends the assistant's tool-use turn and the tool results to ``input_messages``."""

Additional Resources