Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import os
import json
import logging
import datetime
import requests
import urllib.parse
from duckduckgo_search import DDGS
import ollama
import re
import colorama
from colorama import Fore, Back, Style

# Initialize colorama for color output
colorama.init(autoreset=True)

############################################################
# Claude-Style Thinking Tool Demo by HartWired (YouTube)
# https://youtube.com/@hartwired
#
# This script demonstrates a thinking tool similar to Claude's
# that allows the model to reason step by step through complex tasks.
# Model: jacob-ebey/phi4-tools:latest (via Ollama)
# Tools: web_search (DuckDuckGo) and think (reasoning)
############################################################

# Set up logging: colorized single-line records for console output.
logging.basicConfig(
    level=logging.INFO,
    format=f'{Fore.CYAN}%(asctime)s - {Fore.GREEN}%(levelname)s - {Fore.WHITE}%(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
# Module-level logger shared by all functions below.
logger = logging.getLogger(__name__)

print(f"\n{Back.BLUE}{Fore.WHITE} CLAUDE-STYLE THINKING TOOL DEMO {Style.RESET_ALL} {Fore.YELLOW}by HartWired (YouTube)")
print(f"\n{Fore.CYAN}=== {Fore.YELLOW}Initializing Research Assistant with Phi4-tools {Fore.CYAN}===")

# Define user task - can be modified by users for different research needs
DEFAULT_RESEARCH_QUERY = """I'm trying to learn about the latest AI news. Can you research this topic. After you researched it can you check if you can find more information. At the end provide a balanced analysis!"""
# Tool implementation functions to handle actual tool calls
def process_tool_call(tool_name, tool_input):
    """Process a tool call based on the tool name and input.

    Args:
        tool_name: Either "web_search" or "think".
        tool_input: Dict of tool arguments parsed from the model's JSON.

    Returns:
        A string result to feed back to the model (formatted search results,
        a thought acknowledgement, or an error message), or a dict describing
        an empty-search error.
    """
    logger.info(f"Processing tool call: {Fore.YELLOW}{tool_name}")
    logger.info(f"Tool input: {Fore.WHITE}{json.dumps(tool_input, indent=2)}")

    # DuckDuckGo web search
    if tool_name == "web_search":
        query = tool_input.get("query")
        num_results = tool_input.get("num_results", 3)
        if not query:
            # Guard: the model occasionally omits the query argument entirely.
            logger.error("web_search called without a 'query' argument")
            return "Error: web_search requires a 'query' argument"
        logger.info(f"Searching DuckDuckGo for: {Fore.YELLOW}{query}")
        try:
            with DDGS() as ddgs:
                results = list(ddgs.text(query, max_results=num_results))
            if not results:
                logger.warning(f"No search results found for query: {query}")
                return {"error": "No search results found", "query": query}
            logger.info(f"Found {Fore.GREEN}{len(results)} search results")
            # Format the results in a more readable way
            formatted_results = []
            for i, result in enumerate(results, 1):
                formatted_results.append(f"Result {i}:\n" +
                                         f"Title: {result.get('title', 'No title')}\n" +
                                         f"Body: {result.get('body', 'No content')}\n" +
                                         f"URL: {result.get('href', 'No URL')}\n")
            return "\n".join(formatted_results)
        except Exception as e:
            logger.error(f"Error searching DuckDuckGo: {str(e)}")
            return f"Error searching for '{query}': {str(e)}"

    # The "think" tool - Claude-style thinking tool implementation
    elif tool_name == "think":
        # BUG FIX: .get("thought") can return None (model omitted the key),
        # which previously crashed on thought.split() below. Default to "".
        thought = tool_input.get("thought") or ""
        # Format the thought with a Claude-style thinking layout
        print("\n" + "=" * 100)
        print(f"{Back.MAGENTA}{Fore.WHITE}🧠 CLAUDE-STYLE THINKING PROCESS 🧠{Style.RESET_ALL}")
        print("=" * 100)
        # Split thought into paragraphs for better readability
        paragraphs = thought.split('\n\n')
        for para in paragraphs:
            # NOTE(review): this turns BOTH '**' markers yellow (the color is
            # never reset per-span) — crude but intentional-looking highlight.
            para = para.replace('**', f'{Fore.YELLOW}')  # Highlight markdown bold text
            if para.strip().startswith(('1.', '2.', '3.', '4.', '5.')):
                # Make numbered points stand out
                print(f"\n{Fore.GREEN}{para}")
            else:
                print(f"\n{Fore.WHITE}{para}")
        print(f"\n{Back.MAGENTA}{Fore.WHITE}" + "=" * 100 + f"{Style.RESET_ALL}\n")
        # Calculate thinking statistics
        word_count = len(thought.split())
        logger.info(f"{Fore.MAGENTA}Model used the Claude-style think tool ({word_count} words)")
        return "Thought recorded. Continue with your analysis."

    else:
        logger.error(f"Unknown tool: {tool_name}")
        return f"Error: Unknown tool '{tool_name}'"
# Claude-inspired system prompt with explicit thinking tool instructions.
# NOTE: this text is part of the program's behavior (it steers the model's
# output format, which extract_tool_calls() parses) — do not reword casually.
system_prompt = """
You are a research assistant that helps users find and synthesize information, similar to Claude AI. You MUST use the available tools:
1. web_search tool - Use this to find current information on the web through DuckDuckGo
2. think tool - Use this powerful tool for step-by-step reasoning, similar to Claude's thinking tool
When responding to a request requiring research, ALWAYS use these tools in this format:
For web searches:
<tool>web_search
{"query": "your search query here", "num_results": 3}
</tool>
For thinking and analysis:
<tool>think
{"thought": "Your detailed analysis and reasoning here..."}
</tool>
IMPORTANT:
- ALWAYS use at least one web_search tool call before providing your final answer
- ALWAYS use the think tool at least once to analyze findings before your final answer
- Make your thinking thorough and detailed, similar to how Claude uses its thinking tool
- Put each tool call in its own code block with the <tool> tags
- Wait for results from each tool before proceeding
- After using tools, provide a comprehensive, balanced analysis citing the information you found
EXAMPLE WORKFLOW:
1. User asks about a research topic
2. You use web_search tool:
<tool>web_search
{"query": "latest research on the topic", "num_results": 3}
</tool>
3. You get search results
4. You use the Claude-style think tool to analyze:
<tool>think
{"thought": "Let me analyze the search results carefully...\n\n1. The first source indicates X...\n2. The second source suggests Y...\n3. There seems to be a conflict between the first and third sources regarding Z...\n\nTo resolve this, I need more specific information about..."}
</tool>
5. If needed, search for more information
6. Provide your final answer synthesizing all information
Always be thorough, critical, and analytical in your research, similar to Claude's methodical approach.
"""
def extract_tool_calls(response):
    """
    Extract tool calls from the model response using regex pattern matching.

    Format expected:
    <tool>tool_name
    {"param": "value"}
    </tool>

    Args:
        response: Raw model output text, possibly containing <tool> blocks.

    Returns:
        List of (tool_name, tool_input_dict) tuples in order of appearance.
        Blocks whose payload cannot be parsed at all are dropped.
    """
    # Pattern to match the tool call format
    pattern = r'<tool>(.*?)\n(.*?)\n</tool>'
    matches = re.findall(pattern, response, re.DOTALL)

    tool_calls = []
    for match in matches:
        tool_name = match[0].strip()
        input_text = match[1].strip()

        # BUG FIX: the payload must be tried AS-IS first. Unconditionally
        # stripping '\"' and '\n' escapes (as before) corrupts well-formed
        # JSON: escaped quotes become bare quotes and '\n' becomes a raw
        # control character, so perfectly valid tool calls failed to parse
        # and non-think calls were silently dropped.
        try:
            tool_calls.append((tool_name, json.loads(input_text)))
            continue
        except json.JSONDecodeError:
            pass

        # Fallback: some model outputs are over-escaped; undo the two most
        # common escapes and retry.
        relaxed = input_text.replace('\\"', '"').replace('\\n', '\n')
        try:
            tool_calls.append((tool_name, json.loads(relaxed)))
            continue
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse tool input JSON: {e}")

        # Last resort for the think tool: pull the thought string out directly.
        if tool_name == "think" and '"thought":' in input_text:
            try:
                thought_match = re.search(r'"thought":\s*"(.*?)"\s*}', input_text, re.DOTALL)
                if thought_match:
                    tool_calls.append((tool_name, {"thought": thought_match.group(1)}))
                else:
                    logger.error("Could not extract thought content with regex")
            except Exception as ex:
                logger.error(f"Error extracting thought with regex: {ex}")

    return tool_calls
def process_message(message, conversation_history=None):
    """
    Send a message (plus prior history) to phi4-tools via Ollama.

    Args:
        message: User message or system message; skipped when empty.
        conversation_history: Full conversation history including tool
            responses, as a list of {"role", "content"} dicts.

    Returns:
        The model's reply text, or an error string if the call fails.
    """
    history = conversation_history if conversation_history is not None else []

    # Always lead with the system prompt, then replay the conversation so far.
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend({"role": turn["role"], "content": turn["content"]} for turn in history)

    # Append the current message only when there is one.
    if message:
        messages.append({"role": "user", "content": message})

    try:
        logger.info(f"{Fore.CYAN}Querying model: {Fore.YELLOW}jacob-ebey/phi4-tools:latest")
        reply = ollama.chat(
            model="jacob-ebey/phi4-tools:latest",
            messages=messages,
            stream=False
        )
        return reply["message"]["content"]
    except Exception as e:
        logger.error(f"Error calling Ollama: {str(e)}")
        return f"Error: {str(e)}"
def handle_conversation(user_message):
    """
    Handle a multi-turn conversation with phi4-tools using Ollama.

    Implements a Claude-style thinking loop: each model response is scanned
    for <tool> calls, every call is executed and its result fed back, and
    the loop repeats until the model stops calling tools or max_iterations
    is reached.

    Args:
        user_message: The user's message as a string

    Returns:
        Tuple of (conversation_history, tool_usage) — the full conversation
        log including all tool usage, and per-tool call counts.
    """
    divider = f"{Fore.CYAN}=" * 80
    print(f"\n{divider}")
    print(f"{Fore.YELLOW}🧑 USER QUERY: {Fore.WHITE}{user_message}")
    print(f"{divider}")

    # Initialize conversation history with the user's request.
    conversation_history = []
    conversation_history.append({"role": "user", "content": user_message})

    # Make the initial request to phi4-tools
    logger.info(f"Sending initial request to Phi4 with Claude-style thinking instructions")
    response = process_message("", conversation_history)

    # Process any tool uses
    iteration = 1
    max_iterations = 10  # Prevent infinite loops
    # Track tool usage for Claude-style analytics
    tool_usage = {"web_search": 0, "think": 0}
    final_response = response

    while iteration < max_iterations:
        # Extract tool calls from the response
        tool_calls = extract_tool_calls(response)
        if not tool_calls:
            print(f"\n{Fore.RED}🛑 No more tool calls found in iteration {iteration}, finalizing response.")
            final_response = response
            break

        # Process each tool call
        for tool_name, tool_input in tool_calls:
            # Format for Claude-style tool usage display
            if tool_name == "think":
                print(f"\n{Back.MAGENTA}{Fore.WHITE} ITERATION {iteration} {Style.RESET_ALL} {Fore.YELLOW}Using Claude-style: {Fore.GREEN}{tool_name}")
            else:
                print(f"\n{Back.BLUE}{Fore.WHITE} ITERATION {iteration} {Style.RESET_ALL} {Fore.YELLOW}Processing tool: {Fore.GREEN}{tool_name}")
            # Track tool usage
            if tool_name in tool_usage:
                tool_usage[tool_name] += 1
            # Process the tool call
            tool_result = process_tool_call(tool_name, tool_input)
            # Add the assistant's tool call to conversation history
            assistant_message = f"I'll use the {tool_name} tool to help with this.\n\n<tool>{tool_name}\n{json.dumps(tool_input)}\n</tool>"
            conversation_history.append({"role": "assistant", "content": assistant_message})
            # Add the tool result to conversation history
            conversation_history.append({"role": "user", "content": f"Tool result:\n{tool_result}"})

        # Get the next response
        response = process_message("", conversation_history)
        # BUG FIX: keep final_response in sync each round. Previously it was
        # only updated on the break path, so exhausting max_iterations made
        # the function print/record the very FIRST response, not the latest.
        final_response = response
        iteration += 1

    # Strip any leftover <tool> blocks before presenting the final answer.
    final_response_clean = re.sub(r'<tool>.*?</tool>', '', final_response, flags=re.DOTALL).strip()
    if final_response_clean:
        conversation_history.append({"role": "assistant", "content": final_response_clean})

    print(f"\n{divider}")
    print(f"{Back.GREEN}{Fore.WHITE} 🤖 FINAL RESEARCH SYNTHESIS: {Style.RESET_ALL}")
    print(f"{Fore.CYAN}{'-' * 80}")
    # Format the output for better readability
    paragraphs = final_response_clean.split('\n\n')
    for para in paragraphs:
        # Highlight headings and important points
        if para.strip().startswith(('#', '##', '###')):
            print(f"\n{Fore.YELLOW}{para}")
        elif para.strip().startswith(('1.', '2.', '3.', '4.', '5.')):
            print(f"\n{Fore.GREEN}{para}")
        else:
            print(f"\n{Fore.WHITE}{para}")
    print(f"{divider}")

    # Print tool usage summary with Claude-style analytics
    print(f"\n{Back.MAGENTA}{Fore.WHITE} 📊 CLAUDE-STYLE THINKING ANALYTICS {Style.RESET_ALL}")
    for tool, count in tool_usage.items():
        tool_color = Fore.MAGENTA if tool == "think" else Fore.YELLOW
        tool_icon = "🧠" if tool == "think" else "🔍"
        print(f" - {tool_icon} {tool_color}{tool}: {Fore.WHITE}{count} times")

    return conversation_history, tool_usage
def main():
    """
    Claude-Style Thinking Tool Demo by HartWired (YouTube)

    Demonstrates how a model can use explicit reasoning and web search
    to tackle complex research questions, similar to Claude AI.

    Interactive entry point: prints configuration, prompts for a research
    query (or uses the default), runs handle_conversation(), then prints
    a summary of tool usage and thinking depth.
    """
    header = f"{Fore.YELLOW}*" * 100
    print(f"\n{header}")
    title = f"{Back.MAGENTA}{Fore.WHITE} CLAUDE-STYLE THINKING TOOL DEMO BY HARTWIRED {Style.RESET_ALL}"
    print(f"{title}")
    print(f"{header}")

    # Display configuration information for users
    config_info = f"""
{Fore.CYAN}CONFIGURATION:
{Fore.WHITE}Model: {Fore.GREEN}jacob-ebey/phi4-tools:latest {Fore.WHITE}(via Ollama)
{Fore.WHITE}Tools: {Fore.YELLOW}web_search {Fore.WHITE}(DuckDuckGo) and {Fore.MAGENTA}think {Fore.WHITE}(Claude-style reasoning)
{Fore.WHITE}System Prompt: {Fore.GREEN}Claude-inspired prompt with thinking instructions
{Fore.WHITE}Default Task: {Fore.YELLOW}Research latest AI news
{Fore.CYAN}DEMO PURPOSE:
{Fore.GREEN}This demonstration shows how models can think like Claude:
{Fore.WHITE}1. {Fore.YELLOW}Search the web using DuckDuckGo for factual information
{Fore.WHITE}2. {Fore.MAGENTA}Use a Claude-style thinking tool to analyze findings and reason step by step
{Fore.WHITE}3. {Fore.GREEN}Synthesize information into a balanced, comprehensive analysis
{Fore.YELLOW}Watch as the model works through a research question with Claude-style thinking.
"""
    print(config_info)

    # Ask user if they want to use the default query or enter their own.
    # Note: any answer other than exactly 'y'/'Y' is treated as "no".
    print(f"{Fore.CYAN}Default research query: {Fore.WHITE}{DEFAULT_RESEARCH_QUERY}")
    use_default = input(f"\n{Fore.GREEN}Use default query? (y/n): {Fore.WHITE}").lower() == 'y'
    if use_default:
        user_message = DEFAULT_RESEARCH_QUERY
    else:
        user_message = input(f"\n{Fore.GREEN}Enter your research query: {Fore.WHITE}")

    # Process the conversation
    logger.info(f"{Fore.GREEN}Starting Claude-style research process")
    conversation_history, tool_usage = handle_conversation(user_message)

    # Show conversation summary
    print(f"\n{header}")
    print(f"{Back.GREEN}{Fore.WHITE} CLAUDE-STYLE THINKING SUMMARY {Style.RESET_ALL}")
    print(f"{header}")
    print(f"\n{Fore.CYAN}📊 THINKING ANALYTICS:")
    for tool, count in tool_usage.items():
        icon = "🧠" if tool == "think" else "🔍"
        tool_color = Fore.MAGENTA if tool == "think" else Fore.YELLOW
        print(f" - {icon} {tool_color}{tool}: {Fore.WHITE}{count} times")

    # Calculate total think words if any thinking was done.
    # Scans assistant turns for recorded '"thought": "..."' payloads and
    # buckets the total word count into a coarse "complexity" label.
    if tool_usage["think"] > 0:
        think_content = ""
        for entry in conversation_history:
            if entry.get("role") == "assistant" and "think" in entry["content"]:
                think_match = re.search(r'"thought":\s*"(.*?)"\s*}', entry["content"], re.DOTALL)
                if think_match:
                    think_content += think_match.group(1) + " "
        if think_content:
            word_count = len(think_content.split())
            thinking_complexity = "Basic" if word_count < 100 else "Intermediate" if word_count < 300 else "Detailed" if word_count < 500 else "Comprehensive"
            print(f" - 📏 {Fore.MAGENTA}Thinking depth: {Fore.WHITE}{word_count} words ({thinking_complexity})")

    print(f"\n{Back.MAGENTA}{Fore.WHITE} CLAUDE-STYLE THINKING BENEFITS {Style.RESET_ALL}")
    print(f"{Fore.WHITE}• Breaks down complex problems into manageable steps")
    print(f"{Fore.WHITE}• Analyzes information from multiple sources before drawing conclusions")
    print(f"{Fore.WHITE}• Makes reasoning explicit and transparent")
    print(f"{Fore.WHITE}• Reduces errors by thinking through implications")
    print(f"\n{header}")
    print(f"{Back.MAGENTA}{Fore.WHITE} CLAUDE-STYLE THINKING DEMO COMPLETE | HARTWIRED {Style.RESET_ALL}")
    print(f"{header}")
    logger.info(f"{Fore.GREEN}Claude-style research process completed successfully")


if __name__ == "__main__":
    main()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement