people will ask for this autoblogger
incomestreamsurfer | Feb 5th, 2025

# Try our SEO tool: https://harborseo.ai/
# Work with us: https://calendly.com/incomestreamsurfers-strategy-session/seo

import asyncio
import aiohttp
import json
import xml.etree.ElementTree as ET
from urllib.parse import urlparse, urljoin
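# Note: aiohttp is the only third-party dependency (pip install aiohttp); asyncio, json,
# xml.etree and urllib.parse are part of the Python standard library.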

# =======================
# Configuration Constants
# =======================
OPENROUTER_API_KEY = ""  # Replace with your OpenRouter API key
SERPAPI_API_KEY = ""     # Replace with your SERPAPI API key
JINA_API_KEY = ""        # Replace with your Jina API key
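# The keys can also be read from the environment instead of being hard-coded.
# A minimal sketch, assuming the variables are exported in your shell:
#   import os
#   OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
#   SERPAPI_API_KEY = os.environ.get("SERPAPI_API_KEY", "")
#   JINA_API_KEY = os.environ.get("JINA_API_KEY", "")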

# Endpoints
OPENROUTER_URL = "https://openrouter.ai/api/v1/chat/completions"
SERPAPI_URL = "https://serpapi.com/search"
JINA_BASE_URL = "https://r.jina.ai/"

# Default LLM model (can be changed if desired)
DEFAULT_MODEL = "anthropic/claude-3.5-haiku"


# ==============================
# Asynchronous Helper Functions
# ==============================

async def call_openrouter_async(session, messages, model=DEFAULT_MODEL):
    """
    Asynchronously call the OpenRouter chat completion API with the provided messages.
    Returns the content of the assistant's reply.
    """
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "X-Title": "OpenDeepResearcher, by Matt Shumer",
        "Content-Type": "application/json"
    }
    payload = {
        "model": model,
        "messages": messages
    }
    try:
        async with session.post(OPENROUTER_URL, headers=headers, json=payload) as resp:
            if resp.status == 200:
                result = await resp.json()
                try:
                    return result['choices'][0]['message']['content']
                except (KeyError, IndexError):
                    print("Unexpected OpenRouter response structure:", result)
                    return None
            else:
                text = await resp.text()
                print(f"OpenRouter API error: {resp.status} - {text}")
                return None
    except Exception as e:
        print("Error calling OpenRouter:", e)
        return None
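# Illustrative call to call_openrouter_async (not part of the pipeline); it accepts any
# messages list in the standard chat format:
#   reply = await call_openrouter_async(session, [
#       {"role": "system", "content": "You are helpful."},
#       {"role": "user", "content": "Summarize aiohttp in one sentence."}
#   ])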


async def generate_search_queries_async(session, user_query):
    """
    Ask the LLM to produce up to four precise search queries (in Python list format)
    based on the user's query.
    """
    prompt = (
        "You are an expert research assistant. Given the user's query, generate up to four distinct, "
        "precise search queries that would help gather comprehensive information on the topic. "
        "Return only a Python list of strings, for example: ['query1', 'query2', 'query3']."
    )
    messages = [
        {"role": "system", "content": "You are a helpful and precise research assistant."},
        {"role": "user", "content": f"User Query: {user_query}\n\n{prompt}"}
    ]
    response = await call_openrouter_async(session, messages)
    if response:
        try:
            search_queries = eval(response)
            if isinstance(search_queries, list):
                return search_queries
            else:
                print("LLM did not return a list. Response:", response)
                return []
        except Exception as e:
            print("Error parsing search queries:", e, "\nResponse:", response)
            return []
    return []
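# Note: eval() executes whatever the model returns. ast.literal_eval() is a stricter
# drop-in if you only want to accept plain Python literals (a sketch, not used above):
#   import ast
#   search_queries = ast.literal_eval(response)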


async def perform_search_async(session, query):
    """
    Asynchronously perform a Google search using SERPAPI for the given query.
    Returns a list of result URLs.
    """
    params = {
        "q": query,
        "api_key": SERPAPI_API_KEY,
        "engine": "google"
    }
    try:
        async with session.get(SERPAPI_URL, params=params) as resp:
            if resp.status == 200:
                results = await resp.json()
                if "organic_results" in results:
                    links = [item.get("link") for item in results["organic_results"] if "link" in item]
                    return links
                else:
                    print("No organic results in SERPAPI response.")
                    return []
            else:
                text = await resp.text()
                print(f"SERPAPI error: {resp.status} - {text}")
                return []
    except Exception as e:
        print("Error performing SERPAPI search:", e)
        return []


async def fetch_webpage_text_async(session, url):
    """
    Asynchronously retrieve the text content of a webpage using Jina.
    The URL is appended to the Jina endpoint.
    """
    full_url = f"{JINA_BASE_URL}{url}"
    headers = {
        "Authorization": f"Bearer {JINA_API_KEY}"
    }
    try:
        async with session.get(full_url, headers=headers) as resp:
            if resp.status == 200:
                return await resp.text()
            else:
                text = await resp.text()
                print(f"Jina fetch error for {url}: {resp.status} - {text}")
                return ""
    except Exception as e:
        print("Error fetching webpage text with Jina:", e)
        return ""
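# The Jina reader works by prefixing the target address, e.g. requesting
# https://r.jina.ai/https://example.com/article returns that page as readable plain text.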


async def is_page_useful_async(session, user_query, page_text):
    """
    Ask the LLM if the provided webpage content is useful for answering the user's query.
    The LLM must reply with exactly "Yes" or "No".
    """
    prompt = (
        "You are a critical research evaluator. Given the user's query and the content of a webpage, "
        "determine if the webpage contains information relevant and useful for addressing the query. "
        "Respond with exactly one word: 'Yes' if the page is useful, or 'No' if it is not. Do not include any extra text."
    )
    messages = [
        {"role": "system", "content": "You are a strict and concise evaluator of research relevance."},
        {"role": "user", "content": f"User Query: {user_query}\n\nWebpage Content (first 20000 characters):\n{page_text[:20000]}\n\n{prompt}"}
    ]
    response = await call_openrouter_async(session, messages)
    if response:
        answer = response.strip()
        if answer in ["Yes", "No"]:
            return answer
        else:
            if "Yes" in answer:
                return "Yes"
            elif "No" in answer:
                return "No"
    return "No"


async def extract_relevant_context_async(session, user_query, search_query, page_text):
    """
    Given the original query, the search query used, and the page content,
    have the LLM extract all information relevant for answering the query.
    """
    prompt = (
        "You are an expert information extractor. Given the user's query, the search query that led to this page, "
        "and the webpage content, extract all pieces of information that are relevant to answering the user's query. "
        "Return only the relevant context as plain text without commentary."
    )
    messages = [
        {"role": "system", "content": "You are an expert in extracting and summarizing relevant information."},
        {"role": "user", "content": f"User Query: {user_query}\nSearch Query: {search_query}\n\nWebpage Content (first 20000 characters):\n{page_text[:20000]}\n\n{prompt}"}
    ]
    response = await call_openrouter_async(session, messages)
    if response:
        return response.strip()
    return ""


async def get_new_search_queries_async(session, user_query, previous_search_queries, all_contexts):
    """
    Based on the original query, the previously used search queries, and all the extracted contexts,
    ask the LLM whether additional search queries are needed.
    """
    context_combined = "\n".join(all_contexts)
    prompt = (
        "You are an analytical research assistant. Based on the original query, the search queries performed so far, "
        "and the extracted contexts from webpages, determine if further research is needed. "
        "If further research is needed, provide up to four new search queries as a Python list (for example, "
        "['new query1', 'new query2']). If you believe no further research is needed, respond with exactly <done>."
        "\nOutput only a Python list or the token <done> without any additional text."
    )
    messages = [
        {"role": "system", "content": "You are a systematic research planner."},
        {"role": "user", "content": f"User Query: {user_query}\nPrevious Search Queries: {previous_search_queries}\n\nExtracted Relevant Contexts:\n{context_combined}\n\n{prompt}"}
    ]
    response = await call_openrouter_async(session, messages)
    if response:
        cleaned = response.strip()
        if cleaned == "<done>":
            return "<done>"
        try:
            new_queries = eval(cleaned)
            if isinstance(new_queries, list):
                return new_queries
            else:
                print("LLM did not return a list for new search queries. Response:", response)
                return []
        except Exception as e:
            print("Error parsing new search queries:", e, "\nResponse:", response)
            return []
    return []
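# Return values of get_new_search_queries_async and how the main loop treats them:
#   "<done>"            -> stop researching and write the report
#   ["new query", ...]  -> run another iteration with these queries
#   []                  -> stop (no new queries, or the reply could not be parsed)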


async def generate_final_report_async(session, user_query, all_contexts):
    """
    Generate the final comprehensive report using all gathered contexts.
    """
    context_combined = "\n".join(all_contexts)
    prompt = (
        "You are an expert researcher and report writer. Based on the gathered contexts below and the original query, "
        "write a comprehensive, well-structured, and detailed report that addresses the query thoroughly. "
        "Include all relevant insights and conclusions without extraneous commentary."
    )
    messages = [
        {"role": "system", "content": "You are a skilled report writer."},
        {"role": "user", "content": f"User Query: {user_query}\n\nGathered Relevant Contexts:\n{context_combined}\n\n{prompt}"}
    ]
    report = await call_openrouter_async(session, messages)
    return report


async def process_link(session, link, user_query, search_query):
    """
    Process a single link: fetch its content, judge its usefulness, and if useful, extract the relevant context.
    """
    print(f"Fetching content from: {link}")
    page_text = await fetch_webpage_text_async(session, link)
    if not page_text:
        return None
    usefulness = await is_page_useful_async(session, user_query, page_text)
    print(f"Page usefulness for {link}: {usefulness}")
    if usefulness == "Yes":
        context = await extract_relevant_context_async(session, user_query, search_query, page_text)
        if context:
            print(f"Extracted context from {link} (first 200 chars): {context[:200]}")
            return context
    return None

async def fetch_sitemap_async(session, sitemap_url):
    """
    Fetch and parse a sitemap XML file.
    Returns a list of all valid webpage URLs found in the sitemap (excluding image URLs).
    """
    try:
        async with session.get(sitemap_url) as resp:
            if resp.status == 200:
                sitemap_content = await resp.text()
                root = ET.fromstring(sitemap_content)

                # Handle both standard sitemaps and sitemap indexes
                urls = []

                # Helper function to check if a URL is valid for our purposes
                def is_valid_url(url):
                    # Exclude common image file extensions and CDN URLs
                    image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.svg')
                    cdn_indicators = ('cdn.', '/cdn/', '/assets/', '/images/', '/img/')

                    url_lower = url.lower()
                    return not (
                        url_lower.endswith(image_extensions) or
                        any(indicator in url_lower for indicator in cdn_indicators) or
                        'cdn.shopify.com' in url_lower
                    )

                # Check if this is a sitemap index
                if 'sitemapindex' in root.tag:
                    # Fetch each individual sitemap
                    sitemap_tasks = []
                    for sitemap in root.findall('.//{*}loc'):
                        if not is_valid_url(sitemap.text):  # Skip image sitemaps
                            continue
                        sitemap_tasks.append(fetch_sitemap_async(session, sitemap.text))
                    if sitemap_tasks:
                        results = await asyncio.gather(*sitemap_tasks)
                        for result in results:
                            urls.extend(result)
                else:
                    # Regular sitemap - extract URLs directly
                    for url in root.findall('.//{*}loc'):
                        if is_valid_url(url.text):
                            urls.append(url.text)

                return urls
            else:
                print(f"Error fetching sitemap: {resp.status}")
                return []
    except Exception as e:
        print(f"Error processing sitemap: {e}")
        return []
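# The './/{*}loc' pattern matches <loc> elements in any namespace, so the same code
# handles a plain <urlset> sitemap (e.g. xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
# as well as a <sitemapindex> whose child sitemaps are fetched recursively above.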


async def select_relevant_urls_async(session, urls, keyword, research_context):
    """
    Ask the LLM to select the 10-20 most relevant URLs based on the keyword
    and previous research context.
    """
    # First, filter URLs to remove any that are obviously irrelevant
    def url_relevance_score(url, keyword):
        """Basic relevance scoring for initial filtering"""
        url_lower = url.lower()
        keyword_parts = keyword.lower().split()

        # Initialize score
        score = 0

        # Check for keyword parts in URL
        for part in keyword_parts:
            if part in url_lower:
                score += 1

        # Penalize obvious non-content pages
        if any(x in url_lower for x in ['/cart', '/checkout', '/account', '/login', '/admin']):
            score -= 10

        return score

    # Sort URLs by initial relevance score
    scored_urls = [(url, url_relevance_score(url, keyword)) for url in urls]
    scored_urls.sort(key=lambda x: x[1], reverse=True)

    # Take top 50 URLs for LLM analysis
    candidate_urls = [url for url, score in scored_urls[:50]]

    prompt = (
        "You are an expert content curator specializing in finding relevant web content. "
        "Your task is to select the most relevant URLs for researching the given keyword. "
        "Consider URL structure and likely content relevance.\n\n"
        "Instructions:\n"
        "0. Look for exact phrase matches in the URL slug first\n"
        "1. Analyze each URL carefully\n"
        "2. Select 10-20 URLs that are most likely to contain relevant content\n"
        "3. Return ONLY a Python list containing the selected URLs\n"
        "4. Focus on content-rich pages (avoid cart, checkout, account pages)\n\n"
        f"Keyword: <keyword>{keyword}</keyword>\n"
        f"Research Context Summary: {research_context[:500]}...\n\n"
        "Available URLs:\n" + "\n".join(candidate_urls)
    )

    messages = [
        {"role": "system", "content": "You are a precise URL curator focusing on content relevance."},
        {"role": "user", "content": prompt}
    ]

    response = await call_openrouter_async(session, messages)
    if response:
        try:
            # Clean the response to handle potential formatting issues
            cleaned_response = response.strip()
            if cleaned_response.startswith("```python"):
                cleaned_response = cleaned_response.split("```python")[1]
            if cleaned_response.endswith("```"):
                cleaned_response = cleaned_response.rsplit("```", 1)[0]

            selected_urls = eval(cleaned_response)

            if isinstance(selected_urls, list):
                # Ensure all URLs are from the original list
                valid_urls = [url for url in selected_urls if url in urls]

                if 10 <= len(valid_urls) <= 20:
                    return valid_urls
                elif len(valid_urls) > 20:
                    return valid_urls[:15]
                else:
                    # If not enough valid URLs, fall back to the top-scoring URLs
                    return [url for url, _ in scored_urls[:15]]
        except Exception as e:
            print(f"Error parsing URL selection: {e}")
            # Fallback to scored URLs
            return [url for url, _ in scored_urls[:15]]

    return [url for url, _ in scored_urls[:15]]
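# Worked example of the url_relevance_score heuristic above (hypothetical URLs):
#   url_relevance_score("https://example.com/blogs/best-running-shoes", "best running shoes") -> 3
#     (one point per keyword part found in the URL)
#   url_relevance_score("https://example.com/cart", "best running shoes") -> -10
#     (penalty for an obvious non-content page)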


async def analyze_website_content_async(session, urls, keyword, research_context):
    """
    Analyze content from selected URLs, extracting images, information, and context.
    Returns a structured analysis of each page.
    """
    analyses = []

    for url in urls:
        print(f"Analyzing website content from: {url}")
        page_text = await fetch_webpage_text_async(session, url)

        if not page_text:
            continue

        # Extract relevant information using the LLM
        prompt = (
            "You are an expert content analyzer. Given the webpage content, extract relevant information "
            "related to our keyword and research topic. Format your response as a valid JSON object with "
            "the following structure:\n"
            "{\n"
            '  "key_info": "Main points and findings related to the topic",\n'
            '  "relevant_links": "Relevant links found in the content",\n'
            '  "images": "Relevant image links mentioned in the content, to be used in the article",\n'
            '  "context": "Additional context and supporting information"\n'
            "}\n\n"
            f"Keyword: {keyword}\n"
            "Note: Ensure your response is valid JSON with proper escaping of special characters."
        )

        messages = [
            {"role": "system", "content": "You are a thorough content analyzer producing valid JSON output."},
            {"role": "user", "content": f"{prompt}\n\nPage Content (first 20000 chars):\n{page_text[:20000]}"}
        ]

        analysis = await call_openrouter_async(session, messages)
        if analysis:
            try:
                # Clean the response to ensure valid JSON
                cleaned_analysis = analysis.strip()
                if cleaned_analysis.startswith("```json"):
                    cleaned_analysis = cleaned_analysis.split("```json")[1]
                if cleaned_analysis.endswith("```"):
                    cleaned_analysis = cleaned_analysis.rsplit("```", 1)[0]

                parsed_analysis = json.loads(cleaned_analysis)
                parsed_analysis['url'] = url
                analyses.append(parsed_analysis)
            except json.JSONDecodeError as e:
                print(f"Error parsing analysis for {url}: {e}")
                continue

    return analyses


async def create_detailed_outline_async(session, research_context, website_analyses, keyword):
    """
    Create a detailed outline combining the initial research and website-specific analyses.
    """
    # Prepare a summary of website analyses
    website_summary = "\n".join([
        f"URL: {analysis['url']}\n"
        f"Key Info: {analysis['key_info']}\n"
        f"Context: {analysis['context']}\n"
        for analysis in website_analyses
    ])

    prompt = (
        "You are an expert content organizer. Create a detailed outline that combines:\n"
        "1. The initial research findings\n"
        "2. The specific information found on the analyzed website\n"
        "The outline should be comprehensive, well-structured, and focused on our keyword.\n"
        "Use proper outline format with main sections (I, II, III) and subsections (A, B, C).\n"
        "You need to include real internal links and images from <websitesummary>.\n"
        f"Keyword: {keyword}"
    )

    messages = [
        {"role": "system", "content": "You are a skilled content organizer."},
        {"role": "user", "content":
            f"{prompt}\n\n"
            f"Initial Research:\n{research_context}\n\n"
            f"Website Analyses:<websitesummary>\n{website_summary}</websitesummary>"}
    ]

    outline = await call_openrouter_async(session, messages)
    return outline


# =========================
# Main Asynchronous Routine
# =========================

async def async_main():
    user_query = input("Enter your research query/topic: ").strip()
    iter_limit_input = input("Enter maximum number of iterations (default 10): ").strip()
    iteration_limit = int(iter_limit_input) if iter_limit_input.isdigit() else 10

    aggregated_contexts = []   # All useful contexts from every iteration
    all_search_queries = []    # Every search query used across iterations
    iteration = 0

    async with aiohttp.ClientSession() as session:
        # ----- INITIAL SEARCH QUERIES -----
        new_search_queries = await generate_search_queries_async(session, user_query)
        if not new_search_queries:
            print("No search queries were generated by the LLM. Exiting.")
            return
        all_search_queries.extend(new_search_queries)

        # ----- ITERATIVE RESEARCH LOOP -----
        while iteration < iteration_limit:
            print(f"\n=== Iteration {iteration + 1} ===")
            iteration_contexts = []

            # For each search query, perform SERPAPI searches concurrently
            search_tasks = [perform_search_async(session, query) for query in new_search_queries]
            search_results = await asyncio.gather(*search_tasks)

            # Aggregate all unique links from all search queries of this iteration
            unique_links = {}
            for idx, links in enumerate(search_results):
                query = new_search_queries[idx]
                for link in links:
                    if link not in unique_links:
                        unique_links[link] = query

            print(f"Aggregated {len(unique_links)} unique links from this iteration.")

            # Process each link concurrently
            link_tasks = [
                process_link(session, link, user_query, unique_links[link])
                for link in unique_links
            ]
            link_results = await asyncio.gather(*link_tasks)

            # Collect non-None contexts
            for res in link_results:
                if res:
                    iteration_contexts.append(res)

            if iteration_contexts:
                aggregated_contexts.extend(iteration_contexts)
            else:
                print("No useful contexts were found in this iteration.")

            # Check if more searches are needed
            new_search_queries = await get_new_search_queries_async(session, user_query, all_search_queries, aggregated_contexts)
            if new_search_queries == "<done>":
                print("LLM indicated that no further research is needed.")
                break
            elif new_search_queries:
                print("LLM provided new search queries:", new_search_queries)
                all_search_queries.extend(new_search_queries)
            else:
                print("LLM did not provide any new search queries. Ending the loop.")
                break

            iteration += 1

        # Generate initial research report
        print("\nGenerating initial research report...")
        final_report = await generate_final_report_async(session, user_query, aggregated_contexts)
        print("\n==== INITIAL RESEARCH REPORT ====\n")
        print(final_report)

        # Begin website-specific research phase
        print("\nMoving to website-specific research phase...")
        sitemap_url = input("Enter the sitemap URL: ").strip()
        keyword = input("Enter the specific keyword to focus on: ").strip()

        # Fetch and process sitemap
        print("Fetching sitemap...")
        all_urls = await fetch_sitemap_async(session, sitemap_url)
        if not all_urls:
            print("No URLs found in sitemap. Exiting.")
            return

        # Select relevant URLs
        print("Selecting relevant URLs...")
        selected_urls = await select_relevant_urls_async(session, all_urls, keyword, final_report)
        print(f"Selected {len(selected_urls)} relevant URLs")

        # Analyze website content
        print("Analyzing website content...")
        website_analyses = await analyze_website_content_async(session, selected_urls, keyword, final_report)
        print(f"Completed analysis of {len(website_analyses)} pages")

        # Create detailed outline
        print("\nGenerating detailed outline...")
        detailed_outline = await create_detailed_outline_async(session, final_report, website_analyses, keyword)

        print("\n==== DETAILED OUTLINE ====\n")
        print(detailed_outline)


def main():
    asyncio.run(async_main())


if __name__ == "__main__":
    main()
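# To run: save this file (any name, e.g. autoblogger.py), fill in the three API keys
# above, install aiohttp, and execute it with Python 3.8 or newer:
#   python autoblogger.py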