---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[10], line 22
     19 qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch, return_source_documents=False)
     21 q = input("input your query:")
---> 22 result = qa.run(query=q)
     24 print(result["result"])
     25 #print(result["source_documents"])

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:261, in Chain.run(self, callbacks, *args, **kwargs)
    258     return self(args[0], callbacks=callbacks)[self.output_keys[0]]
    260 if kwargs and not args:
--> 261     return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
    263 if not kwargs and not args:
    264     raise ValueError(
    265         "`run` supported with either positional arguments or keyword arguments,"
    266         " but none were provided."
    267     )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)
--> 147     raise e
    148 run_manager.on_chain_end(outputs)
    149 final_outputs: Dict[str, Any] = self.prep_outputs(
    150     inputs, outputs, return_only_outputs
    151 )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    135 run_manager = callback_manager.on_chain_start(
    136     dumpd(self),
    137     inputs,
    138 )
    139 try:
    140     outputs = (
--> 141         self._call(inputs, run_manager=run_manager)
    142         if new_arg_supported
    143         else self._call(inputs)
    144     )
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)

File /opt/conda/lib/python3.10/site-packages/langchain/chains/retrieval_qa/base.py:120, in BaseRetrievalQA._call(self, inputs, run_manager)
    117 question = inputs[self.input_key]
    119 docs = self._get_docs(question)
--> 120 answer = self.combine_documents_chain.run(
    121     input_documents=docs, question=question, callbacks=_run_manager.get_child()
    122 )
    124 if self.return_source_documents:
    125     return {self.output_key: answer, "source_documents": docs}

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:261, in Chain.run(self, callbacks, *args, **kwargs)
    258     return self(args[0], callbacks=callbacks)[self.output_keys[0]]
    260 if kwargs and not args:
--> 261     return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
    263 if not kwargs and not args:
    264     raise ValueError(
    265         "`run` supported with either positional arguments or keyword arguments,"
    266         " but none were provided."
    267     )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)
--> 147     raise e
    148 run_manager.on_chain_end(outputs)
    149 final_outputs: Dict[str, Any] = self.prep_outputs(
    150     inputs, outputs, return_only_outputs
    151 )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    135 run_manager = callback_manager.on_chain_start(
    136     dumpd(self),
    137     inputs,
    138 )
    139 try:
    140     outputs = (
--> 141         self._call(inputs, run_manager=run_manager)
    142         if new_arg_supported
    143         else self._call(inputs)
    144     )
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)

File /opt/conda/lib/python3.10/site-packages/langchain/chains/combine_documents/base.py:84, in BaseCombineDocumentsChain._call(self, inputs, run_manager)
     82 # Other keys are assumed to be needed for LLM prediction
     83 other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
---> 84 output, extra_return_dict = self.combine_docs(
     85     docs, callbacks=_run_manager.get_child(), **other_keys
     86 )
     87 extra_return_dict[self.output_key] = output
     88 return extra_return_dict

File /opt/conda/lib/python3.10/site-packages/langchain/chains/combine_documents/stuff.py:87, in StuffDocumentsChain.combine_docs(self, docs, callbacks, **kwargs)
     85 inputs = self._get_inputs(docs, **kwargs)
     86 # Call predict on the LLM.
---> 87 return self.llm_chain.predict(callbacks=callbacks, **inputs), {}

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:218, in LLMChain.predict(self, callbacks, **kwargs)
    203 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
    204     """Format prompt with kwargs and pass to LLM.
    205
    206     Args:
   (...)
    216             completion = llm.predict(adjective="funny")
    217     """
--> 218     return self(kwargs, callbacks=callbacks)[self.output_key]

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:147, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)
--> 147     raise e
    148 run_manager.on_chain_end(outputs)
    149 final_outputs: Dict[str, Any] = self.prep_outputs(
    150     inputs, outputs, return_only_outputs
    151 )

File /opt/conda/lib/python3.10/site-packages/langchain/chains/base.py:141, in Chain.__call__(self, inputs, return_only_outputs, callbacks, include_run_info)
    135 run_manager = callback_manager.on_chain_start(
    136     dumpd(self),
    137     inputs,
    138 )
    139 try:
    140     outputs = (
--> 141         self._call(inputs, run_manager=run_manager)
    142         if new_arg_supported
    143         else self._call(inputs)
    144     )
    145 except (KeyboardInterrupt, Exception) as e:
    146     run_manager.on_chain_error(e)

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:74, in LLMChain._call(self, inputs, run_manager)
     69 def _call(
     70     self,
     71     inputs: Dict[str, Any],
     72     run_manager: Optional[CallbackManagerForChainRun] = None,
     73 ) -> Dict[str, str]:
---> 74     response = self.generate([inputs], run_manager=run_manager)
     75     return self.create_outputs(response)[0]

File /opt/conda/lib/python3.10/site-packages/langchain/chains/llm.py:84, in LLMChain.generate(self, input_list, run_manager)
     82 """Generate LLM result from inputs."""
     83 prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
---> 84 return self.llm.generate_prompt(
     85     prompts, stop, callbacks=run_manager.get_child() if run_manager else None
     86 )

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:139, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    131 def generate_prompt(
    132     self,
    133     prompts: List[PromptValue],
   (...)
    136     **kwargs: Any,
    137 ) -> LLMResult:
    138     prompt_strings = [p.to_string() for p in prompts]
--> 139     return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:203, in BaseLLM.generate(self, prompts, stop, callbacks, **kwargs)
    201 except (KeyboardInterrupt, Exception) as e:
    202     run_manager.on_llm_error(e)
--> 203     raise e
    204 run_manager.on_llm_end(output)
    205 if run_manager:

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:195, in BaseLLM.generate(self, prompts, stop, callbacks, **kwargs)
    190 run_manager = callback_manager.on_llm_start(
    191     dumpd(self), prompts, invocation_params=params, options=options
    192 )
    193 try:
    194     output = (
--> 195         self._generate(
    196             prompts, stop=stop, run_manager=run_manager, **kwargs
    197         )
    198         if new_arg_supported
    199         else self._generate(prompts, stop=stop, **kwargs)
    200     )
    201 except (KeyboardInterrupt, Exception) as e:
    202     run_manager.on_llm_error(e)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/base.py:493, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
    490 new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
    491 for prompt in prompts:
    492     text = (
--> 493         self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
    494         if new_arg_supported
    495         else self._call(prompt, stop=stop, **kwargs)
    496     )
    497     generations.append([Generation(text=text)])
    498 return LLMResult(generations=generations)

File /opt/conda/lib/python3.10/site-packages/langchain/llms/huggingface_hub.py:114, in HuggingFaceHub._call(self, prompt, stop, run_manager, **kwargs)
    112 response = self.client(inputs=prompt, params=params)
    113 if "error" in response:
--> 114     raise ValueError(f"Error raised by inference API: {response['error']}")
    115 if self.client.task == "text-generation":
    116     # Text generation return includes the starter text.
    117     text = response[0]["generated_text"][len(prompt) :]

ValueError: Error raised by inference API: Pipeline cannot infer suitable model classes from lucas0/empath-llama-7b
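
What the traceback shows: the chain wiring itself is fine; the failure happens in HuggingFaceHub._call when the hosted Hugging Face Inference API reports that it cannot map lucas0/empath-llama-7b to a pipeline class. That error usually means the repo lacks the config/metadata the API needs to pick a model class, which is common when a repo contains only a PEFT/LoRA adapter rather than a full model. A frequently suggested workaround is to skip the hosted API and run the model locally through HuggingFacePipeline. The sketch below is a minimal, unverified example of that approach: it assumes lucas0/empath-llama-7b is a LoRA-style PEFT adapter and that docsearch from the original cell is still in scope. Both are assumptions, not facts confirmed by the traceback.

# Hedged sketch of a local-loading workaround, not the poster's confirmed fix.
# Assumptions (cannot be verified from the traceback alone):
#   - lucas0/empath-llama-7b is a PEFT/LoRA adapter over some base causal LM
#   - `docsearch` is the retriever already defined earlier in the notebook
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftConfig, PeftModel
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA

adapter_id = "lucas0/empath-llama-7b"
peft_config = PeftConfig.from_pretrained(adapter_id)

# Load the base model the adapter was trained against, then apply the adapter.
base_model = AutoModelForCausalLM.from_pretrained(peft_config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, adapter_id)
model = model.merge_and_unload()  # fold LoRA weights back into a plain model for pipeline()

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
llm = HuggingFacePipeline(pipeline=pipe)  # local pipeline; no hosted Inference API involved

qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=docsearch,  # as in the original cell
    return_source_documents=False,
)

q = input("input your query:")
result = qa.run(query=q)
print(result)  # qa.run() returns the answer string itself, not a dict

Separately, note that Chain.run returns the output string directly (base.py:261 indexes into output_keys before returning), so the print(result["result"]) on line 24 of the original cell would raise a TypeError even once the inference-API error is resolved; printing result itself, as above, avoids that.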