- This topic has 0 replies, 1 voice, and was last updated 3 days, 13 hours ago by
Mr Tra.
- AuthorPosts
- August 28, 2025 at 10:00 pm #210016
Mr Tra
Participant
Recall memories extension error:
minimize
expand_allcontent_copy
volume_up
Traceback (most recent call last):
Traceback (most recent call last):
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 111, in _make_common_async_call
response = await async_httpx_client.post(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py”, line 135, in async_wrapper
result = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/http_handler.py”, line 324, in post
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/http_handler.py”, line 280, in post
response.raise_for_status()
File “/opt/venv/lib/python3.12/site-packages/httpx/_models.py”, line 829, in raise_for_status
raise HTTPStatusError(message, request=request, response=self)
httpx.HTTPStatusError: Client error ‘402 Payment Required’ for url ‘https://openrouter.ai/api/v1/chat/completions’
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/402

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File “/opt/venv/lib/python3.12/site-packages/litellm/main.py”, line 538, in acompletion
response = await init_response
^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 600, in acompletion_stream_function
completion_stream, _response_headers = await self.make_async_call_stream_helper(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 659, in make_async_call_stream_helper
response = await self._make_common_async_call(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> 8 stack lines skipped <<<
File “/a0/models.py”, line 317, in unified_call
_completion = await acompletion(
^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/utils.py”, line 1552, in wrapper_async
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/utils.py”, line 1410, in wrapper_async
result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/main.py”, line 557, in acompletion
raise exception_type(
^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py”, line 2293, in exception_type
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py”, line 2226, in exception_type
raise APIError(
litellm.exceptions.APIError: litellm.APIError: APIError: OpenrouterException – {“error”:{“message”:”This request requires more credits, or fewer max_tokens. You requested up to 32768 tokens, but can only afford 21493. To increase, visit https://openrouter.ai/settings/credits and upgrade to a paid account”,”code”:402,”metadata”:{“provider_name”:null}},”user_id”:”user_31ufwgEN0JygIm6rdb6BNTG2ioT”}
Error
minimize
expand_all
Text
litellm.APIError: APIError: OpenrouterException – {“error”:{“message”:”This request requires more credits, or fewer max_tokens. You requested up to 32768 tokens, but can only afford 4298. To increase, visit https://openrouter.ai/settings/credits and upgrade to a paid account”,”code”:402,”metadata”:{“provider_name”:null}},”user_id”:”user_31ufwgEN0JygIm6rdb6BNTG2ioT”}
Traceback (most recent call last):
Traceback (most recent call last):
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 111, in _make_common_async_call
response = await async_httpx_client.post(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py”, line 135, in async_wrapper
result = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/http_handler.py”, line 324, in post
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/http_handler.py”, line 280, in post
response.raise_for_status()
File “/opt/venv/lib/python3.12/site-packages/httpx/_models.py”, line 829, in raise_for_status
raise HTTPStatusError(message, request=request, response=self)
httpx.HTTPStatusError: Client error ‘402 Payment Required’ for url ‘https://openrouter.ai/api/v1/chat/completions’
For more information check: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/402

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File “/opt/venv/lib/python3.12/site-packages/litellm/main.py”, line 538, in acompletion
response = await init_response
^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 600, in acompletion_stream_function
completion_stream, _response_headers = await self.make_async_call_stream_helper(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/llm_http_handler.py”, line 659, in make_async_call_stream_helper
response = await self._make_common_async_call(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> 8 stack lines skipped <<<
File “/a0/models.py”, line 317, in unified_call
_completion = await acompletion(
^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/utils.py”, line 1552, in wrapper_async
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/utils.py”, line 1410, in wrapper_async
result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/main.py”, line 557, in acompletion
raise exception_type(
^^^^^^^^^^^^^^^
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py”, line 2293, in exception_type
raise e
File “/opt/venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py”, line 2226, in exception_type
raise APIError(
litellm.exceptions.APIError: litellm.APIError: APIError: OpenrouterException – {“error”:{“message”:”This request requires more credits, or fewer max_tokens. You requested up to 32768 tokens, but can only afford 4298. To increase, visit https://openrouter.ai/settings/credits and upgrade to a paid account”,”code”:402,”metadata”:{“provider_name”:null}},”user_id”:”user_31ufwgEN0JygIm6rdb6BNTG2ioT”}
- AuthorPosts
- You must be logged in to reply to this topic.