# Sentry SDK integration for the OpenAI Python client.
  1. from functools import wraps
  2. import sentry_sdk
  3. from sentry_sdk import consts
  4. from sentry_sdk.ai.monitoring import record_token_usage
  5. from sentry_sdk.ai.utils import set_data_normalized, normalize_message_roles
  6. from sentry_sdk.consts import SPANDATA
  7. from sentry_sdk.integrations import DidNotEnable, Integration
  8. from sentry_sdk.scope import should_send_default_pii
  9. from sentry_sdk.tracing_utils import set_span_errored
  10. from sentry_sdk.utils import (
  11. capture_internal_exceptions,
  12. event_from_exception,
  13. safe_serialize,
  14. )
  15. from typing import TYPE_CHECKING
  16. if TYPE_CHECKING:
  17. from typing import Any, Iterable, List, Optional, Callable, AsyncIterator, Iterator
  18. from sentry_sdk.tracing import Span
# Import the OpenAI client surface that gets patched. If openai itself is not
# installed, the integration must refuse to enable.
try:
    # NOT_GIVEN is openai's sentinel for omitted keyword arguments; very old
    # releases do not export it, so fall back to None.
    try:
        from openai import NOT_GIVEN
    except ImportError:
        NOT_GIVEN = None

    from openai.resources.chat.completions import Completions, AsyncCompletions
    from openai.resources import Embeddings, AsyncEmbeddings

    if TYPE_CHECKING:
        from openai.types.chat import ChatCompletionMessageParam, ChatCompletionChunk
except ImportError:
    raise DidNotEnable("OpenAI not installed")

# Whether the installed openai package exposes the Responses API.
RESPONSES_API_ENABLED = True
try:
    # responses API support was introduced in v1.66.0
    from openai.resources.responses import Responses, AsyncResponses
    from openai.types.responses.response_completed_event import ResponseCompletedEvent
except ImportError:
    RESPONSES_API_ENABLED = False
class OpenAIIntegration(Integration):
    """Sentry integration instrumenting the OpenAI Python client.

    Monkey-patches the chat-completions, embeddings and (when available)
    responses endpoints so every call is traced as a gen_ai span with
    token-usage data attached.
    """

    identifier = "openai"
    origin = f"auto.ai.{identifier}"

    def __init__(self, include_prompts=True, tiktoken_encoding_name=None):
        # type: (OpenAIIntegration, bool, Optional[str]) -> None
        # include_prompts: when False, prompt/response text is never attached
        # to spans, even if send_default_pii is enabled.
        self.include_prompts = include_prompts

        # Optional tiktoken encoding used to count tokens manually when the
        # API response carries no usage information.
        self.tiktoken_encoding = None
        if tiktoken_encoding_name is not None:
            import tiktoken  # type: ignore

            self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name)

    @staticmethod
    def setup_once():
        # type: () -> None
        # Patch the sync and async client entry points in place.
        Completions.create = _wrap_chat_completion_create(Completions.create)
        AsyncCompletions.create = _wrap_async_chat_completion_create(
            AsyncCompletions.create
        )

        Embeddings.create = _wrap_embeddings_create(Embeddings.create)
        AsyncEmbeddings.create = _wrap_async_embeddings_create(AsyncEmbeddings.create)

        if RESPONSES_API_ENABLED:
            Responses.create = _wrap_responses_create(Responses.create)
            AsyncResponses.create = _wrap_async_responses_create(AsyncResponses.create)

    def count_tokens(self, s):
        # type: (OpenAIIntegration, str) -> int
        # Returns 0 when no tiktoken encoding was configured at init time.
        if self.tiktoken_encoding is not None:
            return len(self.tiktoken_encoding.encode_ordinary(s))
        return 0
  64. def _capture_exception(exc, manual_span_cleanup=True):
  65. # type: (Any, bool) -> None
  66. # Close an eventually open span
  67. # We need to do this by hand because we are not using the start_span context manager
  68. current_span = sentry_sdk.get_current_span()
  69. set_span_errored(current_span)
  70. if manual_span_cleanup and current_span is not None:
  71. current_span.__exit__(None, None, None)
  72. event, hint = event_from_exception(
  73. exc,
  74. client_options=sentry_sdk.get_client().options,
  75. mechanism={"type": "openai", "handled": False},
  76. )
  77. sentry_sdk.capture_event(event, hint=hint)
  78. def _get_usage(usage, names):
  79. # type: (Any, List[str]) -> int
  80. for name in names:
  81. if hasattr(usage, name) and isinstance(getattr(usage, name), int):
  82. return getattr(usage, name)
  83. return 0
def _calculate_token_usage(
    messages, response, span, streaming_message_responses, count_tokens
):
    # type: (Optional[Iterable[ChatCompletionMessageParam]], Any, Span, Optional[List[str]], Callable[..., Any]) -> None
    """Attach token-usage data to *span*, preferring API-reported usage.

    Falls back to counting tokens via *count_tokens* (tiktoken-backed; may
    return 0 when no encoding is configured) if the response carries no
    usage numbers. For streams, *streaming_message_responses* holds the
    accumulated output text per choice.
    """
    input_tokens = 0  # type: Optional[int]
    input_tokens_cached = 0  # type: Optional[int]
    output_tokens = 0  # type: Optional[int]
    output_tokens_reasoning = 0  # type: Optional[int]
    total_tokens = 0  # type: Optional[int]

    if hasattr(response, "usage"):
        # Field names differ between the chat-completions API and the
        # responses API, hence the candidate lists.
        input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"])
        if hasattr(response.usage, "input_tokens_details"):
            input_tokens_cached = _get_usage(
                response.usage.input_tokens_details, ["cached_tokens"]
            )

        output_tokens = _get_usage(
            response.usage, ["output_tokens", "completion_tokens"]
        )
        if hasattr(response.usage, "output_tokens_details"):
            output_tokens_reasoning = _get_usage(
                response.usage.output_tokens_details, ["reasoning_tokens"]
            )

        total_tokens = _get_usage(response.usage, ["total_tokens"])

    # Manually count tokens
    if input_tokens == 0:
        for message in messages or []:
            if isinstance(message, dict) and "content" in message:
                input_tokens += count_tokens(message["content"])
            elif isinstance(message, str):
                input_tokens += count_tokens(message)

    if output_tokens == 0:
        if streaming_message_responses is not None:
            # Streaming path: output was accumulated chunk-by-chunk as text.
            for message in streaming_message_responses:
                output_tokens += count_tokens(message)
        elif hasattr(response, "choices"):
            for choice in response.choices:
                if hasattr(choice, "message"):
                    # NOTE(review): choice.message is a message object, not a
                    # string, while count_tokens tokenizes text — presumably
                    # this only matters when a tiktoken encoding is
                    # configured; confirm intended behavior.
                    output_tokens += count_tokens(choice.message)

    # Do not set token data if it is 0
    input_tokens = input_tokens or None
    input_tokens_cached = input_tokens_cached or None
    output_tokens = output_tokens or None
    output_tokens_reasoning = output_tokens_reasoning or None
    total_tokens = total_tokens or None

    record_token_usage(
        span,
        input_tokens=input_tokens,
        input_tokens_cached=input_tokens_cached,
        output_tokens=output_tokens,
        output_tokens_reasoning=output_tokens_reasoning,
        total_tokens=total_tokens,
    )
def _set_input_data(span, kwargs, operation, integration):
    # type: (Span, dict[str, Any], str, OpenAIIntegration) -> None
    """Record request data (messages, model, sampling params, tools) on *span*."""
    # Input messages (the prompt or data sent to the model)
    messages = kwargs.get("messages")
    if messages is None:
        # The responses API names the prompt "input" instead of "messages".
        messages = kwargs.get("input")

        if isinstance(messages, str):
            messages = [messages]

    # Prompt content is PII — only attach it with explicit opt-in on both
    # the SDK (send_default_pii) and the integration (include_prompts).
    if (
        messages is not None
        and len(messages) > 0
        and should_send_default_pii()
        and integration.include_prompts
    ):
        normalized_messages = normalize_message_roles(messages)
        set_data_normalized(
            span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False
        )

    # Input attributes: Common
    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")
    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)

    # Input attributes: Optional
    kwargs_keys_to_attributes = {
        "model": SPANDATA.GEN_AI_REQUEST_MODEL,
        "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING,
        "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
        "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
        "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
        "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
        "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
    }
    for key, attribute in kwargs_keys_to_attributes.items():
        value = kwargs.get(key)

        # NOT_GIVEN is openai's own sentinel for omitted arguments.
        if value is not NOT_GIVEN and value is not None:
            set_data_normalized(span, attribute, value)

    # Input attributes: Tools
    tools = kwargs.get("tools")
    if tools is not NOT_GIVEN and tools is not None and len(tools) > 0:
        set_data_normalized(
            span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
        )
def _set_output_data(span, response, kwargs, integration, finish_span=True):
    # type: (Span, Any, dict[str, Any], OpenAIIntegration, bool) -> None
    """Record response data (model, text, tool calls, token usage) on *span*.

    Handles three response shapes: chat completions (``choices``), the
    responses API (``output``), and streaming responses (``_iterator``).
    When *finish_span* is True the span is closed here — for streams only
    after the wrapped iterator has been fully consumed.
    """
    if hasattr(response, "model"):
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)

    # Input messages (the prompt or data sent to the model)
    # used for the token usage calculation
    messages = kwargs.get("messages")
    if messages is None:
        messages = kwargs.get("input")

    if messages is not None and isinstance(messages, str):
        messages = [messages]

    # Chat completions API response
    if hasattr(response, "choices"):
        if should_send_default_pii() and integration.include_prompts:
            response_text = [choice.message.dict() for choice in response.choices]
            if len(response_text) > 0:
                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)

        _calculate_token_usage(messages, response, span, None, integration.count_tokens)

        if finish_span:
            span.__exit__(None, None, None)

    # Responses API response
    elif hasattr(response, "output"):
        if should_send_default_pii() and integration.include_prompts:
            output_messages = {
                "response": [],
                "tool": [],
            }  # type: (dict[str, list[Any]])

            for output in response.output:
                if output.type == "function_call":
                    output_messages["tool"].append(output.dict())
                elif output.type == "message":
                    for output_message in output.content:
                        try:
                            output_messages["response"].append(output_message.text)
                        except AttributeError:
                            # Unknown output message type, just return the json
                            output_messages["response"].append(output_message.dict())

            if len(output_messages["tool"]) > 0:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
                    output_messages["tool"],
                    unpack=False,
                )

            if len(output_messages["response"]) > 0:
                set_data_normalized(
                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
                )

        _calculate_token_usage(messages, response, span, None, integration.count_tokens)

        if finish_span:
            span.__exit__(None, None, None)

    # Streaming response: replace the underlying iterator with one that
    # accumulates text chunks as they are yielded and finalizes the span
    # only once the stream is exhausted.
    elif hasattr(response, "_iterator"):
        data_buf: list[list[str]] = []  # one for each choice

        old_iterator = response._iterator

        def new_iterator():
            # type: () -> Iterator[ChatCompletionChunk]
            # Assume manual counting until a usage-bearing completion event
            # arrives (responses API only).
            count_tokens_manually = True
            for x in old_iterator:
                with capture_internal_exceptions():
                    # OpenAI chat completion API
                    if hasattr(x, "choices"):
                        choice_index = 0
                        for choice in x.choices:
                            if hasattr(choice, "delta") and hasattr(
                                choice.delta, "content"
                            ):
                                content = choice.delta.content
                                if len(data_buf) <= choice_index:
                                    data_buf.append([])
                                data_buf[choice_index].append(content or "")
                            choice_index += 1
                    # OpenAI responses API
                    elif hasattr(x, "delta"):
                        if len(data_buf) == 0:
                            data_buf.append([])
                        data_buf[0].append(x.delta or "")

                    # OpenAI responses API end of streaming response
                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
                        _calculate_token_usage(
                            messages,
                            x.response,
                            span,
                            None,
                            integration.count_tokens,
                        )
                        count_tokens_manually = False

                yield x

            with capture_internal_exceptions():
                if len(data_buf) > 0:
                    all_responses = ["".join(chunk) for chunk in data_buf]
                    if should_send_default_pii() and integration.include_prompts:
                        set_data_normalized(
                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                        )
                    if count_tokens_manually:
                        _calculate_token_usage(
                            messages,
                            response,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
            if finish_span:
                span.__exit__(None, None, None)

        async def new_iterator_async():
            # type: () -> AsyncIterator[ChatCompletionChunk]
            # Async twin of new_iterator(); logic mirrors it exactly.
            count_tokens_manually = True
            async for x in old_iterator:
                with capture_internal_exceptions():
                    # OpenAI chat completion API
                    if hasattr(x, "choices"):
                        choice_index = 0
                        for choice in x.choices:
                            if hasattr(choice, "delta") and hasattr(
                                choice.delta, "content"
                            ):
                                content = choice.delta.content
                                if len(data_buf) <= choice_index:
                                    data_buf.append([])
                                data_buf[choice_index].append(content or "")
                            choice_index += 1
                    # OpenAI responses API
                    elif hasattr(x, "delta"):
                        if len(data_buf) == 0:
                            data_buf.append([])
                        data_buf[0].append(x.delta or "")

                    # OpenAI responses API end of streaming response
                    if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
                        _calculate_token_usage(
                            messages,
                            x.response,
                            span,
                            None,
                            integration.count_tokens,
                        )
                        count_tokens_manually = False

                yield x

            with capture_internal_exceptions():
                if len(data_buf) > 0:
                    all_responses = ["".join(chunk) for chunk in data_buf]
                    if should_send_default_pii() and integration.include_prompts:
                        set_data_normalized(
                            span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
                        )
                    if count_tokens_manually:
                        _calculate_token_usage(
                            messages,
                            response,
                            span,
                            all_responses,
                            integration.count_tokens,
                        )
            if finish_span:
                span.__exit__(None, None, None)

        if str(type(response._iterator)) == "<class 'async_generator'>":
            response._iterator = new_iterator_async()
        else:
            response._iterator = new_iterator()

    # Unknown response shape: still record whatever usage can be derived.
    else:
        _calculate_token_usage(messages, response, span, None, integration.count_tokens)

        if finish_span:
            span.__exit__(None, None, None)
def _new_chat_completion_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of ``Completions.create``.

    Yields ``(f, args, kwargs)`` to the sync/async wrapper, which performs
    the real API call and sends the result back in; the span is opened
    before the yield and closed in ``_set_output_data`` afterwards.
    """
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    if "messages" not in kwargs:
        # invalid call (in all versions of openai), let it return error
        return f(*args, **kwargs)

    try:
        iter(kwargs["messages"])
    except TypeError:
        # invalid call (in all versions), messages must be iterable
        return f(*args, **kwargs)

    model = kwargs.get("model")
    operation = "chat"

    span = sentry_sdk.start_span(
        op=consts.OP.GEN_AI_CHAT,
        name=f"{operation} {model}",
        origin=OpenAIIntegration.origin,
    )
    # Entered manually (no ``with``): streaming responses outlive this
    # generator, so the span must be closed later by _set_output_data or
    # _capture_exception.
    span.__enter__()

    _set_input_data(span, kwargs, operation, integration)

    response = yield f, args, kwargs

    _set_output_data(span, response, kwargs, integration, finish_span=True)

    return response
  362. def _wrap_chat_completion_create(f):
  363. # type: (Callable[..., Any]) -> Callable[..., Any]
  364. def _execute_sync(f, *args, **kwargs):
  365. # type: (Any, Any, Any) -> Any
  366. gen = _new_chat_completion_common(f, *args, **kwargs)
  367. try:
  368. f, args, kwargs = next(gen)
  369. except StopIteration as e:
  370. return e.value
  371. try:
  372. try:
  373. result = f(*args, **kwargs)
  374. except Exception as e:
  375. _capture_exception(e)
  376. raise e from None
  377. return gen.send(result)
  378. except StopIteration as e:
  379. return e.value
  380. @wraps(f)
  381. def _sentry_patched_create_sync(*args, **kwargs):
  382. # type: (Any, Any) -> Any
  383. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  384. if integration is None or "messages" not in kwargs:
  385. # no "messages" means invalid call (in all versions of openai), let it return error
  386. return f(*args, **kwargs)
  387. return _execute_sync(f, *args, **kwargs)
  388. return _sentry_patched_create_sync
  389. def _wrap_async_chat_completion_create(f):
  390. # type: (Callable[..., Any]) -> Callable[..., Any]
  391. async def _execute_async(f, *args, **kwargs):
  392. # type: (Any, Any, Any) -> Any
  393. gen = _new_chat_completion_common(f, *args, **kwargs)
  394. try:
  395. f, args, kwargs = next(gen)
  396. except StopIteration as e:
  397. return await e.value
  398. try:
  399. try:
  400. result = await f(*args, **kwargs)
  401. except Exception as e:
  402. _capture_exception(e)
  403. raise e from None
  404. return gen.send(result)
  405. except StopIteration as e:
  406. return e.value
  407. @wraps(f)
  408. async def _sentry_patched_create_async(*args, **kwargs):
  409. # type: (Any, Any) -> Any
  410. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  411. if integration is None or "messages" not in kwargs:
  412. # no "messages" means invalid call (in all versions of openai), let it return error
  413. return await f(*args, **kwargs)
  414. return await _execute_async(f, *args, **kwargs)
  415. return _sentry_patched_create_async
def _new_embeddings_create_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of ``Embeddings.create``.

    Embeddings responses are not streamed, so the span can be managed by a
    ``with`` block here — hence ``finish_span=False`` below, and
    ``manual_span_cleanup=False`` in the wrappers' error path.
    """
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    model = kwargs.get("model")
    operation = "embeddings"

    with sentry_sdk.start_span(
        op=consts.OP.GEN_AI_EMBEDDINGS,
        name=f"{operation} {model}",
        origin=OpenAIIntegration.origin,
    ) as span:
        _set_input_data(span, kwargs, operation, integration)

        response = yield f, args, kwargs

        _set_output_data(span, response, kwargs, integration, finish_span=False)

        return response
  432. def _wrap_embeddings_create(f):
  433. # type: (Any) -> Any
  434. def _execute_sync(f, *args, **kwargs):
  435. # type: (Any, Any, Any) -> Any
  436. gen = _new_embeddings_create_common(f, *args, **kwargs)
  437. try:
  438. f, args, kwargs = next(gen)
  439. except StopIteration as e:
  440. return e.value
  441. try:
  442. try:
  443. result = f(*args, **kwargs)
  444. except Exception as e:
  445. _capture_exception(e, manual_span_cleanup=False)
  446. raise e from None
  447. return gen.send(result)
  448. except StopIteration as e:
  449. return e.value
  450. @wraps(f)
  451. def _sentry_patched_create_sync(*args, **kwargs):
  452. # type: (Any, Any) -> Any
  453. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  454. if integration is None:
  455. return f(*args, **kwargs)
  456. return _execute_sync(f, *args, **kwargs)
  457. return _sentry_patched_create_sync
  458. def _wrap_async_embeddings_create(f):
  459. # type: (Any) -> Any
  460. async def _execute_async(f, *args, **kwargs):
  461. # type: (Any, Any, Any) -> Any
  462. gen = _new_embeddings_create_common(f, *args, **kwargs)
  463. try:
  464. f, args, kwargs = next(gen)
  465. except StopIteration as e:
  466. return await e.value
  467. try:
  468. try:
  469. result = await f(*args, **kwargs)
  470. except Exception as e:
  471. _capture_exception(e, manual_span_cleanup=False)
  472. raise e from None
  473. return gen.send(result)
  474. except StopIteration as e:
  475. return e.value
  476. @wraps(f)
  477. async def _sentry_patched_create_async(*args, **kwargs):
  478. # type: (Any, Any) -> Any
  479. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  480. if integration is None:
  481. return await f(*args, **kwargs)
  482. return await _execute_async(f, *args, **kwargs)
  483. return _sentry_patched_create_async
def _new_responses_create_common(f, *args, **kwargs):
    # type: (Any, Any, Any) -> Any
    """Generator driving the instrumentation of ``Responses.create``.

    Yields ``(f, args, kwargs)`` to the sync/async wrapper, which performs
    the real API call and sends the result back in.
    """
    integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
    if integration is None:
        return f(*args, **kwargs)

    model = kwargs.get("model")
    operation = "responses"

    span = sentry_sdk.start_span(
        op=consts.OP.GEN_AI_RESPONSES,
        name=f"{operation} {model}",
        origin=OpenAIIntegration.origin,
    )
    # Entered manually (no ``with``): streaming responses outlive this
    # generator, so the span must be closed later by _set_output_data or
    # _capture_exception.
    span.__enter__()

    _set_input_data(span, kwargs, operation, integration)

    response = yield f, args, kwargs

    _set_output_data(span, response, kwargs, integration, finish_span=True)

    return response
  501. def _wrap_responses_create(f):
  502. # type: (Any) -> Any
  503. def _execute_sync(f, *args, **kwargs):
  504. # type: (Any, Any, Any) -> Any
  505. gen = _new_responses_create_common(f, *args, **kwargs)
  506. try:
  507. f, args, kwargs = next(gen)
  508. except StopIteration as e:
  509. return e.value
  510. try:
  511. try:
  512. result = f(*args, **kwargs)
  513. except Exception as e:
  514. _capture_exception(e)
  515. raise e from None
  516. return gen.send(result)
  517. except StopIteration as e:
  518. return e.value
  519. @wraps(f)
  520. def _sentry_patched_create_sync(*args, **kwargs):
  521. # type: (Any, Any) -> Any
  522. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  523. if integration is None:
  524. return f(*args, **kwargs)
  525. return _execute_sync(f, *args, **kwargs)
  526. return _sentry_patched_create_sync
  527. def _wrap_async_responses_create(f):
  528. # type: (Any) -> Any
  529. async def _execute_async(f, *args, **kwargs):
  530. # type: (Any, Any, Any) -> Any
  531. gen = _new_responses_create_common(f, *args, **kwargs)
  532. try:
  533. f, args, kwargs = next(gen)
  534. except StopIteration as e:
  535. return await e.value
  536. try:
  537. try:
  538. result = await f(*args, **kwargs)
  539. except Exception as e:
  540. _capture_exception(e)
  541. raise e from None
  542. return gen.send(result)
  543. except StopIteration as e:
  544. return e.value
  545. @wraps(f)
  546. async def _sentry_patched_responses_async(*args, **kwargs):
  547. # type: (Any, Any) -> Any
  548. integration = sentry_sdk.get_client().get_integration(OpenAIIntegration)
  549. if integration is None:
  550. return await f(*args, **kwargs)
  551. return await _execute_async(f, *args, **kwargs)
  552. return _sentry_patched_responses_async