
from __future__ import annotations

from typing import Dict, List, Type, Union, Iterable, Optional, cast
from functools import partial
from typing_extensions import Literal

import httpx

from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._streaming import Stream
from ....types.chat import completion_create_params
from ...._base_client import make_request_options
from ....lib._parsing import (
    ResponseFormatT,
    validate_input_tools as _validate_input_tools,
    parse_chat_completion as _parse_chat_completion,
    type_to_response_format_param as _type_to_response_format,
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
from ....types.shared_params import Metadata, ReasoningEffort
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python-specific types
        and returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import OpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = OpenAI()
        completion = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # the raw `ChatCompletion` is turned into a `ParsedChatCompletion`
            # by the `parser` post-parser above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response is closed; however, the `stream` instance is still available outside
        the context manager.
        """
        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self._client.chat.completions.create,
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    async def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python-specific types
        and returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import AsyncOpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = AsyncOpenAI()
        completion = await client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # the raw `ChatCompletion` is turned into a `ParsedChatCompletion`
            # by the `parser` post-parser above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        async with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an async iterator. The full list of events yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response is closed; however, the `stream` instance is still available outside
        the context manager.
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request = self._client.chat.completions.create(
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return AsyncChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = _legacy_response.to_raw_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = _legacy_response.async_to_raw_response_wrapper(
            completions.parse,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = to_streamed_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = async_to_streamed_response_wrapper(
            completions.parse,
        )