
    eiA                    x   S SK Jr  S SKJrJrJrJr  S SKJrJ	r	  S SK
r
SSKJr  SSKJr  SSKJrJrJrJrJrJrJrJr  SS	KJrJrJr  SS
KJr  SSKJrJ r   SSK!J"r"J#r#  SSK$J%r%J&r&  SSK'J(r(  SSK)J*r*  SSK+J,r,  SS/r- " S S\5      r. " S S\ 5      r/ " S S5      r0 " S S5      r1 " S S5      r2 " S S5      r3g)    )annotations)DictUnionIterableOptional)LiteraloverloadN   )_legacy_response)completion_create_params)BodyOmitQueryHeadersNotGivenSequenceNotStromit	not_given)required_argsmaybe_transformasync_maybe_transform)cached_property)SyncAPIResourceAsyncAPIResource)to_streamed_response_wrapper"async_to_streamed_response_wrapper)StreamAsyncStream)make_request_options)
Completion) ChatCompletionStreamOptionsParamCompletionsAsyncCompletionsc                     \ rS rSr\SS j5       r\SS j5       r\\\\\\\\\\\\\\\\\SSS\	S.                                             SS jj5       r
\\\\\\\\\\\\\\\\SSS\	S	.                                             SS
 jj5       r
\\\\\\\\\\\\\\\\SSS\	S	.                                             SS jj5       r
\" SS// SQ5      \\\\\\\\\\\\\\\\SSS\	S.                                             SS jj5       r
Srg)r"      c                    [        U 5      $ z
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.

For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
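
Example (illustrative sketch only; assumes a client constructed elsewhere, e.g. `client = OpenAI()`):

    # `client` is an assumed, already-configured OpenAI() instance.
    response = client.completions.with_raw_response.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test.",
    )
    print(response.headers.get("x-request-id"))
    completion = response.parse()  # parse the body into the typed `Completion` model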
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
An alternative to `.with_raw_response` that doesn't eagerly read the response body.

For more information, see https://www.github.com/openai/openai-python#with_streaming_response
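
Example (illustrative sketch only; assumes a client constructed elsewhere, e.g. `client = OpenAI()`):

    # `client` is an assumed, already-configured OpenAI() instance; the body is read lazily.
    with client.completions.with_streaming_response.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test.",
    ) as response:
        for line in response.iter_lines():
            print(line)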
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        # The request-level options below are forwarded to the underlying HTTP call.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """
Creates a completion for the provided prompt and parameters.

Args:
  model: ID of the model to use. You can use the
      [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      see all of your available models, or see our
      [Model overview](https://platform.openai.com/docs/models) for descriptions of
      them.

  prompt: The prompt(s) to generate completions for, encoded as a string, array of
      strings, array of tokens, or array of token arrays.

      Note that <|endoftext|> is the document separator that the model sees during
      training, so if a prompt is not specified the model will generate as if from the
      beginning of a new document.

  best_of: Generates `best_of` completions server-side and returns the "best" (the one with
      the highest log probability per token). Results cannot be streamed.

      When used with `n`, `best_of` controls the number of candidate completions and
      `n` specifies how many to return – `best_of` must be greater than `n`.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  echo: Echo back the prompt in addition to the completion

  frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      existing frequency in the text so far, decreasing the model's likelihood to
      repeat the same line verbatim.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  logit_bias: Modify the likelihood of specified tokens appearing in the completion.

      Accepts a JSON object that maps tokens (specified by their token ID in the GPT
      tokenizer) to an associated bias value from -100 to 100. You can use this
      [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
      Mathematically, the bias is added to the logits generated by the model prior to
      sampling. The exact effect will vary per model, but values between -1 and 1
      should decrease or increase likelihood of selection; values like -100 or 100
      should result in a ban or exclusive selection of the relevant token.

      As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
      from being generated.

  logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
      well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
      list of the 5 most likely tokens. The API will always return the `logprob` of
      the sampled token, so there may be up to `logprobs+1` elements in the response.

      The maximum value for `logprobs` is 5.

  max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
      completion.

      The token count of your prompt plus `max_tokens` cannot exceed the model's
      context length.
      [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      for counting tokens.

  n: How many completions to generate for each prompt.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
      whether they appear in the text so far, increasing the model's likelihood to
      talk about new topics.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  seed: If specified, our system will make a best effort to sample deterministically,
      such that repeated requests with the same `seed` and parameters should return
      the same result.

      Determinism is not guaranteed, and you should refer to the `system_fingerprint`
      response parameter to monitor changes in the backend.

  stop: Not supported with latest reasoning models `o3` and `o4-mini`.

      Up to 4 sequences where the API will stop generating further tokens. The
      returned text will not contain the stop sequence.

  stream: Whether to stream back partial progress. If set, tokens will be sent as
      data-only
      [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
      as they become available, with the stream terminated by a `data: [DONE]`
      message.
      [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

  stream_options: Options for streaming response. Only set this when you set `stream: true`.

  suffix: The suffix that comes after a completion of inserted text.

      This parameter is only supported for `gpt-3.5-turbo-instruct`.

  temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
      make the output more random, while lower values like 0.2 will make it more
      focused and deterministic.

      We generally recommend altering this or `top_p` but not both.

  top_p: An alternative to sampling with temperature, called nucleus sampling, where the
      model considers the results of the tokens with top_p probability mass. So 0.1
      means only the tokens comprising the top 10% probability mass are considered.

      We generally recommend altering this or `temperature` but not both.

  user: A unique identifier representing your end-user, which can help OpenAI to monitor
      and detect abuse.
      [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

  extra_headers: Send extra headers

  extra_query: Add additional query parameters to the request

  extra_body: Add additional JSON properties to the request

  timeout: Override the client-level default timeout for this request, in seconds
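
  Example (illustrative sketch only; assumes a client constructed elsewhere, e.g.
  `client = OpenAI()` with an API key configured):

      # `client` is an assumed, already-configured OpenAI() instance.
      completion = client.completions.create(
          model="gpt-3.5-turbo-instruct",
          prompt="Say this is a test.",
          max_tokens=16,
      )
      print(completion.choices[0].text)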
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[Completion]:
        """
Creates a completion for the provided prompt and parameters.

Args:
  model: ID of the model to use. You can use the
      [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      see all of your available models, or see our
      [Model overview](https://platform.openai.com/docs/models) for descriptions of
      them.

  prompt: The prompt(s) to generate completions for, encoded as a string, array of
      strings, array of tokens, or array of token arrays.

      Note that <|endoftext|> is the document separator that the model sees during
      training, so if a prompt is not specified the model will generate as if from the
      beginning of a new document.

  stream: Whether to stream back partial progress. If set, tokens will be sent as
      data-only
      [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
      as they become available, with the stream terminated by a `data: [DONE]`
      message.
      [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

  best_of: Generates `best_of` completions server-side and returns the "best" (the one with
      the highest log probability per token). Results cannot be streamed.

      When used with `n`, `best_of` controls the number of candidate completions and
      `n` specifies how many to return – `best_of` must be greater than `n`.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  echo: Echo back the prompt in addition to the completion

  frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      existing frequency in the text so far, decreasing the model's likelihood to
      repeat the same line verbatim.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  logit_bias: Modify the likelihood of specified tokens appearing in the completion.

      Accepts a JSON object that maps tokens (specified by their token ID in the GPT
      tokenizer) to an associated bias value from -100 to 100. You can use this
      [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
      Mathematically, the bias is added to the logits generated by the model prior to
      sampling. The exact effect will vary per model, but values between -1 and 1
      should decrease or increase likelihood of selection; values like -100 or 100
      should result in a ban or exclusive selection of the relevant token.

      As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
      from being generated.

  logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
      well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
      list of the 5 most likely tokens. The API will always return the `logprob` of
      the sampled token, so there may be up to `logprobs+1` elements in the response.

      The maximum value for `logprobs` is 5.

  max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
      completion.

      The token count of your prompt plus `max_tokens` cannot exceed the model's
      context length.
      [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      for counting tokens.

  n: How many completions to generate for each prompt.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
      whether they appear in the text so far, increasing the model's likelihood to
      talk about new topics.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  seed: If specified, our system will make a best effort to sample deterministically,
      such that repeated requests with the same `seed` and parameters should return
      the same result.

      Determinism is not guaranteed, and you should refer to the `system_fingerprint`
      response parameter to monitor changes in the backend.

  stop: Not supported with latest reasoning models `o3` and `o4-mini`.

      Up to 4 sequences where the API will stop generating further tokens. The
      returned text will not contain the stop sequence.

  stream_options: Options for streaming response. Only set this when you set `stream: true`.

  suffix: The suffix that comes after a completion of inserted text.

      This parameter is only supported for `gpt-3.5-turbo-instruct`.

  temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
      make the output more random, while lower values like 0.2 will make it more
      focused and deterministic.

      We generally recommend altering this or `top_p` but not both.

  top_p: An alternative to sampling with temperature, called nucleus sampling, where the
      model considers the results of the tokens with top_p probability mass. So 0.1
      means only the tokens comprising the top 10% probability mass are considered.

      We generally recommend altering this or `temperature` but not both.

  user: A unique identifier representing your end-user, which can help OpenAI to monitor
      and detect abuse.
      [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

  extra_headers: Send extra headers

  extra_query: Add additional query parameters to the request

  extra_body: Add additional JSON properties to the request

  timeout: Override the client-level default timeout for this request, in seconds
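
  Example (illustrative sketch only; assumes a client constructed elsewhere, e.g.
  `client = OpenAI()`):

      # `client` is an assumed, already-configured OpenAI() instance;
      # each streamed chunk is a `Completion` carrying a partial `text`.
      stream = client.completions.create(
          model="gpt-3.5-turbo-instruct",
          prompt="Say this is a test.",
          stream=True,
      )
      for chunk in stream:
          print(chunk.choices[0].text, end="")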
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        """Same parameters as the overloads above; `stream` may be `True` or `False`."""
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    # `create` accepts the same parameters (and overload shapes) as `Completions.create`
    # above; when `stream=True` it returns an `AsyncStream[Completion]` instead.
    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )