from __future__ import annotations

from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
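

# Illustrative usage sketch (commented out; assumes the public `openai` package
# client, which exposes a `completions` resource backed by the `Completions`
# class above). The model name, prompt text, and token limit below are example
# values, not defaults defined in this module.
#
#   from openai import OpenAI
#
#   client = OpenAI()
#
#   # Non-streaming call: returns a single `Completion` object.
#   completion = client.completions.create(
#       model="gpt-3.5-turbo-instruct",
#       prompt="Say this is a test",
#       max_tokens=16,
#   )
#   print(completion.choices[0].text)
#
#   # Streaming call: returns a `Stream[Completion]` that yields chunks as they
#   # arrive; each chunk carries the newly generated text in `choices[0].text`.
#   for chunk in client.completions.create(
#       model="gpt-3.5-turbo-instruct",
#       prompt="Say this is a test",
#       stream=True,
#   ):
#       print(chunk.choices[0].text, end="")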