from __future__ import annotations

from typing import Union
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
    StreamedBinaryAPIResponse,
    AsyncStreamedBinaryAPIResponse,
    to_custom_streamed_response_wrapper,
    async_to_custom_streamed_response_wrapper,
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
from ...types.audio.speech_model import SpeechModel

__all__ = ["Speech", "AsyncSpeech"]


class Speech(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SpeechWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return SpeechWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SpeechWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return SpeechWithStreamingResponse(self)

    def create(
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        voice: Union[
            str,
            Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"],
        ],
        instructions: str | Omit = omit,
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
        speed: float | Omit = omit,
        stream_format: Literal["sse", "audio"] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.

          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
              `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
              `verse`. Previews of the voices are available in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).

          instructions: Control the voice of your generated audio with additional instructions. Does not
              work with `tts-1` or `tts-1-hd`.

          response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
              `sse` is not supported for `tts-1` or `tts-1-hd`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
        return self._post(
            "/audio/speech",
            body=maybe_transform(
                {
                    "input": input,
                    "model": model,
                    "voice": voice,
                    "instructions": instructions,
                    "response_format": response_format,
                    "speed": speed,
                    "stream_format": stream_format,
                },
                speech_create_params.SpeechCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )


class AsyncSpeech(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncSpeechWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncSpeechWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncSpeechWithStreamingResponse(self)

    async def create(
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        voice: Union[
            str,
            Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"],
        ],
        instructions: str | Omit = omit,
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | Omit = omit,
        speed: float | Omit = omit,
        stream_format: Literal["sse", "audio"] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models#tts):
              `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.

          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
              `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
              `verse`. Previews of the voices are available in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).

          instructions: Control the voice of your generated audio with additional instructions. Does not
              work with `tts-1` or `tts-1-hd`.

          response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          stream_format: The format to stream the audio in. Supported formats are `sse` and `audio`.
              `sse` is not supported for `tts-1` or `tts-1-hd`.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        extra_headers = {"Accept": "application/octet-stream", **(extra_headers or {})}
        return await self._post(
            "/audio/speech",
            body=await async_maybe_transform(
                {
                    "input": input,
                    "model": model,
                    "voice": voice,
                    "instructions": instructions,
                    "response_format": response_format,
                    "speed": speed,
                    "stream_format": stream_format,
                },
                speech_create_params.SpeechCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=_legacy_response.HttpxBinaryResponseContent,
        )


class SpeechWithRawResponse:
    def __init__(self, speech: Speech) -> None:
        self._speech = speech

        self.create = _legacy_response.to_raw_response_wrapper(
            speech.create,
        )


class AsyncSpeechWithRawResponse:
    def __init__(self, speech: AsyncSpeech) -> None:
        self._speech = speech

        self.create = _legacy_response.async_to_raw_response_wrapper(
            speech.create,
        )


class SpeechWithStreamingResponse:
    def __init__(self, speech: Speech) -> None:
        self._speech = speech

        self.create = to_custom_streamed_response_wrapper(
            speech.create,
            StreamedBinaryAPIResponse,
        )


class AsyncSpeechWithStreamingResponse:
    def __init__(self, speech: AsyncSpeech) -> None:
        self._speech = speech

        self.create = async_to_custom_streamed_response_wrapper(
            speech.create,
            AsyncStreamedBinaryAPIResponse,
        )
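# Illustrative usage sketch (not part of this module): the resource is normally reached
# through a configured client as `client.audio.speech`. This assumes an `OPENAI_API_KEY`
# in the environment; the model, voice, and output filename are example values taken
# from the docstring above.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     # Stream the generated audio to a file instead of buffering it all in memory.
#     with client.audio.speech.with_streaming_response.create(
#         model="gpt-4o-mini-tts",
#         voice="alloy",
#         input="Hello, world!",
#     ) as response:
#         response.stream_to_file("hello.mp3")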