Class: Google::Cloud::Speech::V1::RecognitionConfig

Inherits:
Object
  • Object
show all
Defined in:
lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb

Overview

Provides information to the recognizer that specifies how to process the request.

Defined Under Namespace

Modules: AudioEncoding

Instance Attribute Summary collapse

Instance Attribute Details

#enable_word_time_offsets ⇒ true, false

Returns Optional. If +true+, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If +false+, no word-level time offset information is returned. The default is +false+.

Returns:

  • (true, false)

    Optional. If +true+, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If +false+, no word-level time offset information is returned. The default is +false+.



131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#encoding ⇒ Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding

Returns Required. Encoding of audio data sent in all +RecognitionAudio+ messages.

Returns:

  • (Google::Cloud::Speech::V1::RecognitionConfig::AudioEncoding)

    Required. Encoding of audio data sent in all +RecognitionAudio+ messages.

131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#language_code ⇒ String

Returns Required. The language of the supplied audio as a BCP-47 language tag. Example: "en-US". See Language Support for a list of the currently supported language codes.

Returns:

  • (String)

    Required. The language of the supplied audio as a BCP-47 language tag. Example: "en-US". See Language Support for a list of the currently supported language codes.



131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#max_alternatives ⇒ Integer

Returns Optional. Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of +SpeechRecognitionAlternative+ messages within each +SpeechRecognitionResult+. The server may return fewer than +max_alternatives+. Valid values are +0+-+30+. A value of +0+ or +1+ will return a maximum of one. If omitted, will return a maximum of one.

Returns:

  • (Integer)

    Optional. Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of +SpeechRecognitionAlternative+ messages within each +SpeechRecognitionResult+. The server may return fewer than +max_alternatives+. Valid values are +0+-+30+. A value of +0+ or +1+ will return a maximum of one. If omitted, will return a maximum of one.



131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#profanity_filter ⇒ true, false

Returns Optional. If set to +true+, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to +false+ or omitted, profanities won't be filtered out.

Returns:

  • (true, false)

    Optional. If set to +true+, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to +false+ or omitted, profanities won't be filtered out.



131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#sample_rate_hertz ⇒ Integer

Returns Required. Sample rate in Hertz of the audio data sent in all +RecognitionAudio+ messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling).

Returns:

  • (Integer)

    Required. Sample rate in Hertz of the audio data sent in all +RecognitionAudio+ messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling).



131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end

#speech_contexts ⇒ Array<Google::Cloud::Speech::V1::SpeechContext>

Returns Optional. A means to provide context to assist the speech recognition.

Returns:

  • (Array<Google::Cloud::Speech::V1::SpeechContext>)

    Optional. A means to provide context to assist the speech recognition.

131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
# File 'lib/google/cloud/speech/v1/doc/google/cloud/speech/v1/cloud_speech.rb', line 131

class RecognitionConfig
  # Audio encoding of the data sent in the audio message. All encodings support
  # only 1 channel (mono) audio. Only +FLAC+ and +WAV+ include a header that
  # describes the bytes of audio that follow the header. The other encodings
  # are raw audio bytes with no header.
  #
  # For best results, the audio source should be captured and transmitted using
  # a lossless encoding (+FLAC+ or +LINEAR16+). Recognition accuracy may be
  # reduced if lossy codecs, which include the other codecs listed in
  # this section, are used to capture or transmit the audio, particularly if
  # background noise is present.
  module AudioEncoding
    # Not specified. Will return result {Google::Rpc::Code::INVALID_ARGUMENT}.
    ENCODING_UNSPECIFIED = 0

    # Uncompressed 16-bit signed little-endian samples (Linear PCM).
    LINEAR16 = 1

    # [+FLAC+](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    # Codec) is the recommended encoding because it is
    # lossless--therefore recognition is not compromised--and
    # requires only about half the bandwidth of +LINEAR16+. +FLAC+ stream
    # encoding supports 16-bit and 24-bit samples, however, not all fields in
    # +STREAMINFO+ are supported.
    FLAC = 2

    # 8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
    MULAW = 3

    # Adaptive Multi-Rate Narrowband codec. +sample_rate_hertz+ must be 8000.
    AMR = 4

    # Adaptive Multi-Rate Wideband codec. +sample_rate_hertz+ must be 16000.
    AMR_WB = 5

    # Opus encoded audio frames in Ogg container
    # ([OggOpus](https://wiki.xiph.org/OggOpus)).
    # +sample_rate_hertz+ must be 16000.
    OGG_OPUS = 6

    # Although the use of lossy encodings is not recommended, if a very low
    # bitrate encoding is required, +OGG_OPUS+ is highly preferred over
    # Speex encoding. The [Speex](https://speex.org/)  encoding supported by
    # Cloud Speech API has a header byte in each block, as in MIME type
    # +audio/x-speex-with-header-byte+.
    # It is a variant of the RTP Speex encoding defined in
    # [RFC 5574](https://tools.ietf.org/html/rfc5574).
    # The stream is a sequence of blocks, one block per RTP packet. Each block
    # starts with a byte containing the length of the block, in bytes, followed
    # by one or more frames of Speex data, padded to an integral number of
    # bytes (octets) as specified in RFC 5574. In other words, each RTP header
    # is replaced with a single byte containing the block length. Only Speex
    # wideband is supported. +sample_rate_hertz+ must be 16000.
    SPEEX_WITH_HEADER_BYTE = 7
  end
end