Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 37ccb4b5 authored by Narayan Kamath's avatar Narayan Kamath Committed by Android (Google) Code Review
Browse files

Merge "Remove the completeAudioAvailable API."

parents 1af6d27b c3da8818
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -18024,7 +18024,6 @@ package android.speech.tts {
  public abstract interface SynthesisCallback {
    method public abstract int audioAvailable(byte[], int, int);
    method public abstract int completeAudioAvailable(int, int, int, byte[], int, int);
    method public abstract int done();
    method public abstract void error();
    method public abstract int getMaxBufferSize();
+3 −53
Original line number Diff line number Diff line
@@ -31,8 +31,7 @@ class AudioPlaybackHandler {

    private static final int SYNTHESIS_START = 1;
    private static final int SYNTHESIS_DATA_AVAILABLE = 2;
    private static final int SYNTHESIS_COMPLETE_DATA_AVAILABLE = 3;
    private static final int SYNTHESIS_DONE = 4;
    private static final int SYNTHESIS_DONE = 3;

    private static final int PLAY_AUDIO = 5;
    private static final int PLAY_SILENCE = 6;
@@ -120,10 +119,6 @@ class AudioPlaybackHandler {
        mQueue.add(new ListEntry(SYNTHESIS_DATA_AVAILABLE, token));
    }

    // Queues a "complete audio in a single buffer" event for the playback thread.
    // Counterpart to the one-shot completeAudioAvailable() callback path.
    void enqueueSynthesisCompleteDataAvailable(SynthesisMessageParams token) {
        mQueue.add(new ListEntry(SYNTHESIS_COMPLETE_DATA_AVAILABLE, token));
    }

    // Queues an "engine finished producing audio for this request" event
    // for the playback thread.
    void enqueueSynthesisDone(SynthesisMessageParams token) {
        mQueue.add(new ListEntry(SYNTHESIS_DONE, token));
    }
@@ -280,8 +275,6 @@ class AudioPlaybackHandler {
            handleSynthesisDataAvailable(msg);
        } else if (entry.mWhat == SYNTHESIS_DONE) {
            handleSynthesisDone(msg);
        } else if (entry.mWhat == SYNTHESIS_COMPLETE_DATA_AVAILABLE) {
            handleSynthesisCompleteDataAvailable(msg);
        } else if (entry.mWhat == PLAY_AUDIO) {
            handleAudio(msg);
        } else if (entry.mWhat == PLAY_SILENCE) {
@@ -424,54 +417,11 @@ class AudioPlaybackHandler {
            return;
        }

        final AudioTrack track = params.mAudioTrack;
        final AudioTrack audioTrack = params.mAudioTrack;
        final int bytesPerFrame = getBytesPerFrame(params.mAudioFormat);
        final int lengthInBytes = params.mBytesWritten;
        final int lengthInFrames = lengthInBytes / bytesPerFrame;

        blockUntilDone(track, bytesPerFrame, lengthInBytes);
    }

    /**
     * Plays back a synthesis request whose entire audio was delivered in a
     * single buffer (the completeAudioAvailable() path). Creates a static-mode
     * AudioTrack sized to that one buffer, writes it, plays it to completion,
     * and then runs the normal "done" handling.
     */
    private void handleSynthesisCompleteDataAvailable(MessageParams msg) {
        final SynthesisMessageParams params = (SynthesisMessageParams) msg;
        if (DBG) Log.d(TAG, "completeAudioAvailable(" + params + ")");

        // NOTE(review): playback start is logged before the null-buffer check
        // below, so a start may be recorded even when nothing is played — confirm
        // whether that is intended.
        params.mLogger.onPlaybackStart();

        // Channel config and bytes per frame are checked before
        // this message is sent.
        int channelConfig = AudioPlaybackHandler.getChannelConfig(params.mChannelCount);
        int bytesPerFrame = AudioPlaybackHandler.getBytesPerFrame(params.mAudioFormat);

        // The single buffer queued by completeAudioAvailable().
        SynthesisMessageParams.ListEntry entry = params.getNextBuffer();

        if (entry == null) {
            Log.w(TAG, "completeDataAvailable : No buffers available to play.");
            return;
        }

        // MODE_STATIC: the track buffer is exactly the size of this one entry;
        // all data must be written before play().
        final AudioTrack audioTrack = new AudioTrack(params.mStreamType, params.mSampleRateInHz,
                channelConfig, params.mAudioFormat, entry.mLength, AudioTrack.MODE_STATIC);

        // So that handleDone can access this correctly.
        params.mAudioTrack = audioTrack;

        try {
            // NOTE(review): write() returns a count/error code that is not
            // checked here; a short write would play truncated audio silently.
            audioTrack.write(entry.mBytes, entry.mOffset, entry.mLength);
            setupVolume(audioTrack, params.mVolume, params.mPan);
            audioTrack.play();
            // Blocks this handler thread until the playback head reaches the
            // end of the written data (or playback stops).
            blockUntilDone(audioTrack, bytesPerFrame, entry.mLength);
            if (DBG) Log.d(TAG, "Wrote data to audio track successfully : " + entry.mLength);
        } catch (IllegalStateException ex) {
            Log.e(TAG, "Playback error", ex);
        } finally {
            // Always run the normal completion path (releases the track etc.).
            handleSynthesisDone(msg);
        }
    }


    private static void blockUntilDone(AudioTrack audioTrack, int bytesPerFrame,
            int lengthInBytes) {
        int lengthInFrames = lengthInBytes / bytesPerFrame;
        int currentPosition = 0;
        while ((currentPosition = audioTrack.getPlaybackHeadPosition()) < lengthInFrames) {
            if (audioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
+0 −31
Original line number Diff line number Diff line
@@ -187,37 +187,6 @@ class FileSynthesisCallback extends AbstractSynthesisCallback {
        }
    }

    /**
     * One-shot synthesis-to-file path: writes a WAV header followed by the
     * complete audio buffer to {@code mFileName}.
     *
     * @return {@link TextToSpeech#SUCCESS} on a complete write,
     *         {@link TextToSpeech#ERROR} if the request was stopped or the
     *         write failed (the partial file is deleted on failure).
     */
    @Override
    public int completeAudioAvailable(int sampleRateInHz, int audioFormat, int channelCount,
            byte[] buffer, int offset, int length) {
        // NOTE(review): the stop check and the file write below are not atomic —
        // a stop() arriving after this block still lets the write proceed; confirm
        // that is acceptable.
        synchronized (mStateLock) {
            if (mStopped) {
                if (DBG) Log.d(TAG, "Request has been aborted.");
                return TextToSpeech.ERROR;
            }
        }
        FileOutputStream out = null;
        try {
            out = new FileOutputStream(mFileName);
            // WAV header first, sized for exactly `length` bytes of payload.
            out.write(makeWavHeader(sampleRateInHz, audioFormat, channelCount, length));
            out.write(buffer, offset, length);
            // NOTE(review): mDone is written outside mStateLock while mStopped is
            // read under it — verify mDone's synchronization expectations.
            mDone = true;
            return TextToSpeech.SUCCESS;
        } catch (IOException ex) {
            Log.e(TAG, "Failed to write to " + mFileName + ": " + ex);
            // Don't leave a truncated/corrupt file behind.
            mFileName.delete();
            return TextToSpeech.ERROR;
        } finally {
            // Close errors are logged but do not change the return value.
            try {
                if (out != null) {
                    out.close();
                }
            } catch (IOException ex) {
                Log.e(TAG, "Failed to close " + mFileName + ": " + ex);
            }
        }
    }

    private byte[] makeWavHeader(int sampleRateInHz, int audioFormat, int channelCount,
            int dataLength) {
        // TODO: is AudioFormat.ENCODING_DEFAULT always the same as ENCODING_PCM_16BIT?
+1 −33
Original line number Diff line number Diff line
@@ -53,8 +53,7 @@ class PlaybackSynthesisCallback extends AbstractSynthesisCallback {

    // Handler associated with a thread that plays back audio requests.
    private final AudioPlaybackHandler mAudioTrackHandler;
    // A request "token", which will be non null after start() or
    // completeAudioAvailable() have been called.
    // A request "token", which will be non null after start() has been called.
    private SynthesisMessageParams mToken = null;
    // Whether this request has been stopped. This is useful for keeping
    // track whether stop() has been called before start(). In all other cases,
@@ -206,35 +205,4 @@ class PlaybackSynthesisCallback extends AbstractSynthesisCallback {
        stop();
    }

    /**
     * One-shot playback path: validates the audio parameters, wraps the single
     * buffer in a {@link SynthesisMessageParams} token, and hands it to the
     * audio playback thread as a SYNTHESIS_COMPLETE_DATA_AVAILABLE event.
     *
     * @return {@link TextToSpeech#SUCCESS} if the request was queued,
     *         {@link TextToSpeech#ERROR} on bad parameters or if the request
     *         was already stopped.
     */
    @Override
    public int completeAudioAvailable(int sampleRateInHz, int audioFormat, int channelCount,
            byte[] buffer, int offset, int length) {
        // getChannelConfig() returns 0 for channel counts it cannot map.
        int channelConfig = AudioPlaybackHandler.getChannelConfig(channelCount);
        if (channelConfig == 0) {
            Log.e(TAG, "Unsupported number of channels :" + channelCount);
            return TextToSpeech.ERROR;
        }

        // getBytesPerFrame() is negative for unsupported encodings.
        int bytesPerFrame = AudioPlaybackHandler.getBytesPerFrame(audioFormat);
        if (bytesPerFrame < 0) {
            Log.e(TAG, "Unsupported audio format :" + audioFormat);
            return TextToSpeech.ERROR;
        }

        // Token creation, enqueue and mToken assignment happen under the state
        // lock so a concurrent stop() either beats us (ERROR above) or sees a
        // fully-published token.
        synchronized (mStateLock) {
            if (mStopped) {
                return TextToSpeech.ERROR;
            }
            SynthesisMessageParams params = new SynthesisMessageParams(
                    mStreamType, sampleRateInHz, audioFormat, channelCount, mVolume, mPan,
                    mDispatcher, mCallingApp, mLogger);
            params.addBuffer(buffer, offset, length);

            mAudioTrackHandler.enqueueSynthesisCompleteDataAvailable(params);
            mToken = params;
        }

        return TextToSpeech.SUCCESS;
    }

}
+4 −24
Original line number Diff line number Diff line
@@ -22,19 +22,16 @@ package android.speech.tts;
 * {@link #start}, then {@link #audioAvailable} until all audio has been provided, then finally
 * {@link #done}.
 *
 * Alternatively, the engine can provide all the audio at once, by using
 * {@link #completeAudioAvailable}.
 *
 * {@link #error} can be called at any stage in the synthesis process to
 * indicate that an error has occured, but if the call is made after a call
 * to {@link #done} or {@link #completeAudioAvailable} it might be discarded.
 * indicate that an error has occurred, but if the call is made after a call
 * to {@link #done}, it might be discarded.
 */
public interface SynthesisCallback {
    /**
     * @return the maximum number of bytes that the TTS engine can pass in a single call of
     *         {@link #audioAvailable}. This does not apply to {@link #completeAudioAvailable}.
     *         Calls to {@link #audioAvailable} with data lengths larger than this
     *         value will not succeed.
     *         {@link #audioAvailable}. Calls to {@link #audioAvailable} with data lengths
     *         larger than this value will not succeed.
     */
    public int getMaxBufferSize();

@@ -68,23 +65,6 @@ public interface SynthesisCallback {
     */
    public int audioAvailable(byte[] buffer, int offset, int length);

    /**
     * The service can call this method instead of using {@link #start}, {@link #audioAvailable}
     * and {@link #done} if all the audio data is available in a single buffer.
     *
     * <p>Unlike {@link #audioAvailable}, the buffer length passed here is not
     * limited by {@link #getMaxBufferSize}.
     *
     * @param sampleRateInHz Sample rate in Hz of the generated audio.
     * @param audioFormat Audio format of the generated audio. Must be one of
     *         the ENCODING_ constants defined in {@link android.media.AudioFormat}.
     * @param channelCount The number of channels. Must be {@code 1} or {@code 2}.
     * @param buffer The generated audio data. This method will not hold on to {@code buffer},
     *         so the caller is free to modify it after this method returns.
     * @param offset The offset into {@code buffer} where the audio data starts.
     * @param length The number of bytes of audio data in {@code buffer}.
     * @return {@link TextToSpeech#SUCCESS} or {@link TextToSpeech#ERROR}.
     */
    public int completeAudioAvailable(int sampleRateInHz, int audioFormat,
            int channelCount, byte[] buffer, int offset, int length);

    /**
     * The service should call this method when all the synthesized audio for a request has
     * been passed to {@link #audioAvailable}.