Support for Signal calls.

Merge in RedPhone

// FREEBIE
This commit is contained in:
Moxie Marlinspike
2015-09-09 13:54:29 -07:00
parent 3d4ae60d81
commit d83a3d71bc
2585 changed files with 803492 additions and 45 deletions

View File

@@ -0,0 +1,5 @@
# These are for the common case of adding or renaming files. If you're doing
# structural changes, please get a review from a reviewer in this file.
per-file *.gyp=*
per-file *.gypi=*

View File

@@ -0,0 +1,295 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_

#include "webrtc/typedefs.h"

#ifdef __cplusplus
extern "C" {
#endif

// Opaque wrapper types for the codec state.
typedef struct WebRtcOpusEncInst OpusEncInst;
typedef struct WebRtcOpusDecInst OpusDecInst;

/****************************************************************************
 * WebRtcOpus_EncoderCreate(...)
 *
 * Allocates and initializes an Opus encoder context.
 *
 * Input:
 *      - channels           : Number of channels.
 *
 * Output:
 *      - inst               : Pointer to the newly created encoder context.
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst, int32_t channels);

/* Releases the encoder context. Returns 0 on success, -1 if |inst| is NULL. */
int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);

/****************************************************************************
 * WebRtcOpus_Encode(...)
 *
 * This function encodes audio as a series of Opus frames and inserts
 * it into a packet. Input buffer can be any length.
 *
 * Input:
 *      - inst                  : Encoder context
 *      - audio_in              : Input speech data buffer
 *      - samples               : Samples per channel in audio_in
 *      - length_encoded_buffer : Output buffer size
 *
 * Output:
 *      - encoded               : Output compressed data buffer
 *
 * Return value                 : >0 - Length (in bytes) of coded data
 *                                -1 - Error
 */
int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
                          int16_t length_encoded_buffer, uint8_t* encoded);

/****************************************************************************
 * WebRtcOpus_SetBitRate(...)
 *
 * This function adjusts the target bitrate of the encoder.
 *
 * Input:
 *      - inst               : Encoder context
 *      - rate               : New target bitrate
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate);

/****************************************************************************
 * WebRtcOpus_SetPacketLossRate(...)
 *
 * This function configures the encoder's expected packet loss percentage.
 *
 * Input:
 *      - inst               : Encoder context
 *      - loss_rate          : loss percentage in the range 0-100, inclusive.
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate);

/****************************************************************************
 * WebRtcOpus_SetMaxBandwidth(...)
 *
 * Configures the maximum bandwidth for encoding. This can be taken as a hint
 * about the maximum output bandwidth that the receiver is capable to render,
 * due to hardware limitations. Sending signals with higher audio bandwidth
 * results in higher than necessary network usage and encoding complexity.
 *
 * Input:
 *      - inst               : Encoder context
 *      - bandwidth          : Maximum encoding bandwidth in Hz.
 *                             This parameter can take any value, but values
 *                             other than Opus typical bandwidths: 4000, 6000,
 *                             8000, 12000, and 20000 will be rounded up (values
 *                             greater than 20000 will be rounded down) to
 *                             these values.
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_SetMaxBandwidth(OpusEncInst* inst, int32_t bandwidth);

/* TODO(minyue): Check whether an API to check the FEC and the packet loss rate
 * is needed. It might not be very useful since there are not many use cases and
 * the caller can always maintain the states. */

/****************************************************************************
 * WebRtcOpus_EnableFec()
 *
 * This function enables Opus in-band FEC for encoding.
 *
 * Input:
 *      - inst               : Encoder context
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_EnableFec(OpusEncInst* inst);

/****************************************************************************
 * WebRtcOpus_DisableFec()
 *
 * This function disables Opus in-band FEC for encoding.
 *
 * Input:
 *      - inst               : Encoder context
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_DisableFec(OpusEncInst* inst);

/*
 * WebRtcOpus_SetComplexity(...)
 *
 * This function adjusts the computational complexity. The effect is the same as
 * calling the complexity setting of Opus as an Opus encoder related CTL.
 *
 * Input:
 *      - inst               : Encoder context
 *      - complexity         : New target complexity (0-10, inclusive)
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity);
/****************************************************************************
 * WebRtcOpus_DecoderCreate(...)
 *
 * Allocates and initializes an Opus decoder context.
 *
 * Input:
 *      - channels           : Number of channels.
 *
 * Output:
 *      - inst               : Pointer to the newly created decoder context.
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels);

/* Releases the decoder context. Returns 0 on success, -1 if |inst| is NULL. */
int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);

/****************************************************************************
 * WebRtcOpus_DecoderChannels(...)
 *
 * This function returns the number of channels created for Opus decoder.
 */
int WebRtcOpus_DecoderChannels(OpusDecInst* inst);

/****************************************************************************
 * WebRtcOpus_DecoderInit(...)
 *
 * This function resets state of the decoder.
 * The three variants reset the primary ("new"), master (left-channel) and
 * slave (right-channel) decoder states respectively; master/slave exist for
 * NetEQ's two-call stereo decoding.
 *
 * Input:
 *      - inst               : Decoder context
 *
 * Return value              :  0 - Success
 *                             -1 - Error
 */
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst);

/****************************************************************************
 * WebRtcOpus_Decode(...)
 *
 * This function decodes an Opus packet into one or more audio frames at the
 * ACM interface's sampling rate (32 kHz).
 * NOTE(review): the decoders in opus_interface.c are created at 48 kHz; the
 * "32 kHz" above looks outdated — confirm against the ACM resampling path.
 *
 * Input:
 *      - inst               : Decoder context
 *      - encoded            : Encoded data
 *      - encoded_bytes      : Bytes in encoded vector
 *
 * Output:
 *      - decoded            : The decoded vector
 *      - audio_type         : 1 normal, 2 CNG (for Opus it should
 *                             always return 1 since we're not using Opus's
 *                             built-in DTX/CNG scheme)
 *
 * Return value              : >0 - Samples per channel in decoded vector
 *                             -1 - Error
 */
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
                             int16_t encoded_bytes, int16_t* decoded,
                             int16_t* audio_type);
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
                          int16_t encoded_bytes, int16_t* decoded,
                          int16_t* audio_type);
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
                               int16_t encoded_bytes, int16_t* decoded,
                               int16_t* audio_type);

/****************************************************************************
 * WebRtcOpus_DecodePlc(...)
 * TODO(tlegrand): Remove master and slave functions when NetEq4 is in place.
 * WebRtcOpus_DecodePlcMaster(...)
 * WebRtcOpus_DecodePlcSlave(...)
 *
 * This function processes PLC for opus frame(s).
 *
 * Input:
 *      - inst                  : Decoder context
 *      - number_of_lost_frames : Number of PLC frames to produce
 *
 * Output:
 *      - decoded               : The decoded vector
 *
 * Return value                 : >0 - number of samples in decoded PLC vector
 *                                -1 - Error
 */
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
                             int16_t number_of_lost_frames);
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
                                   int16_t number_of_lost_frames);
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
                                  int16_t number_of_lost_frames);

/****************************************************************************
 * WebRtcOpus_DecodeFec(...)
 *
 * This function decodes the FEC data from an Opus packet into one or more audio
 * frames at the ACM interface's sampling rate (32 kHz).
 * NOTE(review): see the sampling-rate note on WebRtcOpus_Decode above.
 *
 * Input:
 *      - inst               : Decoder context
 *      - encoded            : Encoded data
 *      - encoded_bytes      : Bytes in encoded vector
 *
 * Output:
 *      - decoded            : The decoded vector (previous frame)
 *
 * Return value              : >0 - Samples per channel in decoded vector
 *                              0 - No FEC data in the packet
 *                             -1 - Error
 */
int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
                             int16_t encoded_bytes, int16_t* decoded,
                             int16_t* audio_type);

/****************************************************************************
 * WebRtcOpus_DurationEst(...)
 *
 * This function calculates the duration of an opus packet.
 *
 * Input:
 *      - inst                 : Decoder context
 *      - payload              : Encoded data pointer
 *      - payload_length_bytes : Bytes of encoded data
 *
 * Return value               : The duration of the packet, in samples.
 */
int WebRtcOpus_DurationEst(OpusDecInst* inst,
                           const uint8_t* payload,
                           int payload_length_bytes);

/* TODO(minyue): Check whether it is needed to add a decoder context to the
 * arguments, like WebRtcOpus_DurationEst(...). In fact, the packet itself tells
 * the duration. The decoder context in WebRtcOpus_DurationEst(...) is not used.
 * So it may be advisable to remove it from WebRtcOpus_DurationEst(...). */

/****************************************************************************
 * WebRtcOpus_FecDurationEst(...)
 *
 * This function calculates the duration of the FEC data within an opus packet.
 *
 * Input:
 *      - payload              : Encoded data pointer
 *      - payload_length_bytes : Bytes of encoded data
 *
 * Return value               : >0 - The duration of the FEC data in the
 *                                   packet in samples.
 *                               0 - No FEC data in the packet.
 */
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
                              int payload_length_bytes);

/****************************************************************************
 * WebRtcOpus_PacketHasFec(...)
 *
 * This function detects if an opus packet has FEC.
 *
 * Input:
 *      - payload              : Encoded data pointer
 *      - payload_length_bytes : Bytes of encoded data
 *
 * Return value               : 0 - the packet does NOT contain FEC.
 *                              1 - the packet contains FEC.
 */
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
                            int payload_length_bytes);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_

View File

@@ -0,0 +1,58 @@
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
  'targets': [
    {
      # Static library wrapping libopus behind the WebRtcOpus_* C API.
      'target_name': 'webrtc_opus',
      'type': 'static_library',
      'conditions': [
        ['build_with_mozilla==1', {
          # Mozilla provides its own build of the opus library.
          'include_dirs': [
            '$(DIST)/include/opus',
          ]
        }, {
          # Default: build and link the in-tree third-party opus.
          'dependencies': [
            '<(DEPTH)/third_party/opus/opus.gyp:opus'
          ],
        }],
      ],
      'include_dirs': [
        '<(webrtc_root)',
      ],
      'sources': [
        'interface/opus_interface.h',
        'opus_inst.h',
        'opus_interface.c',
      ],
    },
  ],
  'conditions': [
    # Test binaries are only built when the tree is configured with tests.
    ['include_tests==1', {
      'targets': [
        {
          # gtest-based FEC behavior test (opus_fec_test.cc).
          'target_name': 'webrtc_opus_fec_test',
          'type': 'executable',
          'dependencies': [
            'webrtc_opus',
            '<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
            '<(webrtc_root)/test/test.gyp:test_support_main',
            '<(DEPTH)/testing/gtest.gyp:gtest',
          ],
          'include_dirs': [
            '<(webrtc_root)',
          ],
          'sources': [
            'opus_fec_test.cc',
          ],
        },
      ],
    }],
  ],
}

View File

@@ -0,0 +1,238 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
using ::std::string;
using ::std::tr1::tuple;
using ::std::tr1::get;
using ::testing::TestWithParam;
namespace webrtc {
// Coding parameter tuple: <channels, bit rate (bps), resource file name,
// file extension>.
typedef tuple<int, int, string, string> coding_param;
typedef struct mode mode;
// One test configuration: whether in-band FEC is enabled and which packet
// loss percentage the encoder is told to expect.
struct mode {
  bool fec;
  uint8_t target_packet_loss_rate;
};
// The test always codes 20 ms blocks at 48 kHz.
const int kOpusBlockDurationMs = 20;
const int kOpusSamplingKhz = 48;
// Parameterized fixture exercising Opus in-band FEC under simulated packet
// loss. Parameters come from |coding_param| (channels, bit rate, input file).
class OpusFecTest : public TestWithParam<coding_param> {
 protected:
  OpusFecTest();

  virtual void SetUp();
  virtual void TearDown();

  // Encodes one block starting at |data_pointer_| into |bit_stream_|.
  virtual void EncodeABlock();
  // Decodes the current block; recovers the previous one via FEC or PLC
  // when |lost_previous| is set.
  virtual void DecodeABlock(bool lost_previous, bool lost_current);

  int block_duration_ms_;
  int sampling_khz_;
  int block_length_sample_;      // Samples per channel in one block.

  int channels_;
  int bit_rate_;

  size_t data_pointer_;          // Read offset into |in_data_| (samples).
  size_t loop_length_samples_;   // Total samples read from the input file.
  int max_bytes_;                // Capacity of |bit_stream_| in bytes.
  int encoded_bytes_;            // Payload size of the last encoded block.

  WebRtcOpusEncInst* opus_encoder_;
  WebRtcOpusDecInst* opus_decoder_;

  string in_filename_;

  scoped_ptr<int16_t[]> in_data_;
  scoped_ptr<int16_t[]> out_data_;
  scoped_ptr<uint8_t[]> bit_stream_;
};
// Loads the input audio resource, sizes all buffers and creates the codec
// instances. Runs after the constructor, so |block_length_sample_| is valid.
void OpusFecTest::SetUp() {
  channels_ = get<0>(GetParam());
  bit_rate_ = get<1>(GetParam());
  printf("Coding %d channel signal at %d bps.\n", channels_, bit_rate_);

  in_filename_ = test::ResourcePath(get<2>(GetParam()), get<3>(GetParam()));
  FILE* fp = fopen(in_filename_.c_str(), "rb");
  ASSERT_FALSE(fp == NULL);

  // Obtain file size.
  fseek(fp, 0, SEEK_END);
  loop_length_samples_ = ftell(fp) / sizeof(int16_t);
  rewind(fp);

  // Allocate memory to contain the whole file.
  in_data_.reset(new int16_t[loop_length_samples_ +
                             block_length_sample_ * channels_]);

  // Copy the file into the buffer.
  ASSERT_EQ(fread(&in_data_[0], sizeof(int16_t), loop_length_samples_, fp),
            loop_length_samples_);
  fclose(fp);

  // The audio is used in a looped manner. To ease the acquisition of a frame
  // that crosses the end of the excerpt, an extra block length of samples is
  // appended to the array, copied from its beginning. Frames that wrap around
  // the end of the excerpt therefore always appear contiguous in memory.
  memcpy(&in_data_[loop_length_samples_], &in_data_[0],
         block_length_sample_ * channels_ * sizeof(int16_t));

  // Maximum number of bytes in output bitstream.
  max_bytes_ = block_length_sample_ * channels_ * sizeof(int16_t);

  out_data_.reset(new int16_t[2 * block_length_sample_ * channels_]);
  bit_stream_.reset(new uint8_t[max_bytes_]);

  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));

  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
}
// Releases the codec instances created in SetUp().
void OpusFecTest::TearDown() {
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
}
// Members are initialized in declaration order; |block_length_sample_| may
// therefore safely depend on |block_duration_ms_| and |sampling_khz_|.
OpusFecTest::OpusFecTest()
    : block_duration_ms_(kOpusBlockDurationMs),
      sampling_khz_(kOpusSamplingKhz),
      block_length_sample_(block_duration_ms_ * sampling_khz_),
      data_pointer_(0),
      max_bytes_(0),
      encoded_bytes_(0),
      opus_encoder_(NULL),
      opus_decoder_(NULL) {
}
// Encodes one block at |data_pointer_| and records its payload size in
// |encoded_bytes_| for the subsequent decode.
void OpusFecTest::EncodeABlock() {
  int16_t value = WebRtcOpus_Encode(opus_encoder_,
                                    &in_data_[data_pointer_],
                                    block_length_sample_,
                                    max_bytes_, &bit_stream_[0]);
  EXPECT_GT(value, 0);

  encoded_bytes_ = value;
}
// Decodes one block. If the previous packet was lost, it is first recovered
// from the current packet's FEC data (when present and the current packet
// arrived) or synthesized via PLC; the current frame is then decoded right
// after the recovered one in |out_data_| (offset by |value_1| samples/channel).
void OpusFecTest::DecodeABlock(bool lost_previous, bool lost_current) {
  int16_t audio_type;
  int16_t value_1 = 0, value_2 = 0;

  if (lost_previous) {
    // Decode previous frame.
    if (!lost_current &&
        WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_) == 1) {
      value_1 = WebRtcOpus_DecodeFec(opus_decoder_, &bit_stream_[0],
                                     encoded_bytes_, &out_data_[0],
                                     &audio_type);
    } else {
      value_1 = WebRtcOpus_DecodePlc(opus_decoder_, &out_data_[0], 1);
    }
    EXPECT_EQ(block_length_sample_, value_1);
  }

  if (!lost_current) {
    // Decode current frame.
    value_2 = WebRtcOpus_DecodeNew(opus_decoder_, &bit_stream_[0],
                                   encoded_bytes_,
                                   &out_data_[value_1 * channels_],
                                   &audio_type);
    EXPECT_EQ(block_length_sample_, value_2);
  }
}
// Runs three FEC/loss-rate configurations for 200 s of audio each, losing
// packets at random with the configured probability, and checks that FEC
// appears in the bitstream exactly when it should.
TEST_P(OpusFecTest, RandomPacketLossTest) {
  const int kDurationMs = 200000;
  int time_now_ms, fec_frames;
  int actual_packet_loss_rate;
  bool lost_current, lost_previous;
  mode mode_set[3] = {{true, 0},
                      {false, 0},
                      {true, 50}};

  lost_current = false;
  for (int i = 0; i < 3; i++) {
    if (mode_set[i].fec) {
      EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_encoder_));
      EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_encoder_,
          mode_set[i].target_packet_loss_rate));
      printf("FEC is ON, target at packet loss rate %d percent.\n",
             mode_set[i].target_packet_loss_rate);
    } else {
      EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_encoder_));
      printf("FEC is OFF.\n");
    }
    // In this test, we let the target packet loss rate match the actual rate.
    actual_packet_loss_rate = mode_set[i].target_packet_loss_rate;
    // Run every mode a certain time.
    time_now_ms = 0;
    fec_frames = 0;
    while (time_now_ms < kDurationMs) {
      // Encode & decode.
      EncodeABlock();

      // Check if payload has FEC.
      int16_t fec = WebRtcOpus_PacketHasFec(&bit_stream_[0], encoded_bytes_);

      // If FEC is disabled or the target packet loss rate is set to 0, there
      // should be no FEC in the bit stream.
      if (!mode_set[i].fec || mode_set[i].target_packet_loss_rate == 0) {
        EXPECT_EQ(fec, 0);
      } else if (fec == 1) {
        fec_frames++;
      }

      // Simulate an independent loss for each packet at the configured rate.
      lost_previous = lost_current;
      lost_current = rand() < actual_packet_loss_rate * (RAND_MAX / 100);
      DecodeABlock(lost_previous, lost_current);

      time_now_ms += block_duration_ms_;

      // |data_pointer_| is incremented and wrapped across
      // |loop_length_samples_|.
      data_pointer_ = (data_pointer_ + block_length_sample_ * channels_) %
        loop_length_samples_;
    }
    if (mode_set[i].fec) {
      // Total frames = kDurationMs / block_duration_ms_ = 10000, so the
      // percentage is fec_frames * block_duration_ms_ / 2000.
      printf("%.2f percent frames has FEC.\n",
             static_cast<float>(fec_frames) * block_duration_ms_ / 2000);
    }
  }
}
// Test matrix: mono @ 64/32 kbps and stereo @ 64 kbps, 32 kHz PCM resources.
const coding_param param_set[] =
    {::std::tr1::make_tuple(1, 64000, string("audio_coding/testfile32kHz"),
                            string("pcm")),
     ::std::tr1::make_tuple(1, 32000, string("audio_coding/testfile32kHz"),
                            string("pcm")),
     ::std::tr1::make_tuple(2, 64000, string("audio_coding/teststereo32kHz"),
                            string("pcm"))};

INSTANTIATE_TEST_CASE_P(AllTest, OpusFecTest,
                        ::testing::ValuesIn(param_set));
} // namespace webrtc

View File

@@ -0,0 +1,28 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_
#include "opus.h"
/* Encoder state: thin wrapper around a single libopus encoder. */
struct WebRtcOpusEncInst {
  OpusEncoder* encoder;
};

/* Decoder state. Two libopus decoders are kept so NetEQ's two-call stereo
 * scheme can decode left (master) and right (slave) channels separately.
 * |prev_decoded_samples| is the last frame length, used to size PLC output. */
struct WebRtcOpusDecInst {
  OpusDecoder* decoder_left;
  OpusDecoder* decoder_right;
  int prev_decoded_samples;
  int channels;
};
#endif // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_OPUS_INST_H_

View File

@@ -0,0 +1,548 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
#include <stdlib.h>
#include <string.h>
/* Compile-time limits; all sample counts assume the 48 kHz Opus clock. */
enum {
  /* Maximum supported frame size in WebRTC is 60 ms. */
  kWebRtcOpusMaxEncodeFrameSizeMs = 60,

  /* The format allows up to 120 ms frames. Since we don't control the other
   * side, we must allow for packets of that size. NetEq is currently limited
   * to 60 ms on the receive side. */
  kWebRtcOpusMaxDecodeFrameSizeMs = 120,

  /* Maximum sample count per channel is 48 kHz * maximum frame size in
   * milliseconds. */
  kWebRtcOpusMaxFrameSizePerChannel = 48 * kWebRtcOpusMaxDecodeFrameSizeMs,

  /* Default frame size, 20 ms @ 48 kHz, in samples (for one channel). */
  kWebRtcOpusDefaultFrameSize = 960,
};
/* Allocates and initializes an Opus encoder wrapper at 48 kHz.
 * On success stores the new state in |*inst| and returns 0; returns -1 on
 * any failure (NULL output pointer, allocation failure, libopus error). */
int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst, int32_t channels) {
  OpusEncInst* state;
  int application;
  int error;

  if (inst == NULL) {
    return -1;
  }
  state = (OpusEncInst*) calloc(1, sizeof(OpusEncInst));
  if (state == NULL) {
    return -1;
  }
  /* Default to VoIP application for mono, and AUDIO for stereo. */
  application = (channels == 1) ? OPUS_APPLICATION_VOIP
                                : OPUS_APPLICATION_AUDIO;
  state->encoder = opus_encoder_create(48000, channels, application, &error);
  if (error != OPUS_OK || state->encoder == NULL) {
    free(state);
    return -1;
  }
  *inst = state;
  return 0;
}
/* Destroys the libopus encoder and releases the wrapper.
 * Returns 0 on success, -1 when |inst| is NULL. */
int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
  if (inst == NULL) {
    return -1;
  }
  opus_encoder_destroy(inst->encoder);
  free(inst);
  return 0;
}
/* Encodes |samples| samples per channel from |audio_in| into |encoded|.
 * Returns the payload length in bytes (>0), or -1 when the frame exceeds the
 * 60 ms WebRTC maximum or libopus reports an error. */
int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
                          int16_t length_encoded_buffer, uint8_t* encoded) {
  int res;

  /* Reject frames longer than the supported encode maximum. */
  if (samples > 48 * kWebRtcOpusMaxEncodeFrameSizeMs) {
    return -1;
  }

  res = opus_encode(inst->encoder, (opus_int16*) audio_in, samples,
                    (unsigned char*) encoded, length_encoded_buffer);

  return (res > 0) ? res : -1;
}
/* Sets the encoder's target bitrate via OPUS_SET_BITRATE.
 * Returns the libopus CTL result (0 on success), or -1 when |inst| is NULL. */
int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
}
/* Tells the encoder the expected packet loss percentage (0-100), which
 * influences how much in-band FEC it emits. Returns the CTL result, or -1
 * when |inst| is NULL. */
int16_t WebRtcOpus_SetPacketLossRate(OpusEncInst* inst, int32_t loss_rate) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder,
                          OPUS_SET_PACKET_LOSS_PERC(loss_rate));
}
/* Caps the encoder's audio bandwidth. The requested value in Hz is snapped to
 * an Opus bandwidth class: <=4k NB, <=6k MB, <=8k WB, <=12k SWB, else FB. */
int16_t WebRtcOpus_SetMaxBandwidth(OpusEncInst* inst, int32_t bandwidth) {
  opus_int32 bw = OPUS_BANDWIDTH_FULLBAND;

  if (inst == NULL)
    return -1;

  if (bandwidth <= 4000)
    bw = OPUS_BANDWIDTH_NARROWBAND;
  else if (bandwidth <= 6000)
    bw = OPUS_BANDWIDTH_MEDIUMBAND;
  else if (bandwidth <= 8000)
    bw = OPUS_BANDWIDTH_WIDEBAND;
  else if (bandwidth <= 12000)
    bw = OPUS_BANDWIDTH_SUPERWIDEBAND;

  return opus_encoder_ctl(inst->encoder, OPUS_SET_MAX_BANDWIDTH(bw));
}
/* Turns Opus in-band FEC on. Returns the CTL result, or -1 if |inst| is
 * NULL. */
int16_t WebRtcOpus_EnableFec(OpusEncInst* inst) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder, OPUS_SET_INBAND_FEC(1));
}

/* Turns Opus in-band FEC off. Returns the CTL result, or -1 if |inst| is
 * NULL. */
int16_t WebRtcOpus_DisableFec(OpusEncInst* inst) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder, OPUS_SET_INBAND_FEC(0));
}
/* Sets the encoder's computational complexity (0-10). Returns the CTL
 * result, or -1 when |inst| is NULL. */
int16_t WebRtcOpus_SetComplexity(OpusEncInst* inst, int32_t complexity) {
  if (inst == NULL) {
    return -1;
  }
  return opus_encoder_ctl(inst->encoder, OPUS_SET_COMPLEXITY(complexity));
}
/* Allocates a decoder wrapper holding two 48 kHz libopus decoders (left and
 * right channel, for NetEQ's two-call stereo scheme). Returns 0 and stores
 * the state in |*inst| on success; -1 on any failure, with all partial
 * allocations released.
 * NOTE(review): both decoders are created with |channels| channels, not one
 * channel each — presumably each decodes the full stream and the caller picks
 * one channel; confirm against the Decode/DecodeSlave call sites. */
int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
  int error_l;
  int error_r;
  OpusDecInst* state;

  if (inst != NULL) {
    /* Create Opus decoder state. */
    state = (OpusDecInst*) calloc(1, sizeof(OpusDecInst));
    if (state == NULL) {
      return -1;
    }

    /* Create new memory for left and right channel, always at 48000 Hz. */
    state->decoder_left = opus_decoder_create(48000, channels, &error_l);
    state->decoder_right = opus_decoder_create(48000, channels, &error_r);
    if (error_l == OPUS_OK && error_r == OPUS_OK && state->decoder_left != NULL
        && state->decoder_right != NULL) {
      /* Creation of memory all ok. */
      state->channels = channels;
      /* Seed the PLC length with the default 20 ms frame size. */
      state->prev_decoded_samples = kWebRtcOpusDefaultFrameSize;
      *inst = state;
      return 0;
    }

    /* If memory allocation was unsuccessful, free the entire state. */
    if (state->decoder_left) {
      opus_decoder_destroy(state->decoder_left);
    }
    if (state->decoder_right) {
      opus_decoder_destroy(state->decoder_right);
    }
    free(state);
  }
  return -1;
}
/* Destroys both channel decoders and releases the wrapper.
 * Returns 0 on success, -1 when |inst| is NULL. */
int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
  if (inst == NULL) {
    return -1;
  }
  opus_decoder_destroy(inst->decoder_left);
  opus_decoder_destroy(inst->decoder_right);
  free(inst);
  return 0;
}
/* Returns the channel count this decoder instance was created with. */
int WebRtcOpus_DecoderChannels(OpusDecInst* inst) {
  return inst->channels;
}
/* Resets the primary ("new" API) decoder state. Returns 0 on success. */
int16_t WebRtcOpus_DecoderInitNew(OpusDecInst* inst) {
  return (opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE) == OPUS_OK)
      ? 0 : -1;
}

/* Resets the master (left-channel) decoder state. Returns 0 on success. */
int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
  return (opus_decoder_ctl(inst->decoder_left, OPUS_RESET_STATE) == OPUS_OK)
      ? 0 : -1;
}

/* Resets the slave (right-channel) decoder state. Returns 0 on success. */
int16_t WebRtcOpus_DecoderInitSlave(OpusDecInst* inst) {
  return (opus_decoder_ctl(inst->decoder_right, OPUS_RESET_STATE) == OPUS_OK)
      ? 0 : -1;
}
/* |frame_size| is set to maximum Opus frame size in the normal case, and
 * is set to the number of samples needed for PLC in case of losses.
 * It is up to the caller to make sure the value is correct. */

/* Shared decode helper for DecodeNative()/DecodeFec(). |decode_fec| is
 * forwarded to opus_decode(): 0 decodes the regular payload, 1 decodes the
 * in-band FEC data. |encoded| == NULL with |encoded_bytes| == 0 triggers PLC.
 * Returns the number of decoded samples per channel, or -1 on error.
 * (Previously the two wrappers below were verbatim copies differing only in
 * this flag; deduplicated here.) */
static int DecodeNativeImpl(OpusDecoder* inst, const int16_t* encoded,
                            int16_t encoded_bytes, int frame_size,
                            int16_t* decoded, int16_t* audio_type,
                            int decode_fec) {
  unsigned char* coded = (unsigned char*) encoded;
  opus_int16* audio = (opus_int16*) decoded;
  int res = opus_decode(inst, coded, encoded_bytes, audio, frame_size,
                        decode_fec);

  /* TODO(tlegrand): set to DTX for zero-length packets? */
  *audio_type = 0;

  return (res > 0) ? res : -1;
}

static int DecodeNative(OpusDecoder* inst, const int16_t* encoded,
                        int16_t encoded_bytes, int frame_size,
                        int16_t* decoded, int16_t* audio_type) {
  return DecodeNativeImpl(inst, encoded, encoded_bytes, frame_size,
                          decoded, audio_type, 0);
}

static int DecodeFec(OpusDecoder* inst, const int16_t* encoded,
                     int16_t encoded_bytes, int frame_size,
                     int16_t* decoded, int16_t* audio_type) {
  return DecodeNativeImpl(inst, encoded, encoded_bytes, frame_size,
                          decoded, audio_type, 1);
}
/* Decodes a packet with the primary decoder and records the frame length for
 * later PLC. Returns samples per channel (>0), or -1 on decode error. */
int16_t WebRtcOpus_DecodeNew(OpusDecInst* inst, const uint8_t* encoded,
                             int16_t encoded_bytes, int16_t* decoded,
                             int16_t* audio_type) {
  int decoded_samples = DecodeNative(inst->decoder_left,
                                     (int16_t*) encoded,
                                     encoded_bytes,
                                     kWebRtcOpusMaxFrameSizePerChannel,
                                     decoded, audio_type);
  if (decoded_samples < 0) {
    return -1;
  }

  /* Update decoded sample memory, to be used by the PLC in case of losses. */
  inst->prev_decoded_samples = decoded_samples;

  return decoded_samples;
}
/* Decodes a packet as the "master" (left-channel) call of NetEQ's two-call
 * stereo scheme. Returns samples per channel (>0), or -1 on error. */
int16_t WebRtcOpus_Decode(OpusDecInst* inst, const int16_t* encoded,
                          int16_t encoded_bytes, int16_t* decoded,
                          int16_t* audio_type) {
  int decoded_samples;
  int i;

  /* If mono case, just do a regular call to the decoder.
   * If stereo, call to WebRtcOpus_Decode() gives left channel as output, and
   * calls to WebRtcOpus_DecodeSlave() give right channel as output.
   * This is to make stereo work with the current setup of NetEQ, which
   * requires two calls to the decoder to produce stereo. */
  decoded_samples = DecodeNative(inst->decoder_left, encoded, encoded_bytes,
                                 kWebRtcOpusMaxFrameSizePerChannel, decoded,
                                 audio_type);
  if (decoded_samples < 0) {
    return -1;
  }
  if (inst->channels == 2) {
    /* |decoded| holds |decoded_samples| interleaved L/R sample pairs (twice
     * |decoded_samples| values in total); compact the left channel — every
     * even index — into the front of the buffer. */
    for (i = 0; i < decoded_samples; i++) {
      /* Take every second sample, starting at the first sample. This gives
       * the left channel. */
      decoded[i] = decoded[i * 2];
    }
  }

  /* Update decoded sample memory, to be used by the PLC in case of losses. */
  inst->prev_decoded_samples = decoded_samples;

  return decoded_samples;
}
/* Decodes a packet as the "slave" (right-channel) call of NetEQ's two-call
 * stereo scheme. Returns samples per channel (>0), or -1 on error.
 *
 * Fix: the mono-channel guard previously ran AFTER decoding, so an erroneous
 * mono call still advanced the right-channel decoder state and overwrote
 * |decoded| before returning -1. Reject mono up front instead, matching
 * WebRtcOpus_DecodePlcSlave(). */
int16_t WebRtcOpus_DecodeSlave(OpusDecInst* inst, const int16_t* encoded,
                               int16_t encoded_bytes, int16_t* decoded,
                               int16_t* audio_type) {
  int decoded_samples;
  int i;

  /* Decode slave should never be called for mono packets. */
  if (inst->channels != 2) {
    return -1;
  }

  decoded_samples = DecodeNative(inst->decoder_right, encoded, encoded_bytes,
                                 kWebRtcOpusMaxFrameSizePerChannel, decoded,
                                 audio_type);
  if (decoded_samples < 0) {
    return -1;
  }

  /* |decoded| holds |decoded_samples| interleaved L/R sample pairs; compact
   * the right channel — every odd index — into the front of the buffer. */
  for (i = 0; i < decoded_samples; i++) {
    /* Take every second sample, starting at the second sample. This gives
     * the right channel. */
    decoded[i] = decoded[i * 2 + 1];
  }

  return decoded_samples;
}
/* Produces packet-loss concealment audio via opus_decode(NULL, ...).
 * Returns the number of concealment samples generated, or -1 on error. */
int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
                             int16_t number_of_lost_frames) {
  int16_t audio_type = 0;
  int decoded_samples;
  /* Request |number_of_lost_frames| times the previous frame length, capped
   * at |kWebRtcOpusMaxFrameSizePerChannel|. */
  int plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
  if (plc_samples > kWebRtcOpusMaxFrameSizePerChannel) {
    plc_samples = kWebRtcOpusMaxFrameSizePerChannel;
  }

  decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
                                 decoded, &audio_type);
  return (decoded_samples < 0) ? -1 : decoded_samples;
}
/* PLC for the master (left-channel) call of NetEQ's two-call stereo scheme.
 * Returns the number of concealment samples per channel, or -1 on error. */
int16_t WebRtcOpus_DecodePlcMaster(OpusDecInst* inst, int16_t* decoded,
                                   int16_t number_of_lost_frames) {
  int decoded_samples;
  int16_t audio_type = 0;
  int plc_samples;
  int i;

  /* If mono case, just do a regular call to the decoder.
   * If stereo, call to WebRtcOpus_DecodePlcMaster() gives left channel as
   * output, and calls to WebRtcOpus_DecodePlcSlave() give right channel as
   * output. This is to make stereo work with the current setup of NetEQ, which
   * requires two calls to the decoder to produce stereo. */

  /* The number of samples we ask for is |number_of_lost_frames| times
   * |prev_decoded_samples_|. Limit the number of samples to maximum
   * |kWebRtcOpusMaxFrameSizePerChannel|. */
  plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
  plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel) ?
      plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
  decoded_samples = DecodeNative(inst->decoder_left, NULL, 0, plc_samples,
                                 decoded, &audio_type);
  if (decoded_samples < 0) {
    return -1;
  }

  if (inst->channels == 2) {
    /* |decoded| holds |decoded_samples| interleaved L/R sample pairs; compact
     * the left channel — every even index — into the front of the buffer. */
    for (i = 0; i < decoded_samples; i++) {
      /* Take every second sample, starting at the first sample. This gives
       * the left channel. */
      decoded[i] = decoded[i * 2];
    }
  }

  return decoded_samples;
}
/* PLC for the slave (right-channel) call of NetEQ's two-call stereo scheme.
 * Rejects mono instances up front. Returns the number of concealment samples
 * per channel, or -1 on error. */
int16_t WebRtcOpus_DecodePlcSlave(OpusDecInst* inst, int16_t* decoded,
                                  int16_t number_of_lost_frames) {
  int decoded_samples;
  int16_t audio_type = 0;
  int plc_samples;
  int i;

  /* Calls to WebRtcOpus_DecodePlcSlave() give right channel as output.
   * The function should never be called in the mono case. */
  if (inst->channels != 2) {
    return -1;
  }

  /* The number of samples we ask for is |number_of_lost_frames| times
   * |prev_decoded_samples_|. Limit the number of samples to maximum
   * |kWebRtcOpusMaxFrameSizePerChannel|. */
  plc_samples = number_of_lost_frames * inst->prev_decoded_samples;
  plc_samples = (plc_samples <= kWebRtcOpusMaxFrameSizePerChannel)
      ? plc_samples : kWebRtcOpusMaxFrameSizePerChannel;
  decoded_samples = DecodeNative(inst->decoder_right, NULL, 0, plc_samples,
                                 decoded, &audio_type);
  if (decoded_samples < 0) {
    return -1;
  }

  /* |decoded| holds |decoded_samples| interleaved L/R sample pairs; compact
   * the right channel — every odd index — into the front of the buffer. */
  for (i = 0; i < decoded_samples; i++) {
    /* Take every second sample, starting at the second sample. This gives
     * the right channel. */
    decoded[i] = decoded[i * 2 + 1];
  }

  return decoded_samples;
}
/* Decodes the in-band FEC (recovery) data of a packet, which reconstructs the
 * PREVIOUS frame. Returns samples per channel (>0), 0 when the packet carries
 * no FEC, or -1 on decode error. */
int16_t WebRtcOpus_DecodeFec(OpusDecInst* inst, const uint8_t* encoded,
                             int16_t encoded_bytes, int16_t* decoded,
                             int16_t* audio_type) {
  int decoded_samples;
  int fec_samples;

  /* Nothing to recover when the packet carries no FEC data. */
  if (WebRtcOpus_PacketHasFec(encoded, encoded_bytes) != 1) {
    return 0;
  }

  /* Size the FEC output from the packet's per-frame sample count. */
  fec_samples = opus_packet_get_samples_per_frame(encoded, 48000);

  decoded_samples = DecodeFec(inst->decoder_left, (int16_t*) encoded,
                              encoded_bytes, fec_samples, decoded,
                              audio_type);
  return (decoded_samples < 0) ? -1 : decoded_samples;
}
/* Estimates a packet's duration in samples (at 48 kHz) from its TOC byte.
 * Returns 0 for invalid payloads or durations outside 2.5-120 ms.
 * |inst| is unused (kept for interface compatibility). */
int WebRtcOpus_DurationEst(OpusDecInst* inst,
                           const uint8_t* payload,
                           int payload_length_bytes) {
  int samples;
  int frames = opus_packet_get_nb_frames(payload, payload_length_bytes);

  if (frames < 0) {
    /* Invalid payload data. */
    return 0;
  }
  samples = frames * opus_packet_get_samples_per_frame(payload, 48000);
  if (samples < 120 || samples > 5760) {
    /* Invalid payload duration. */
    return 0;
  }
  return samples;
}
/* Estimates the duration of a packet's FEC data in samples (at 48 kHz).
 * Returns 0 when there is no FEC or the duration is outside 10-120 ms. */
int WebRtcOpus_FecDurationEst(const uint8_t* payload,
                              int payload_length_bytes) {
  int samples;

  if (WebRtcOpus_PacketHasFec(payload, payload_length_bytes) != 1) {
    return 0;
  }
  samples = opus_packet_get_samples_per_frame(payload, 48000);
  if (samples < 480 || samples > 5760) {
    /* Invalid payload duration. */
    return 0;
  }
  return samples;
}
/* Returns 1 if the Opus packet in |payload| carries in-band FEC (LBRR) data
 * for at least one channel, and 0 otherwise (including for NULL/empty input
 * or any parse failure). */
int WebRtcOpus_PacketHasFec(const uint8_t* payload,
                            int payload_length_bytes) {
  int frames, channels, payload_length_ms;
  int n;
  opus_int16 frame_sizes[48];
  const unsigned char *frame_data[48];

  if (payload == NULL || payload_length_bytes <= 0)
    return 0;

  /* In CELT_ONLY mode, packets should not have FEC. */
  if (payload[0] & 0x80)
    return 0;

  /* Samples-per-frame at 48 kHz divided by 48 gives the frame duration in
   * milliseconds; clamp to a minimum of 10 ms. */
  payload_length_ms = opus_packet_get_samples_per_frame(payload, 48000) / 48;
  if (10 > payload_length_ms)
    payload_length_ms = 10;

  channels = opus_packet_get_nb_channels(payload);

  /* Map the frame duration to the number of SILK frames per Opus frame. */
  switch (payload_length_ms) {
    case 10:
    case 20: {
      frames = 1;
      break;
    }
    case 40: {
      frames = 2;
      break;
    }
    case 60: {
      frames = 3;
      break;
    }
    default: {
      return 0; // It is actually even an invalid packet.
    }
  }

  /* The following is to parse the LBRR flags. */
  if (opus_packet_parse(payload, payload_length_bytes, NULL, frame_data,
                        frame_sizes, NULL) < 0) {
    return 0;
  }

  /* A 1-byte frame cannot hold any LBRR data. */
  if (frame_sizes[0] <= 1) {
    return 0;
  }

  /* NOTE(review): the bit position assumes the SILK header layout — per
   * channel, |frames| VAD flags followed by one LBRR flag, packed from the
   * MSB of the first payload byte. Confirm against the SILK bitstream spec. */
  for (n = 0; n < channels; n++) {
    if (frame_data[0][0] & (0x80 >> ((n + 1) * (frames + 1) - 1)))
      return 1;
  }

  return 0;
}

View File

@@ -0,0 +1,119 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/tools/audio_codec_speed_test.h"
using ::std::string;
namespace webrtc {
static const int kOpusBlockDurationMs = 20;
static const int kOpusSamplingKhz = 48;
// Speed (timing) test fixture for the Opus codec, parameterized via
// AudioCodecSpeedTest on (channels, bit rate, input file, extension).
class OpusSpeedTest : public AudioCodecSpeedTest {
 protected:
  OpusSpeedTest();
  virtual void SetUp() OVERRIDE;
  virtual void TearDown() OVERRIDE;
  // Encodes one block from |in_data| into |bit_stream|; stores the coded
  // size in |encoded_bytes| and returns the encoding time in milliseconds.
  virtual float EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
                             int max_bytes, int* encoded_bytes);
  // Decodes one block from |bit_stream| into |out_data| and returns the
  // decoding time in milliseconds.
  virtual float DecodeABlock(const uint8_t* bit_stream, int encoded_bytes,
                             int16_t* out_data);
  WebRtcOpusEncInst* opus_encoder_;  // Owned; created/freed in SetUp/TearDown.
  WebRtcOpusDecInst* opus_decoder_;  // Owned; created/freed in SetUp/TearDown.
};
// Configures the base fixture for 20 ms blocks with both input and output
// sampled at 48 kHz; codec handles start out NULL until SetUp().
OpusSpeedTest::OpusSpeedTest()
    : AudioCodecSpeedTest(kOpusBlockDurationMs,
                          kOpusSamplingKhz,
                          kOpusSamplingKhz),
      opus_encoder_(NULL),
      opus_decoder_(NULL) {
}
void OpusSpeedTest::SetUp() {
  AudioCodecSpeedTest::SetUp();
  /* Create encoder and decoder memory. */
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_encoder_, channels_));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_decoder_, channels_));
  /* Set bitrate. */
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_encoder_, bit_rate_));
}
// Releases the codec instances created in SetUp().
void OpusSpeedTest::TearDown() {
  AudioCodecSpeedTest::TearDown();
  /* Free memory. */
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_decoder_));
}
// Encodes one block of |in_data| into |bit_stream|, recording the size in
// |encoded_bytes|. Returns the wall time spent encoding, in milliseconds.
float OpusSpeedTest::EncodeABlock(int16_t* in_data, uint8_t* bit_stream,
                                  int max_bytes, int* encoded_bytes) {
  const clock_t start = clock();
  const int num_bytes = WebRtcOpus_Encode(opus_encoder_, in_data,
                                          input_length_sample_, max_bytes,
                                          bit_stream);
  const clock_t elapsed = clock() - start;
  // A successful encode produces a positive byte count.
  EXPECT_GT(num_bytes, 0);
  *encoded_bytes = num_bytes;
  return 1000.0 * elapsed / CLOCKS_PER_SEC;
}
// Decodes one encoded block from |bit_stream| into |out_data|. Returns the
// wall time spent decoding, in milliseconds.
float OpusSpeedTest::DecodeABlock(const uint8_t* bit_stream,
                                  int encoded_bytes, int16_t* out_data) {
  int16_t audio_type;
  const clock_t start = clock();
  const int num_samples = WebRtcOpus_DecodeNew(opus_decoder_, bit_stream,
                                               encoded_bytes, out_data,
                                               &audio_type);
  const clock_t elapsed = clock() - start;
  // The decoder must reproduce exactly one full block of samples.
  EXPECT_EQ(output_length_sample_, num_samples);
  return 1000.0 * elapsed / CLOCKS_PER_SEC;
}
// Defines one speed test case that runs EncodeDecode() with the encoder set
// to the given Opus complexity level (valid range is 0 through 10).
#define ADD_TEST(complexity) \
TEST_P(OpusSpeedTest, OpusSetComplexityTest##complexity) { \
  /* Test audio length in second. */ \
  size_t kDurationSec = 400; \
  /* Set complexity. */ \
  printf("Setting complexity to %d ...\n", complexity); \
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_encoder_, complexity)); \
  EncodeDecode(kDurationSec); \
}

// Instantiate one test per complexity level, from 10 down to 0.
ADD_TEST(10);
ADD_TEST(9);
ADD_TEST(8);
ADD_TEST(7);
ADD_TEST(6);
ADD_TEST(5);
ADD_TEST(4);
ADD_TEST(3);
ADD_TEST(2);
ADD_TEST(1);
ADD_TEST(0);
// List all test cases: (channels, bit rate, filename, extension).
const coding_param param_set[] =
    {::std::tr1::make_tuple(1, 64000,
                            string("audio_coding/speech_mono_32_48kHz"),
                            string("pcm"), true),
     ::std::tr1::make_tuple(1, 32000,
                            string("audio_coding/speech_mono_32_48kHz"),
                            string("pcm"), true),
     ::std::tr1::make_tuple(2, 64000,
                            string("audio_coding/music_stereo_48kHz"),
                            string("pcm"), true)};

INSTANTIATE_TEST_CASE_P(AllTest, OpusSpeedTest,
                        ::testing::ValuesIn(param_set));
} // namespace webrtc

View File

@@ -0,0 +1,523 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/codecs/opus/opus_inst.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Number of samples in a 60 ms stereo frame, sampled at 48 kHz.
const int kOpusMaxFrameSamples = 48 * 60 * 2;
// Maximum number of bytes in output bitstream.
const size_t kMaxBytes = 1000;
// Number of samples-per-channel in a 20 ms frame, sampled at 48 kHz.
const int kOpus20msFrameSamples = 48 * 20;
// Number of samples-per-channel in a 10 ms frame, sampled at 48 kHz.
const int kOpus10msFrameSamples = 48 * 10;
// Unit-test fixture for the Opus codec wrapper. SetUp() loads one frame of
// speech samples; all codec handles start NULL and individual tests create
// and free exactly the instances they need.
class OpusTest : public ::testing::Test {
 protected:
  OpusTest();
  virtual void SetUp();
  // Sets the maximum audio bandwidth to |set| on both the mono and stereo
  // encoders and verifies the encoders report |expect| afterwards.
  void TestSetMaxBandwidth(opus_int32 expect, int32_t set);
  WebRtcOpusEncInst* opus_mono_encoder_;
  WebRtcOpusEncInst* opus_stereo_encoder_;
  WebRtcOpusDecInst* opus_mono_decoder_;
  WebRtcOpusDecInst* opus_mono_decoder_new_;
  WebRtcOpusDecInst* opus_stereo_decoder_;
  WebRtcOpusDecInst* opus_stereo_decoder_new_;
  int16_t speech_data_[kOpusMaxFrameSamples];  // Input samples read by SetUp.
  int16_t output_data_[kOpusMaxFrameSamples];  // Scratch decode buffer.
  uint8_t bitstream_[kMaxBytes];               // Encoded packet buffer.
};
// All codec handles start NULL; tests create only the ones they use.
OpusTest::OpusTest()
    : opus_mono_encoder_(NULL),
      opus_stereo_encoder_(NULL),
      opus_mono_decoder_(NULL),
      opus_mono_decoder_new_(NULL),
      opus_stereo_decoder_(NULL),
      opus_stereo_decoder_new_(NULL) {
}
// Reads one maximum-size frame of speech samples from the shared resource
// file into |speech_data_|; aborts the test if the file is missing or short.
void OpusTest::SetUp() {
  const std::string file_name =
      webrtc::test::ResourcePath("audio_coding/speech_mono_32_48kHz", "pcm");
  FILE* input_file = fopen(file_name.c_str(), "rb");
  ASSERT_TRUE(input_file != NULL);
  const size_t samples_read =
      fread(speech_data_, sizeof(int16_t), kOpusMaxFrameSamples, input_file);
  ASSERT_EQ(kOpusMaxFrameSamples, static_cast<int32_t>(samples_read));
  fclose(input_file);
}
// Sets the maximum bandwidth to |set| on both encoders, then reads back the
// value via opus_encoder_ctl and checks it equals |expect| (the library may
// clamp out-of-range requests).
void OpusTest::TestSetMaxBandwidth(opus_int32 expect, int32_t set) {
  opus_int32 bandwidth;
  // Test mono encoder.
  EXPECT_EQ(0, WebRtcOpus_SetMaxBandwidth(opus_mono_encoder_, set));
  opus_encoder_ctl(opus_mono_encoder_->encoder,
                   OPUS_GET_MAX_BANDWIDTH(&bandwidth));
  EXPECT_EQ(expect, bandwidth);
  // Test stereo encoder.
  EXPECT_EQ(0, WebRtcOpus_SetMaxBandwidth(opus_stereo_encoder_, set));
  opus_encoder_ctl(opus_stereo_encoder_->encoder,
                   OPUS_GET_MAX_BANDWIDTH(&bandwidth));
  EXPECT_EQ(expect, bandwidth);
}
// Test failing Create.
TEST_F(OpusTest, OpusCreateFail) {
  // Test to see that an invalid pointer is caught.
  EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(NULL, 1));
  // Opus supports at most 2 channels through this wrapper.
  EXPECT_EQ(-1, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 3));
  EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(NULL, 1));
  EXPECT_EQ(-1, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 3));
}
// Test failing Free.
TEST_F(OpusTest, OpusFreeFail) {
  // Test to see that an invalid pointer is caught.
  EXPECT_EQ(-1, WebRtcOpus_EncoderFree(NULL));
  EXPECT_EQ(-1, WebRtcOpus_DecoderFree(NULL));
}
// Test normal Create and Free.
TEST_F(OpusTest, OpusCreateFree) {
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  // Create must also have produced non-NULL handles.
  EXPECT_TRUE(opus_mono_encoder_ != NULL);
  EXPECT_TRUE(opus_mono_decoder_ != NULL);
  EXPECT_TRUE(opus_stereo_encoder_ != NULL);
  EXPECT_TRUE(opus_stereo_decoder_ != NULL);
  // Free encoder and decoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
}
// Encodes one 20 ms mono frame and decodes it with both decoder APIs
// (WebRtcOpus_DecodeNew and WebRtcOpus_Decode); the two outputs must be
// identical sample for sample.
TEST_F(OpusTest, OpusEncodeDecodeMono) {
  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_new_, 1));
  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 32000));
  // Check number of channels for decoder.
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_));
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_new_));
  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusMaxFrameSamples];
  int16_t output_data_decode[kOpusMaxFrameSamples];
  // The old decode API takes the bitstream as an int16_t pointer.
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_mono_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode|.
  for (int i = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(output_data_decode_new[i], output_data_decode[i]);
  }
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_new_));
}
// Encodes one 20 ms stereo frame and decodes it with both APIs: the new API
// yields interleaved stereo, while the old API yields the left channel via
// Decode and the right channel via DecodeSlave. The interleaving of the two
// old-API outputs must equal the new-API output.
TEST_F(OpusTest, OpusEncodeDecodeStereo) {
  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 64000));
  // Check number of channels for decoder.
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_));
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_new_));
  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusMaxFrameSamples];
  int16_t output_data_decode[kOpusMaxFrameSamples];
  int16_t output_data_decode_slave[kOpusMaxFrameSamples];
  // The old decode API takes the bitstream as an int16_t pointer.
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode_slave,
                                   &audio_type));
  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode| and |output_data_decode_slave| interleaved to a
  // stereo signal.
  for (int i = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// Verifies WebRtcOpus_SetBitRate: fails on a NULL encoder, succeeds once the
// encoders exist (for both low and very high requested rates).
TEST_F(OpusTest, OpusSetBitRate) {
  // Test without creating encoder memory.
  EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_mono_encoder_, 60000));
  EXPECT_EQ(-1, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 60000));
  // Create encoder memory, try with different bitrates.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 30000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 60000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 300000));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 600000));
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// Verifies WebRtcOpus_SetComplexity: fails on a NULL encoder, accepts the
// valid range boundaries 0 and 10, and rejects the out-of-range value 11.
TEST_F(OpusTest, OpusSetComplexity) {
  // Test without creating encoder memory.
  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_mono_encoder_, 9));
  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_stereo_encoder_, 9));
  // Create encoder memory, try with different complexities.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_mono_encoder_, 0));
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_stereo_encoder_, 0));
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_mono_encoder_, 10));
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_stereo_encoder_, 10));
  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_mono_encoder_, 11));
  EXPECT_EQ(-1, WebRtcOpus_SetComplexity(opus_stereo_encoder_, 11));
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// Encode and decode one frame (stereo), initialize the decoder and
// decode once more. Re-initialized decoders must reproduce the same output
// as freshly created ones.
TEST_F(OpusTest, OpusDecodeInit) {
  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusMaxFrameSamples];
  int16_t output_data_decode[kOpusMaxFrameSamples];
  int16_t output_data_decode_slave[kOpusMaxFrameSamples];
  // The old decode API takes the bitstream as an int16_t pointer.
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode_slave,
                                   &audio_type));
  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode| and |output_data_decode_slave| interleaved to a
  // stereo signal.
  for (int i = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }
  // Reset all decoder states, then decode the same packet again.
  EXPECT_EQ(0, WebRtcOpus_DecoderInitNew(opus_stereo_decoder_new_));
  EXPECT_EQ(0, WebRtcOpus_DecoderInit(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderInitSlave(opus_stereo_decoder_));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                   encoded_bytes, output_data_decode_slave,
                                   &audio_type));
  // Data in |output_data_decode_new| should be the same as in
  // |output_data_decode| and |output_data_decode_slave| interleaved to a
  // stereo signal.
  for (int i = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(output_data_decode_new[i * 2], output_data_decode[i]);
    EXPECT_EQ(output_data_decode_new[i * 2 + 1], output_data_decode_slave[i]);
  }
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// Verifies that FEC can be enabled and disabled once encoders exist, and
// that both calls fail on a NULL encoder.
TEST_F(OpusTest, OpusEnableDisableFec) {
  // Test without creating encoder memory.
  EXPECT_EQ(-1, WebRtcOpus_EnableFec(opus_mono_encoder_));
  EXPECT_EQ(-1, WebRtcOpus_DisableFec(opus_stereo_encoder_));
  // Create encoder memory, then enable and disable FEC on both encoders.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EnableFec(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DisableFec(opus_stereo_encoder_));
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// Verifies WebRtcOpus_SetPacketLossRate: fails on a NULL encoder, accepts
// an in-range rate (50), and rejects out-of-range rates (-1 and 101).
TEST_F(OpusTest, OpusSetPacketLossRate) {
  // Test without creating encoder memory.
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_mono_encoder_, 50));
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_stereo_encoder_, 50));
  // Create encoder memory, try with different loss rates.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_mono_encoder_, 50));
  EXPECT_EQ(0, WebRtcOpus_SetPacketLossRate(opus_stereo_encoder_, 50));
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_mono_encoder_, -1));
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_stereo_encoder_, -1));
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_mono_encoder_, 101));
  EXPECT_EQ(-1, WebRtcOpus_SetPacketLossRate(opus_stereo_encoder_, 101));
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// Verifies WebRtcOpus_SetMaxBandwidth: fails on a NULL encoder; once the
// encoders exist, requested rates map onto the Opus bandwidth steps
// (requests between steps round to the enclosing band).
TEST_F(OpusTest, OpusSetMaxBandwidth) {
  // Test without creating encoder memory.
  EXPECT_EQ(-1, WebRtcOpus_SetMaxBandwidth(opus_mono_encoder_, 20000));
  EXPECT_EQ(-1, WebRtcOpus_SetMaxBandwidth(opus_stereo_encoder_, 20000));
  // Create encoder memory, try with different max bandwidths.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  TestSetMaxBandwidth(OPUS_BANDWIDTH_FULLBAND, 24000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_FULLBAND, 14000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_SUPERWIDEBAND, 10000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_WIDEBAND, 7000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_MEDIUMBAND, 6000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_NARROWBAND, 4000);
  TestSetMaxBandwidth(OPUS_BANDWIDTH_NARROWBAND, 3000);
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
}
// PLC in mono mode. Decodes one real frame first (so the decoders have
// state to conceal from), then runs packet-loss concealment for one lost
// frame with both decoder APIs and checks their outputs match.
TEST_F(OpusTest, OpusDecodePlcMono) {
  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_mono_decoder_new_, 1));
  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, 32000));
  // Check number of channels for decoder.
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_));
  EXPECT_EQ(1, WebRtcOpus_DecoderChannels(opus_mono_decoder_new_));
  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusMaxFrameSamples];
  int16_t output_data_decode[kOpusMaxFrameSamples];
  // The old decode API takes the bitstream as an int16_t pointer.
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_mono_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_mono_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_mono_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  // Call decoder PLC for both versions of the decoder, for one lost frame.
  int16_t plc_buffer[kOpusMaxFrameSamples];
  int16_t plc_buffer_new[kOpusMaxFrameSamples];
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodePlcMaster(opus_mono_decoder_, plc_buffer, 1));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodePlc(opus_mono_decoder_new_, plc_buffer_new, 1));
  // Data in |plc_buffer| should be the same as in |plc_buffer_new|.
  for (int i = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(plc_buffer[i], plc_buffer_new[i]);
  }
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_mono_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_mono_decoder_new_));
}
// PLC in stereo mode. Decodes one real frame first, then runs packet-loss
// concealment with both APIs: the old API conceals left (master) and right
// (slave) separately, the new API produces interleaved stereo; the results
// must agree.
TEST_F(OpusTest, OpusDecodePlcStereo) {
  // Create encoder memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_new_, 2));
  // Set bitrate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, 64000));
  // Check number of channels for decoder.
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_));
  EXPECT_EQ(2, WebRtcOpus_DecoderChannels(opus_stereo_decoder_new_));
  // Encode & decode.
  int16_t encoded_bytes;
  int16_t audio_type;
  int16_t output_data_decode_new[kOpusMaxFrameSamples];
  int16_t output_data_decode[kOpusMaxFrameSamples];
  int16_t output_data_decode_slave[kOpusMaxFrameSamples];
  // The old decode API takes the bitstream as an int16_t pointer.
  int16_t* coded = reinterpret_cast<int16_t*>(bitstream_);
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeNew(opus_stereo_decoder_new_, bitstream_,
                                 encoded_bytes, output_data_decode_new,
                                 &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_Decode(opus_stereo_decoder_, coded,
                              encoded_bytes, output_data_decode,
                              &audio_type));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodeSlave(opus_stereo_decoder_, coded,
                                   encoded_bytes,
                                   output_data_decode_slave,
                                   &audio_type));
  // Call decoder PLC for both versions of the decoder, for one lost frame.
  int16_t plc_buffer_left[kOpusMaxFrameSamples];
  int16_t plc_buffer_right[kOpusMaxFrameSamples];
  int16_t plc_buffer_new[kOpusMaxFrameSamples];
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodePlcMaster(opus_stereo_decoder_,
                                       plc_buffer_left, 1));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodePlcSlave(opus_stereo_decoder_,
                                      plc_buffer_right, 1));
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DecodePlc(opus_stereo_decoder_new_, plc_buffer_new, 1));
  // Data in |plc_buffer_left| and |plc_buffer_right| should be the same as
  // the interleaved samples in |plc_buffer_new|.
  for (int i = 0, j = 0; i < kOpus20msFrameSamples; i++) {
    EXPECT_EQ(plc_buffer_left[i], plc_buffer_new[j++]);
    EXPECT_EQ(plc_buffer_right[i], plc_buffer_new[j++]);
  }
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_new_));
}
// Duration estimation. Encodes a 10 ms and a 20 ms frame and checks that
// WebRtcOpus_DurationEst recovers the per-channel sample count from the
// bitstream alone.
TEST_F(OpusTest, OpusDurationEstimation) {
  // Create.
  EXPECT_EQ(0, WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2));
  EXPECT_EQ(0, WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2));
  int16_t encoded_bytes;
  // 10 ms.
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
                                    kOpus10msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus10msFrameSamples,
            WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
                                   encoded_bytes));
  // 20 ms
  encoded_bytes = WebRtcOpus_Encode(opus_stereo_encoder_, speech_data_,
                                    kOpus20msFrameSamples, kMaxBytes,
                                    bitstream_);
  EXPECT_EQ(kOpus20msFrameSamples,
            WebRtcOpus_DurationEst(opus_stereo_decoder_, bitstream_,
                                   encoded_bytes));
  // Free memory.
  EXPECT_EQ(0, WebRtcOpus_EncoderFree(opus_stereo_encoder_));
  EXPECT_EQ(0, WebRtcOpus_DecoderFree(opus_stereo_decoder_));
}
} // namespace webrtc