mirror of
https://github.com/signalapp/Signal-Android.git
synced 2026-04-27 12:15:50 +01:00
Support for Signal calls.
Merge in RedPhone // FREEBIE
This commit is contained in:
5
jni/webrtc/modules/audio_coding/main/acm2/OWNERS
Normal file
5
jni/webrtc/modules/audio_coding/main/acm2/OWNERS
Normal file
@@ -0,0 +1,5 @@
|
||||
|
||||
# These are for the common case of adding or renaming files. If you're doing
|
||||
# structural changes, please get a review from a reviewer in this file.
|
||||
per-file *.gyp=*
|
||||
per-file *.gypi=*
|
||||
314
jni/webrtc/modules/audio_coding/main/acm2/acm_amr.cc
Normal file
314
jni/webrtc/modules/audio_coding/main/acm2/acm_amr.cc
Normal file
@@ -0,0 +1,314 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_amr.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_AMR
|
||||
// NOTE! GSM AMR is not included in the open-source package. The following
|
||||
// interface file is needed:
|
||||
#include "webrtc/modules/audio_coding/main/codecs/amr/interface/amr_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// The API in the header file should match the one below.
|
||||
//
|
||||
// int16_t WebRtcAmr_CreateEnc(AMR_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcAmr_CreateDec(AMR_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcAmr_FreeEnc(AMR_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcAmr_FreeDec(AMR_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmr_Encode(AMR_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t*output,
|
||||
// int16_t mode);
|
||||
// int16_t WebRtcAmr_EncoderInit(AMR_encinst_t_* enc_inst,
|
||||
// int16_t dtx_mode);
|
||||
// int16_t WebRtcAmr_EncodeBitmode(AMR_encinst_t_* enc_inst,
|
||||
// int format);
|
||||
// int16_t WebRtcAmr_Decode(AMR_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmr_DecodePlc(AMR_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmr_DecoderInit(AMR_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmr_DecodeBitmode(AMR_decinst_t_* dec_inst,
|
||||
// int format);
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_AMR
|
||||
ACMAMR::ACMAMR(int16_t /* codec_id */)
|
||||
: encoder_inst_ptr_(NULL),
|
||||
encoding_mode_(-1), // Invalid value.
|
||||
encoding_rate_(0), // Invalid value.
|
||||
encoder_packing_format_(AMRBandwidthEfficient) {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMAMR::~ACMAMR() { return; }
|
||||
|
||||
int16_t ACMAMR::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::EnableDTX() { return -1; }
|
||||
|
||||
int16_t ACMAMR::DisableDTX() { return -1; }
|
||||
|
||||
int16_t ACMAMR::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMAMR::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMAMR::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMAMR::DestructEncoderSafe() { return; }
|
||||
|
||||
int16_t ACMAMR::SetBitRateSafe(const int32_t /* rate */) { return -1; }
|
||||
|
||||
void ACMAMR::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
int16_t ACMAMR::SetAMREncoderPackingFormat(
|
||||
ACMAMRPackingFormat /* packing_format */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMR::AMREncoderPackingFormat() const {
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::SetAMRDecoderPackingFormat(
|
||||
ACMAMRPackingFormat /* packing_format */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMR::AMRDecoderPackingFormat() const {
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
#define WEBRTC_AMR_MR475 0
|
||||
#define WEBRTC_AMR_MR515 1
|
||||
#define WEBRTC_AMR_MR59 2
|
||||
#define WEBRTC_AMR_MR67 3
|
||||
#define WEBRTC_AMR_MR74 4
|
||||
#define WEBRTC_AMR_MR795 5
|
||||
#define WEBRTC_AMR_MR102 6
|
||||
#define WEBRTC_AMR_MR122 7
|
||||
|
||||
ACMAMR::ACMAMR(int16_t codec_id)
|
||||
: encoder_inst_ptr_(NULL),
|
||||
encoding_mode_(-1), // invalid value
|
||||
encoding_rate_(0) { // invalid value
|
||||
codec_id_ = codec_id;
|
||||
has_internal_dtx_ = true;
|
||||
encoder_packing_format_ = AMRBandwidthEfficient;
|
||||
return;
|
||||
}
|
||||
|
||||
ACMAMR::~ACMAMR() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcAmr_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
int16_t vad_decision = 1;
|
||||
// sanity check, if the rate is set correctly. we might skip this
|
||||
// sanity check. if rate is not set correctly, initialization flag
|
||||
// should be false and should not be here.
|
||||
if ((encoding_mode_ < WEBRTC_AMR_MR475) ||
|
||||
(encoding_mode_ > WEBRTC_AMR_MR122)) {
|
||||
*bitstream_len_byte = 0;
|
||||
return -1;
|
||||
}
|
||||
*bitstream_len_byte = WebRtcAmr_Encode(encoder_inst_ptr_,
|
||||
&in_audio_[in_audio_ix_read_],
|
||||
frame_len_smpl_,
|
||||
reinterpret_cast<int16_t*>(bitstream),
|
||||
encoding_mode_);
|
||||
|
||||
// Update VAD, if internal DTX is used
|
||||
if (has_internal_dtx_ && dtx_enabled_) {
|
||||
if (*bitstream_len_byte <= (7 * frame_len_smpl_ / 160)) {
|
||||
vad_decision = 0;
|
||||
}
|
||||
for (int16_t n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
|
||||
vad_label_[n] = vad_decision;
|
||||
}
|
||||
}
|
||||
// increment the read index
|
||||
in_audio_ix_read_ += frame_len_smpl_;
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::EnableDTX() {
|
||||
if (dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// enable DTX
|
||||
if (WebRtcAmr_EncoderInit(encoder_inst_ptr_, 1) < 0) {
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = true;
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMAMR::DisableDTX() {
|
||||
if (!dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// disable DTX
|
||||
if (WebRtcAmr_EncoderInit(encoder_inst_ptr_, 0) < 0) {
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = false;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMAMR::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
int16_t status = SetBitRateSafe((codec_params->codec_inst).rate);
|
||||
status += (WebRtcAmr_EncoderInit(encoder_inst_ptr_,
|
||||
((codec_params->enable_dtx) ? 1 : 0)) < 0)
|
||||
? -1
|
||||
: 0;
|
||||
status +=
|
||||
(WebRtcAmr_EncodeBitmode(encoder_inst_ptr_, encoder_packing_format_) < 0)
|
||||
? -1
|
||||
: 0;
|
||||
return (status < 0) ? -1 : 0;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMAMR::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMAMR::InternalCreateEncoder() {
|
||||
return WebRtcAmr_CreateEnc(&encoder_inst_ptr_);
|
||||
}
|
||||
|
||||
void ACMAMR::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcAmr_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
// there is no encoder set the following
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
encoding_mode_ = -1; // invalid value
|
||||
encoding_rate_ = 0; // invalid value
|
||||
}
|
||||
|
||||
int16_t ACMAMR::SetBitRateSafe(const int32_t rate) {
|
||||
switch (rate) {
|
||||
case 4750: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR475;
|
||||
encoding_rate_ = 4750;
|
||||
break;
|
||||
}
|
||||
case 5150: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR515;
|
||||
encoding_rate_ = 5150;
|
||||
break;
|
||||
}
|
||||
case 5900: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR59;
|
||||
encoding_rate_ = 5900;
|
||||
break;
|
||||
}
|
||||
case 6700: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR67;
|
||||
encoding_rate_ = 6700;
|
||||
break;
|
||||
}
|
||||
case 7400: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR74;
|
||||
encoding_rate_ = 7400;
|
||||
break;
|
||||
}
|
||||
case 7950: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR795;
|
||||
encoding_rate_ = 7950;
|
||||
break;
|
||||
}
|
||||
case 10200: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR102;
|
||||
encoding_rate_ = 10200;
|
||||
break;
|
||||
}
|
||||
case 12200: {
|
||||
encoding_mode_ = WEBRTC_AMR_MR122;
|
||||
encoding_rate_ = 12200;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMAMR::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
// Free the memory where ptr_inst is pointing to
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcAmr_FreeEnc(static_cast<AMR_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::SetAMREncoderPackingFormat(ACMAMRPackingFormat packing_format) {
|
||||
if ((packing_format != AMRBandwidthEfficient) &&
|
||||
(packing_format != AMROctetAlligned) &&
|
||||
(packing_format != AMRFileStorage)) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Invalid AMR Encoder packing-format.");
|
||||
return -1;
|
||||
} else {
|
||||
if (WebRtcAmr_EncodeBitmode(encoder_inst_ptr_, packing_format) < 0) {
|
||||
return -1;
|
||||
} else {
|
||||
encoder_packing_format_ = packing_format;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMR::AMREncoderPackingFormat() const {
|
||||
return encoder_packing_format_;
|
||||
}
|
||||
|
||||
int16_t ACMAMR::SetAMRDecoderPackingFormat(
|
||||
ACMAMRPackingFormat /* packing_format */) {
|
||||
// Not implemented.
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMR::AMRDecoderPackingFormat() const {
|
||||
// Not implemented.
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
#endif
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
69
jni/webrtc/modules/audio_coding/main/acm2/acm_amr.h
Normal file
69
jni/webrtc/modules/audio_coding/main/acm2/acm_amr.h
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMR_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMR_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct AMR_encinst_t_;
|
||||
struct AMR_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
enum ACMAMRPackingFormat;
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMAMR : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMAMR(int16_t codec_id);
|
||||
~ACMAMR();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
int16_t SetAMREncoderPackingFormat(const ACMAMRPackingFormat packing_format);
|
||||
|
||||
ACMAMRPackingFormat AMREncoderPackingFormat() const;
|
||||
|
||||
int16_t SetAMRDecoderPackingFormat(const ACMAMRPackingFormat packing_format);
|
||||
|
||||
ACMAMRPackingFormat AMRDecoderPackingFormat() const;
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
int16_t SetBitRateSafe(const int32_t rate);
|
||||
|
||||
int16_t EnableDTX();
|
||||
|
||||
int16_t DisableDTX();
|
||||
|
||||
AMR_encinst_t_* encoder_inst_ptr_;
|
||||
int16_t encoding_mode_;
|
||||
int16_t encoding_rate_;
|
||||
ACMAMRPackingFormat encoder_packing_format_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMR_H_
|
||||
320
jni/webrtc/modules/audio_coding/main/acm2/acm_amrwb.cc
Normal file
320
jni/webrtc/modules/audio_coding/main/acm2/acm_amrwb.cc
Normal file
@@ -0,0 +1,320 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_amrwb.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_AMRWB
|
||||
// NOTE! GSM AMR-wb is not included in the open-source package. The
|
||||
// following interface file is needed:
|
||||
#include "webrtc/modules/audio_coding/main/codecs/amrwb/interface/amrwb_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// The API in the header file should match the one below.
|
||||
//
|
||||
// int16_t WebRtcAmrWb_CreateEnc(AMRWB_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcAmrWb_CreateDec(AMRWB_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcAmrWb_FreeEnc(AMRWB_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcAmrWb_FreeDec(AMRWB_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmrWb_Encode(AMRWB_encinst_t_* enc_inst, int16_t* input,
|
||||
// int16_t len, int16_t* output, int16_t mode);
|
||||
// int16_t WebRtcAmrWb_EncoderInit(AMRWB_encinst_t_* enc_inst,
|
||||
// int16_t dtx_mode);
|
||||
// int16_t WebRtcAmrWb_EncodeBitmode(AMRWB_encinst_t_* enc_inst,
|
||||
// int format);
|
||||
// int16_t WebRtcAmrWb_Decode(AMRWB_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmrWb_DecodePlc(AMRWB_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmrWb_DecoderInit(AMRWB_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcAmrWb_DecodeBitmode(AMRWB_decinst_t_* dec_inst,
|
||||
// int format);
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_AMRWB
|
||||
ACMAMRwb::ACMAMRwb(int16_t /* codec_id */)
|
||||
: encoder_inst_ptr_(NULL),
|
||||
encoding_mode_(-1), // invalid value
|
||||
encoding_rate_(0), // invalid value
|
||||
encoder_packing_format_(AMRBandwidthEfficient) {}
|
||||
|
||||
ACMAMRwb::~ACMAMRwb() {}
|
||||
|
||||
int16_t ACMAMRwb::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::EnableDTX() { return -1; }
|
||||
|
||||
int16_t ACMAMRwb::DisableDTX() { return -1; }
|
||||
|
||||
int16_t ACMAMRwb::InternalInitEncoder(
|
||||
WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMAMRwb::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMAMRwb::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMAMRwb::DestructEncoderSafe() { return; }
|
||||
|
||||
int16_t ACMAMRwb::SetBitRateSafe(const int32_t /* rate */) { return -1; }
|
||||
|
||||
void ACMAMRwb::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
int16_t ACMAMRwb::SetAMRwbEncoderPackingFormat(
|
||||
ACMAMRPackingFormat /* packing_format */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMRwb::AMRwbEncoderPackingFormat() const {
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::SetAMRwbDecoderPackingFormat(
|
||||
ACMAMRPackingFormat /* packing_format */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMRwb::AMRwbDecoderPackingFormat() const {
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
#define AMRWB_MODE_7k 0
|
||||
#define AMRWB_MODE_9k 1
|
||||
#define AMRWB_MODE_12k 2
|
||||
#define AMRWB_MODE_14k 3
|
||||
#define AMRWB_MODE_16k 4
|
||||
#define AMRWB_MODE_18k 5
|
||||
#define AMRWB_MODE_20k 6
|
||||
#define AMRWB_MODE_23k 7
|
||||
#define AMRWB_MODE_24k 8
|
||||
|
||||
ACMAMRwb::ACMAMRwb(int16_t codec_id)
|
||||
: encoder_inst_ptr_(NULL),
|
||||
encoding_mode_(-1), // invalid value
|
||||
encoding_rate_(0) { // invalid value
|
||||
codec_id_ = codec_id;
|
||||
has_internal_dtx_ = true;
|
||||
encoder_packing_format_ = AMRBandwidthEfficient;
|
||||
return;
|
||||
}
|
||||
|
||||
ACMAMRwb::~ACMAMRwb() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcAmrWb_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
int16_t vad_decision = 1;
|
||||
// sanity check, if the rate is set correctly. we might skip this
|
||||
// sanity check. if rate is not set correctly, initialization flag
|
||||
// should be false and should not be here.
|
||||
if ((encoding_mode_ < AMRWB_MODE_7k) || (encoding_mode_ > AMRWB_MODE_24k)) {
|
||||
*bitstream_len_byte = 0;
|
||||
return -1;
|
||||
}
|
||||
*bitstream_len_byte = WebRtcAmrWb_Encode(
|
||||
encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], frame_len_smpl_,
|
||||
reinterpret_cast<int16_t*>(bitstream), encoding_mode_);
|
||||
|
||||
// Update VAD, if internal DTX is used
|
||||
if (has_internal_dtx_ && dtx_enabled_) {
|
||||
if (*bitstream_len_byte <= (7 * frame_len_smpl_ / 160)) {
|
||||
vad_decision = 0;
|
||||
}
|
||||
for (int16_t n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
|
||||
vad_label_[n] = vad_decision;
|
||||
}
|
||||
}
|
||||
// increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer
|
||||
in_audio_ix_read_ += frame_len_smpl_;
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::EnableDTX() {
|
||||
if (dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// enable DTX
|
||||
if (WebRtcAmrWb_EncoderInit(encoder_inst_ptr_, 1) < 0) {
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = true;
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::DisableDTX() {
|
||||
if (!dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// disable DTX
|
||||
if (WebRtcAmrWb_EncoderInit(encoder_inst_ptr_, 0) < 0) {
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = false;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
// sanity check
|
||||
if (encoder_inst_ptr_ == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t status = SetBitRateSafe((codec_params->codec_inst).rate);
|
||||
status += (WebRtcAmrWb_EncoderInit(encoder_inst_ptr_,
|
||||
((codec_params->enable_dtx) ? 1 : 0)) < 0)
|
||||
? -1
|
||||
: 0;
|
||||
status += (WebRtcAmrWb_EncodeBitmode(encoder_inst_ptr_,
|
||||
encoder_packing_format_) < 0)
|
||||
? -1
|
||||
: 0;
|
||||
return (status < 0) ? -1 : 0;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMAMRwb::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMAMRwb::InternalCreateEncoder() {
|
||||
return WebRtcAmrWb_CreateEnc(&encoder_inst_ptr_);
|
||||
}
|
||||
|
||||
void ACMAMRwb::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcAmrWb_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
// there is no encoder set the following
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
encoding_mode_ = -1; // invalid value
|
||||
encoding_rate_ = 0;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::SetBitRateSafe(const int32_t rate) {
|
||||
switch (rate) {
|
||||
case 7000: {
|
||||
encoding_mode_ = AMRWB_MODE_7k;
|
||||
encoding_rate_ = 7000;
|
||||
break;
|
||||
}
|
||||
case 9000: {
|
||||
encoding_mode_ = AMRWB_MODE_9k;
|
||||
encoding_rate_ = 9000;
|
||||
break;
|
||||
}
|
||||
case 12000: {
|
||||
encoding_mode_ = AMRWB_MODE_12k;
|
||||
encoding_rate_ = 12000;
|
||||
break;
|
||||
}
|
||||
case 14000: {
|
||||
encoding_mode_ = AMRWB_MODE_14k;
|
||||
encoding_rate_ = 14000;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
encoding_mode_ = AMRWB_MODE_16k;
|
||||
encoding_rate_ = 16000;
|
||||
break;
|
||||
}
|
||||
case 18000: {
|
||||
encoding_mode_ = AMRWB_MODE_18k;
|
||||
encoding_rate_ = 18000;
|
||||
break;
|
||||
}
|
||||
case 20000: {
|
||||
encoding_mode_ = AMRWB_MODE_20k;
|
||||
encoding_rate_ = 20000;
|
||||
break;
|
||||
}
|
||||
case 23000: {
|
||||
encoding_mode_ = AMRWB_MODE_23k;
|
||||
encoding_rate_ = 23000;
|
||||
break;
|
||||
}
|
||||
case 24000: {
|
||||
encoding_mode_ = AMRWB_MODE_24k;
|
||||
encoding_rate_ = 24000;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMAMRwb::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcAmrWb_FreeEnc(static_cast<AMRWB_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::SetAMRwbEncoderPackingFormat(
|
||||
ACMAMRPackingFormat packing_format) {
|
||||
if ((packing_format != AMRBandwidthEfficient) &&
|
||||
(packing_format != AMROctetAlligned) &&
|
||||
(packing_format != AMRFileStorage)) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Invalid AMRwb encoder packing-format.");
|
||||
return -1;
|
||||
} else {
|
||||
if (WebRtcAmrWb_EncodeBitmode(encoder_inst_ptr_, packing_format) < 0) {
|
||||
return -1;
|
||||
} else {
|
||||
encoder_packing_format_ = packing_format;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMRwb::AMRwbEncoderPackingFormat() const {
|
||||
return encoder_packing_format_;
|
||||
}
|
||||
|
||||
int16_t ACMAMRwb::SetAMRwbDecoderPackingFormat(
|
||||
ACMAMRPackingFormat packing_format) {
|
||||
// Not implemented.
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMAMRPackingFormat ACMAMRwb::AMRwbDecoderPackingFormat() const {
|
||||
// Not implemented.
|
||||
return AMRUndefined;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
70
jni/webrtc/modules/audio_coding/main/acm2/acm_amrwb.h
Normal file
70
jni/webrtc/modules/audio_coding/main/acm2/acm_amrwb.h
Normal file
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMRWB_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMRWB_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct AMRWB_encinst_t_;
|
||||
struct AMRWB_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMAMRwb : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMAMRwb(int16_t codec_id);
|
||||
~ACMAMRwb();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
int16_t SetAMRwbEncoderPackingFormat(
|
||||
const ACMAMRPackingFormat packing_format);
|
||||
|
||||
ACMAMRPackingFormat AMRwbEncoderPackingFormat() const;
|
||||
|
||||
int16_t SetAMRwbDecoderPackingFormat(
|
||||
const ACMAMRPackingFormat packing_format);
|
||||
|
||||
ACMAMRPackingFormat AMRwbDecoderPackingFormat() const;
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
int16_t SetBitRateSafe(const int32_t rate);
|
||||
|
||||
int16_t EnableDTX();
|
||||
|
||||
int16_t DisableDTX();
|
||||
|
||||
AMRWB_encinst_t_* encoder_inst_ptr_;
|
||||
|
||||
int16_t encoding_mode_;
|
||||
int16_t encoding_rate_;
|
||||
ACMAMRPackingFormat encoder_packing_format_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_AMRWB_H_
|
||||
195
jni/webrtc/modules/audio_coding/main/acm2/acm_celt.cc
Normal file
195
jni/webrtc/modules/audio_coding/main/acm2/acm_celt.cc
Normal file
@@ -0,0 +1,195 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_celt.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_CELT
|
||||
// NOTE! Celt is not included in the open-source package. Modify this file or
|
||||
// your codec API to match the function call and name of used CELT API file.
|
||||
#include "webrtc/modules/audio_coding/codecs/celt/include/celt_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_CELT
|
||||
|
||||
ACMCELT::ACMCELT(int16_t /* codec_id */)
|
||||
: enc_inst_ptr_(NULL),
|
||||
sampling_freq_(0),
|
||||
bitrate_(0),
|
||||
channels_(1) {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMCELT::~ACMCELT() {
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMCELT::CreateInstance(void) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalCreateEncoder() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
void ACMCELT::DestructEncoderSafe() {
|
||||
return;
|
||||
}
|
||||
|
||||
void ACMCELT::InternalDestructEncoderInst(void* /* ptr_inst */) {
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::SetBitRateSafe(const int32_t /*rate*/) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
ACMCELT::ACMCELT(int16_t codec_id)
|
||||
: enc_inst_ptr_(NULL),
|
||||
sampling_freq_(32000), // Default sampling frequency.
|
||||
bitrate_(64000), // Default rate.
|
||||
channels_(1) { // Default send mono.
|
||||
// TODO(tlegrand): remove later when ACMGenericCodec has a new constructor.
|
||||
codec_id_ = codec_id;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
ACMCELT::~ACMCELT() {
|
||||
if (enc_inst_ptr_ != NULL) {
|
||||
WebRtcCelt_FreeEnc(enc_inst_ptr_);
|
||||
enc_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
*bitstream_len_byte = 0;
|
||||
|
||||
// Call Encoder.
|
||||
*bitstream_len_byte = WebRtcCelt_Encode(enc_inst_ptr_,
|
||||
&in_audio_[in_audio_ix_read_],
|
||||
bitstream);
|
||||
|
||||
// Increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer.
|
||||
in_audio_ix_read_ += frame_len_smpl_ * channels_;
|
||||
|
||||
if (*bitstream_len_byte < 0) {
|
||||
// Error reported from the encoder.
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"InternalEncode: Encode error for Celt");
|
||||
*bitstream_len_byte = 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
// Set bitrate and check that it is within the valid range.
|
||||
int16_t status = SetBitRateSafe((codec_params->codec_inst).rate);
|
||||
if (status < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// If number of channels changed we need to re-create memory.
|
||||
if (codec_params->codec_inst.channels != channels_) {
|
||||
WebRtcCelt_FreeEnc(enc_inst_ptr_);
|
||||
enc_inst_ptr_ = NULL;
|
||||
// Store new number of channels.
|
||||
channels_ = codec_params->codec_inst.channels;
|
||||
if (WebRtcCelt_CreateEnc(&enc_inst_ptr_, channels_) < 0) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// Initiate encoder.
|
||||
if (WebRtcCelt_EncoderInit(enc_inst_ptr_, channels_, bitrate_) >= 0) {
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMCELT::CreateInstance(void) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::InternalCreateEncoder() {
|
||||
if (WebRtcCelt_CreateEnc(&enc_inst_ptr_, num_channels_) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"InternalCreateEncoder: create encoder failed for Celt");
|
||||
return -1;
|
||||
}
|
||||
channels_ = num_channels_;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMCELT::DestructEncoderSafe() {
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
if (enc_inst_ptr_ != NULL) {
|
||||
WebRtcCelt_FreeEnc(enc_inst_ptr_);
|
||||
enc_inst_ptr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ACMCELT::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcCelt_FreeEnc(static_cast<CELT_encinst_t*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMCELT::SetBitRateSafe(const int32_t rate) {
|
||||
// Check that rate is in the valid range.
|
||||
if ((rate >= 48000) && (rate <= 128000)) {
|
||||
// Store new rate.
|
||||
bitrate_ = rate;
|
||||
|
||||
// Initiate encoder with new rate.
|
||||
if (WebRtcCelt_EncoderInit(enc_inst_ptr_, channels_, bitrate_) >= 0) {
|
||||
return 0;
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"SetBitRateSafe: Failed to initiate Celt with rate %d",
|
||||
rate);
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"SetBitRateSafe: Invalid rate Celt, %d", rate);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
54
jni/webrtc/modules/audio_coding/main/acm2/acm_celt.h
Normal file
54
jni/webrtc/modules/audio_coding/main/acm2/acm_celt.h
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CELT_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CELT_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct CELT_encinst_t_;
|
||||
struct CELT_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper around the CELT encoder. Only the encoder side is wrapped
// here; the instance pointer is created/destroyed via the ACMGenericCodec
// hooks below.
class ACMCELT : public ACMGenericCodec {
 public:
  explicit ACMCELT(int16_t codec_id);
  ~ACMCELT();

  ACMGenericCodec* CreateInstance(void);

  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);

  int16_t InternalInitEncoder(WebRtcACMCodecParams *codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  // Accepts 48-128 kbps (see the .cc implementation).
  int16_t SetBitRateSafe(const int32_t rate);

  // Raw CELT encoder instance; NULL until InternalCreateEncoder() succeeds.
  CELT_encinst_t_* enc_inst_ptr_;
  uint16_t sampling_freq_;
  // Current target bitrate in bits/s.
  int32_t bitrate_;
  // Channel count the encoder instance was created/initialized with.
  uint16_t channels_;
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CELT_H_
|
||||
83
jni/webrtc/modules/audio_coding/main/acm2/acm_cng.cc
Normal file
83
jni/webrtc/modules/audio_coding/main/acm2/acm_cng.cc
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_cng.h"
|
||||
|
||||
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// Constructs the CNG pseudo-codec wrapper. The actual CN encoder instance
// is allocated later in InternalCreateEncoder().
ACMCNG::ACMCNG(int16_t codec_id) {
  encoder_inst_ptr_ = NULL;
  codec_id_ = codec_id;
  // Look up the sampling frequency associated with this CN payload type.
  samp_freq_hz_ = ACMCodecDB::CodecFreq(codec_id);
}
|
||||
|
||||
// Releases the CN encoder instance, if one was ever created.
ACMCNG::~ACMCNG() {
  if (encoder_inst_ptr_ == NULL) {
    return;
  }
  WebRtcCng_FreeEnc(encoder_inst_ptr_);
  encoder_inst_ptr_ = NULL;
}
|
||||
|
||||
// CNG is not like a regular encoder, this function
|
||||
// should not be called normally
|
||||
// instead the following function is called from inside
|
||||
// ACMGenericCodec::ProcessFrameVADDTX
|
||||
// CNG is not a regular encoder: comfort-noise frames are generated from
// inside ACMGenericCodec::ProcessFrameVADDTX instead of through this path,
// so calling it is always an error.
int16_t ACMCNG::InternalEncode(uint8_t* /* bitstream */,
                               int16_t* /* bitstream_len_byte */) {
  return -1;
}
|
||||
|
||||
// CNG is not like a regular encoder,
|
||||
// this function should not be called normally
|
||||
// instead the following function is called from inside
|
||||
// ACMGenericCodec::ProcessFrameVADDTX
|
||||
// CNG is not a regular encoder: initialization happens from inside
// ACMGenericCodec::ProcessFrameVADDTX, so this entry point always fails.
int16_t ACMCNG::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}
|
||||
|
||||
// CNG never provides a slave/mirror codec instance.
ACMGenericCodec* ACMCNG::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// Allocates the underlying CN encoder. On failure the instance pointer is
// cleared so the destructor will not free a half-created encoder.
// Returns 0 on success, -1 on failure.
int16_t ACMCNG::InternalCreateEncoder() {
  if (WebRtcCng_CreateEnc(&encoder_inst_ptr_) >= 0) {
    return 0;
  }
  encoder_inst_ptr_ = NULL;
  return -1;
}
|
||||
|
||||
void ACMCNG::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcCng_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
}
|
||||
|
||||
void ACMCNG::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcCng_FreeEnc(static_cast<CNG_enc_inst*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
61
jni/webrtc/modules/audio_coding/main/acm2/acm_cng.h
Normal file
61
jni/webrtc/modules/audio_coding/main/acm2/acm_cng.h
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CNG_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CNG_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct WebRtcCngEncInst;
|
||||
struct WebRtcCngDecInst;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for the Comfort Noise Generator (CNG). This is a pseudo-codec:
// the regular encode/init entry points always fail (see the .cc file); CN
// frames are produced from the VAD/DTX path instead.
class ACMCNG: public ACMGenericCodec {
 public:
  explicit ACMCNG(int16_t codec_id);
  ~ACMCNG();

  // for FEC
  ACMGenericCodec* CreateInstance(void);

  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte);

  int16_t InternalInitEncoder(WebRtcACMCodecParams *codec_params);

 protected:
  void DestructEncoderSafe() OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  // DTX cannot be toggled on the CNG pseudo-codec; both calls always fail.
  int16_t EnableDTX() {
    return -1;
  }

  int16_t DisableDTX() {
    return -1;
  }

  // Raw CN encoder instance; NULL until InternalCreateEncoder() succeeds.
  WebRtcCngEncInst* encoder_inst_ptr_;
  // Sampling frequency for this CN payload type, from ACMCodecDB::CodecFreq.
  uint16_t samp_freq_hz_;
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CNG_H_
|
||||
961
jni/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
Normal file
961
jni/webrtc/modules/audio_coding/main/acm2/acm_codec_database.cc
Normal file
@@ -0,0 +1,961 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file generates databases with information about all supported audio
|
||||
* codecs.
|
||||
*/
|
||||
|
||||
// TODO(tlegrand): Change constant input pointers in all functions to constant
|
||||
// references, where appropriate.
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// Includes needed to create the codecs.
|
||||
// G711, PCM mu-law and A-law
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcma.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcmu.h"
|
||||
#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
|
||||
// CNG
|
||||
#include "webrtc/modules/audio_coding/codecs/cng/include/webrtc_cng.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_cng.h"
|
||||
#ifdef WEBRTC_CODEC_ISAC
|
||||
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_ISACFX
|
||||
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
|
||||
#endif
|
||||
#if (defined WEBRTC_CODEC_ISACFX) || (defined WEBRTC_CODEC_ISAC)
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_isac.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_isac_macros.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_PCM16
|
||||
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcm16b.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_ILBC
|
||||
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_ilbc.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AMR
|
||||
#include "webrtc/modules/audio_coding/codecs/amr/include/amr_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_amr.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AMRWB
|
||||
#include "webrtc/modules/audio_coding/codecs/amrwb/include/amrwb_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_amrwb.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_CELT
|
||||
#include "webrtc/modules/audio_coding/codecs/celt/include/celt_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_celt.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722
|
||||
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g722.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722_1
|
||||
#include "webrtc/modules/audio_coding/codecs/g7221/include/g7221_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7221.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722_1C
|
||||
#include "webrtc/modules/audio_coding/codecs/g7221c/include/g7221c_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7221c.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G729
|
||||
#include "webrtc/modules/audio_coding/codecs/g729/include/g729_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g729.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G729_1
|
||||
#include "webrtc/modules/audio_coding/codecs/g7291/include/g7291_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7291.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_GSMFR
|
||||
#include "webrtc/modules/audio_coding/codecs/gsmfr/include/gsmfr_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_gsmfr.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_OPUS
|
||||
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_opus.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_SPEEX
|
||||
#include "webrtc/modules/audio_coding/codecs/speex/include/speex_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_speex.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AVT
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_dtmf_playout.h"
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_RED
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_red.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// Not yet used payload-types.
|
||||
// 83, 82, 81, 80, 79, 78, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68,
|
||||
// 67, 66, 65
|
||||
|
||||
// Master table of supported codecs. Each entry appears to follow the
// CodecInst field order used elsewhere in this file:
// {payload type, name, sampling freq (Hz), packet size (samples),
//  channels, rate (bits/s)} — confirm against the CodecInst declaration.
// NOTE: entries must stay in step with codec_settings_[] and
// neteq_decoders_[] below, which are indexed by the same codec id.
const CodecInst ACMCodecDB::database_[] = {
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
  {103, "ISAC", 16000, kIsacPacSize480, 1, kIsacWbDefaultRate},
# if (defined(WEBRTC_CODEC_ISAC))
  {104, "ISAC", 32000, kIsacPacSize960, 1, kIsacSwbDefaultRate},
  {105, "ISAC", 48000, kIsacPacSize1440, 1, kIsacSwbDefaultRate},
# endif
#endif
#ifdef WEBRTC_CODEC_PCM16
  // Mono
  {107, "L16", 8000, 80, 1, 128000},
  {108, "L16", 16000, 160, 1, 256000},
  {109, "L16", 32000, 320, 1, 512000},
  // Stereo
  {111, "L16", 8000, 80, 2, 128000},
  {112, "L16", 16000, 160, 2, 256000},
  {113, "L16", 32000, 320, 2, 512000},
#endif
  // G.711, PCM mu-law and A-law.
  // Mono
  {0, "PCMU", 8000, 160, 1, 64000},
  {8, "PCMA", 8000, 160, 1, 64000},
  // Stereo
  {110, "PCMU", 8000, 160, 2, 64000},
  {118, "PCMA", 8000, 160, 2, 64000},
#ifdef WEBRTC_CODEC_ILBC
  {102, "ILBC", 8000, 240, 1, 13300},
#endif
#ifdef WEBRTC_CODEC_AMR
  {114, "AMR", 8000, 160, 1, 12200},
#endif
#ifdef WEBRTC_CODEC_AMRWB
  {115, "AMR-WB", 16000, 320, 1, 20000},
#endif
#ifdef WEBRTC_CODEC_CELT
  // Mono
  {116, "CELT", 32000, 640, 1, 64000},
  // Stereo
  {117, "CELT", 32000, 640, 2, 64000},
#endif
#ifdef WEBRTC_CODEC_G722
  // Mono
  {9, "G722", 16000, 320, 1, 64000},
  // Stereo
  {119, "G722", 16000, 320, 2, 64000},
#endif
#ifdef WEBRTC_CODEC_G722_1
  {92, "G7221", 16000, 320, 1, 32000},
  {91, "G7221", 16000, 320, 1, 24000},
  {90, "G7221", 16000, 320, 1, 16000},
#endif
#ifdef WEBRTC_CODEC_G722_1C
  {89, "G7221", 32000, 640, 1, 48000},
  {88, "G7221", 32000, 640, 1, 32000},
  {87, "G7221", 32000, 640, 1, 24000},
#endif
#ifdef WEBRTC_CODEC_G729
  {18, "G729", 8000, 240, 1, 8000},
#endif
#ifdef WEBRTC_CODEC_G729_1
  {86, "G7291", 16000, 320, 1, 32000},
#endif
#ifdef WEBRTC_CODEC_GSMFR
  {3, "GSM", 8000, 160, 1, 13200},
#endif
#ifdef WEBRTC_CODEC_OPUS
  // Opus internally supports 48, 24, 16, 12, 8 kHz.
  // Mono and stereo.
  {120, "opus", 48000, 960, 2, 64000},
#endif
#ifdef WEBRTC_CODEC_SPEEX
  {85, "speex", 8000, 160, 1, 11000},
  {84, "speex", 16000, 320, 1, 22000},
#endif
  // Comfort noise for four different sampling frequencies.
  {13, "CN", 8000, 240, 1, 0},
  {98, "CN", 16000, 480, 1, 0},
  {99, "CN", 32000, 960, 1, 0},
#ifdef ENABLE_48000_HZ
  {100, "CN", 48000, 1440, 1, 0},
#endif
#ifdef WEBRTC_CODEC_AVT
  {106, "telephone-event", 8000, 240, 1, 0},
#endif
#ifdef WEBRTC_CODEC_RED
  {127, "red", 8000, 0, 1, 0},
#endif
  // To prevent compile errors due to trailing commas.
  {-1, "Null", -1, -1, -1, -1}
};
|
||||
|
||||
// Create database with all codec settings at compile time.
|
||||
// Each entry needs the following parameters in the given order:
|
||||
// Number of allowed packet sizes, a vector with the allowed packet sizes,
|
||||
// Basic block samples, max number of channels that are supported.
|
||||
// Per-codec settings, indexed by the same codec id as database_[] above.
// Each entry, in order: number of allowed packet sizes, the allowed packet
// sizes (samples), basic block size (samples), max supported channels, and
// a trailing bool flag.
// Fix: the stereo 32 kHz L16 entry previously omitted the trailing bool and
// relied on aggregate value-initialization; it is now an explicit `false`
// like every other entry (no behavior change).
const ACMCodecDB::CodecSettings ACMCodecDB::codec_settings_[] = {
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
  {2, {kIsacPacSize480, kIsacPacSize960}, 0, 1, true},
# if (defined(WEBRTC_CODEC_ISAC))
  {1, {kIsacPacSize960}, 0, 1, false},
  {1, {kIsacPacSize1440}, 0, 1, true},
# endif
#endif
#ifdef WEBRTC_CODEC_PCM16
  // Mono
  {4, {80, 160, 240, 320}, 0, 2, false},
  {4, {160, 320, 480, 640}, 0, 2, false},
  {2, {320, 640}, 0, 2, false},
  // Stereo
  {4, {80, 160, 240, 320}, 0, 2, false},
  {4, {160, 320, 480, 640}, 0, 2, false},
  {2, {320, 640}, 0, 2, false},
#endif
  // G.711, PCM mu-law and A-law.
  // Mono
  {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
  {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
  // Stereo
  {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
  {6, {80, 160, 240, 320, 400, 480}, 0, 2, false},
#ifdef WEBRTC_CODEC_ILBC
  {4, {160, 240, 320, 480}, 0, 1, false},
#endif
#ifdef WEBRTC_CODEC_AMR
  {3, {160, 320, 480}, 0, 1, true},
#endif
#ifdef WEBRTC_CODEC_AMRWB
  {3, {320, 640, 960}, 0, 1, true},
#endif
#ifdef WEBRTC_CODEC_CELT
  // Mono
  {1, {640}, 0, 2, false},
  // Stereo
  {1, {640}, 0, 2, false},
#endif
#ifdef WEBRTC_CODEC_G722
  // Mono
  {6, {160, 320, 480, 640, 800, 960}, 0, 2, false},
  // Stereo
  {6, {160, 320, 480, 640, 800, 960}, 0, 2, false},
#endif
#ifdef WEBRTC_CODEC_G722_1
  {1, {320}, 320, 1, false},
  {1, {320}, 320, 1, false},
  {1, {320}, 320, 1, false},
#endif
#ifdef WEBRTC_CODEC_G722_1C
  {1, {640}, 640, 1, false},
  {1, {640}, 640, 1, false},
  {1, {640}, 640, 1, false},
#endif
#ifdef WEBRTC_CODEC_G729
  {6, {80, 160, 240, 320, 400, 480}, 0, 1, false},
#endif
#ifdef WEBRTC_CODEC_G729_1
  {3, {320, 640, 960}, 0, 1, false},
#endif
#ifdef WEBRTC_CODEC_GSMFR
  {3, {160, 320, 480}, 160, 1, false},
#endif
#ifdef WEBRTC_CODEC_OPUS
  // Opus supports frames shorter than 10ms,
  // but it doesn't help us to use them.
  // Mono and stereo.
  {4, {480, 960, 1920, 2880}, 0, 2, false},
#endif
#ifdef WEBRTC_CODEC_SPEEX
  {3, {160, 320, 480}, 0, 1, false},
  {3, {320, 640, 960}, 0, 1, false},
#endif
  // Comfort noise for three different sampling frequencies.
  {1, {240}, 240, 1, false},
  {1, {480}, 480, 1, false},
  {1, {960}, 960, 1, false},
#ifdef ENABLE_48000_HZ
  {1, {1440}, 1440, 1, false},
#endif
#ifdef WEBRTC_CODEC_AVT
  {1, {240}, 240, 1, false},
#endif
#ifdef WEBRTC_CODEC_RED
  {1, {0}, 0, 1, false},
#endif
  // To prevent compile errors due to trailing commas.
  {-1, {-1}, -1, -1, false}
};
|
||||
|
||||
// Create a database of all NetEQ decoders at compile time.
|
||||
// NetEQ decoder identifiers, one per database_[] entry — this array appears
// to parallel database_[] above entry-for-entry; keep them in step.
const NetEqDecoder ACMCodecDB::neteq_decoders_[] = {
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
  kDecoderISAC,
# if (defined(WEBRTC_CODEC_ISAC))
  kDecoderISACswb,
  kDecoderISACfb,
# endif
#endif
#ifdef WEBRTC_CODEC_PCM16
  // Mono
  kDecoderPCM16B,
  kDecoderPCM16Bwb,
  kDecoderPCM16Bswb32kHz,
  // Stereo
  kDecoderPCM16B_2ch,
  kDecoderPCM16Bwb_2ch,
  kDecoderPCM16Bswb32kHz_2ch,
#endif
  // G.711, PCM mu-law and A-law.
  // Mono
  kDecoderPCMu,
  kDecoderPCMa,
  // Stereo
  kDecoderPCMu_2ch,
  kDecoderPCMa_2ch,
#ifdef WEBRTC_CODEC_ILBC
  kDecoderILBC,
#endif
#ifdef WEBRTC_CODEC_AMR
  kDecoderAMR,
#endif
#ifdef WEBRTC_CODEC_AMRWB
  kDecoderAMRWB,
#endif
#ifdef WEBRTC_CODEC_CELT
  // Mono
  kDecoderCELT_32,
  // Stereo
  kDecoderCELT_32_2ch,
#endif
#ifdef WEBRTC_CODEC_G722
  // Mono
  kDecoderG722,
  // Stereo
  kDecoderG722_2ch,
#endif
#ifdef WEBRTC_CODEC_G722_1
  kDecoderG722_1_32,
  kDecoderG722_1_24,
  kDecoderG722_1_16,
#endif
#ifdef WEBRTC_CODEC_G722_1C
  kDecoderG722_1C_48,
  kDecoderG722_1C_32,
  kDecoderG722_1C_24,
#endif
#ifdef WEBRTC_CODEC_G729
  kDecoderG729,
#endif
#ifdef WEBRTC_CODEC_G729_1
  kDecoderG729_1,
#endif
#ifdef WEBRTC_CODEC_GSMFR
  kDecoderGSMFR,
#endif
#ifdef WEBRTC_CODEC_OPUS
  // Mono and stereo.
  kDecoderOpus,
#endif
#ifdef WEBRTC_CODEC_SPEEX
  kDecoderSPEEX_8,
  kDecoderSPEEX_16,
#endif
  // Comfort noise for three different sampling frequencies.
  kDecoderCNGnb,
  kDecoderCNGwb,
  kDecoderCNGswb32kHz
#ifdef ENABLE_48000_HZ
  , kDecoderCNGswb48kHz
#endif
#ifdef WEBRTC_CODEC_AVT
  , kDecoderAVT
#endif
#ifdef WEBRTC_CODEC_RED
  , kDecoderRED
#endif
};
|
||||
|
||||
// Get codec information from database.
|
||||
// TODO(tlegrand): replace memcpy with a pointer to the data base memory.
|
||||
int ACMCodecDB::Codec(int codec_id, CodecInst* codec_inst) {
|
||||
// Error check to see that codec_id is not out of bounds.
|
||||
if ((codec_id < 0) || (codec_id >= kNumCodecs)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Copy database information for the codec to the output.
|
||||
memcpy(codec_inst, &database_[codec_id], sizeof(CodecInst));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Enumerator for error codes when asking for codec database id.
|
||||
enum {
  kInvalidCodec = -10,        // No database entry matched the settings.
  kInvalidPayloadtype = -30,  // Rejected by ValidPayloadType().
  kInvalidPacketSize = -40,   // Packet size not in the codec's allowed set.
  kInvalidRate = -50          // Bitrate rejected by the codec's rate check.
};
|
||||
|
||||
// Gets the codec id number from the database. If there is some mismatch in
|
||||
// the codec settings, the function will return an error code.
|
||||
// NOTE! The first mismatch found will generate the return value.
|
||||
// Validates |codec_inst| against the database and returns its codec id, or
// one of the negative error codes above on the FIRST mismatch found
// (codec, payload type, packet size, then rate — in that order).
// |*mirror_id| is set to the id of the codec whose struct is shared
// (the iSAC WB id for any iSAC variant, otherwise the codec's own id).
int ACMCodecDB::CodecNumber(const CodecInst& codec_inst, int* mirror_id) {
  // Look for a matching codec in the database.
  int codec_id = CodecId(codec_inst);

  // Checks if we found a matching codec.
  if (codec_id == -1) {
    return kInvalidCodec;
  }

  // Checks the validity of payload type
  if (!ValidPayloadType(codec_inst.pltype)) {
    return kInvalidPayloadtype;
  }

  // Comfort Noise is special case, packet-size & rate is not checked.
  if (STR_CASE_CMP(database_[codec_id].plname, "CN") == 0) {
    *mirror_id = codec_id;
    return codec_id;
  }

  // RED is special case, packet-size & rate is not checked.
  if (STR_CASE_CMP(database_[codec_id].plname, "red") == 0) {
    *mirror_id = codec_id;
    return codec_id;
  }

  // Checks the validity of packet size.
  if (codec_settings_[codec_id].num_packet_sizes > 0) {
    bool packet_size_ok = false;
    int i;
    int packet_size_samples;
    for (i = 0; i < codec_settings_[codec_id].num_packet_sizes; i++) {
      packet_size_samples =
          codec_settings_[codec_id].packet_sizes_samples[i];
      if (codec_inst.pacsize == packet_size_samples) {
        packet_size_ok = true;
        break;
      }
    }

    if (!packet_size_ok) {
      return kInvalidPacketSize;
    }
  }

  // A non-positive packet size is never valid.
  if (codec_inst.pacsize < 1) {
    return kInvalidPacketSize;
  }

  // Check the validity of rate. Codecs with multiple rates have their own
  // function for this.
  *mirror_id = codec_id;
  if (STR_CASE_CMP("isac", codec_inst.plname) == 0) {
    if (IsISACRateValid(codec_inst.rate)) {
      // Set mirrorID to iSAC WB which is only created once to be used both for
      // iSAC WB and SWB, because they need to share struct.
      *mirror_id = kISAC;
      return codec_id;
    } else {
      return kInvalidRate;
    }
  } else if (STR_CASE_CMP("ilbc", codec_inst.plname) == 0) {
    return IsILBCRateValid(codec_inst.rate, codec_inst.pacsize)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("amr", codec_inst.plname) == 0) {
    return IsAMRRateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("amr-wb", codec_inst.plname) == 0) {
    return IsAMRwbRateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("g7291", codec_inst.plname) == 0) {
    return IsG7291RateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("opus", codec_inst.plname) == 0) {
    return IsOpusRateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("speex", codec_inst.plname) == 0) {
    return IsSpeexRateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  } else if (STR_CASE_CMP("celt", codec_inst.plname) == 0) {
    return IsCeltRateValid(codec_inst.rate)
        ? codec_id : kInvalidRate;
  }

  // Single-rate codecs fall through to the generic rate check.
  return IsRateValid(codec_id, codec_inst.rate) ?
      codec_id : kInvalidRate;
}
|
||||
|
||||
// Looks for a matching payload name, frequency, and channels in the
|
||||
// codec list. Need to check all three since some codecs have several codec
|
||||
// entries with different frequencies and/or channels.
|
||||
// Does not check other codec settings, such as payload type and packet size.
|
||||
// Returns the id of the codec, or -1 if no match is found.
|
||||
int ACMCodecDB::CodecId(const CodecInst& codec_inst) {
|
||||
return (CodecId(codec_inst.plname, codec_inst.plfreq,
|
||||
codec_inst.channels));
|
||||
}
|
||||
|
||||
int ACMCodecDB::CodecId(const char* payload_name, int frequency, int channels) {
|
||||
for (int id = 0; id < kNumCodecs; id++) {
|
||||
bool name_match = false;
|
||||
bool frequency_match = false;
|
||||
bool channels_match = false;
|
||||
|
||||
// Payload name, sampling frequency and number of channels need to match.
|
||||
// NOTE! If |frequency| is -1, the frequency is not applicable, and is
|
||||
// always treated as true, like for RED.
|
||||
name_match = (STR_CASE_CMP(database_[id].plname, payload_name) == 0);
|
||||
frequency_match = (frequency == database_[id].plfreq) || (frequency == -1);
|
||||
// The number of channels must match for all codecs but Opus.
|
||||
if (STR_CASE_CMP(payload_name, "opus") != 0) {
|
||||
channels_match = (channels == database_[id].channels);
|
||||
} else {
|
||||
// For opus we just check that number of channels is valid.
|
||||
channels_match = (channels == 1 || channels == 2);
|
||||
}
|
||||
|
||||
if (name_match && frequency_match && channels_match) {
|
||||
// We have found a matching codec in the list.
|
||||
return id;
|
||||
}
|
||||
}
|
||||
|
||||
// We didn't find a matching codec.
|
||||
return -1;
|
||||
}
|
||||
// Gets codec id number, and mirror id, from database for the receiver.
|
||||
int ACMCodecDB::ReceiverCodecNumber(const CodecInst& codec_inst,
|
||||
int* mirror_id) {
|
||||
// Look for a matching codec in the database.
|
||||
int codec_id = CodecId(codec_inst);
|
||||
|
||||
// Set |mirror_id| to |codec_id|, except for iSAC. In case of iSAC we always
|
||||
// set |mirror_id| to iSAC WB (kISAC) which is only created once to be used
|
||||
// both for iSAC WB and SWB, because they need to share struct.
|
||||
if (STR_CASE_CMP(codec_inst.plname, "ISAC") != 0) {
|
||||
*mirror_id = codec_id;
|
||||
} else {
|
||||
*mirror_id = kISAC;
|
||||
}
|
||||
|
||||
return codec_id;
|
||||
}
|
||||
|
||||
// Returns the codec sampling frequency for codec with id = "codec_id" in
|
||||
// database.
|
||||
int ACMCodecDB::CodecFreq(int codec_id) {
|
||||
// Error check to see that codec_id is not out of bounds.
|
||||
if (codec_id < 0 || codec_id >= kNumCodecs) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return database_[codec_id].plfreq;
|
||||
}
|
||||
|
||||
// Returns the codec's basic coding block size in samples.
|
||||
int ACMCodecDB::BasicCodingBlock(int codec_id) {
|
||||
// Error check to see that codec_id is not out of bounds.
|
||||
if (codec_id < 0 || codec_id >= kNumCodecs) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return codec_settings_[codec_id].basic_block_samples;
|
||||
}
|
||||
|
||||
// Returns the NetEQ decoder database.
|
||||
// Exposes the compile-time NetEQ decoder table.
const NetEqDecoder* ACMCodecDB::NetEQDecoders() { return neteq_decoders_; }
|
||||
|
||||
// Gets mirror id. The Id is used for codecs sharing struct for settings that
|
||||
// need different payload types.
|
||||
int ACMCodecDB::MirrorID(int codec_id) {
|
||||
if (STR_CASE_CMP(database_[codec_id].plname, "isac") == 0) {
|
||||
return kISAC;
|
||||
} else {
|
||||
return codec_id;
|
||||
}
|
||||
}
|
||||
|
||||
// Creates memory/instance for storing codec state.
|
||||
ACMGenericCodec* ACMCodecDB::CreateCodecInstance(const CodecInst& codec_inst) {
|
||||
// All we have support for right now.
|
||||
if (!STR_CASE_CMP(codec_inst.plname, "ISAC")) {
|
||||
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
|
||||
return new ACMISAC(kISAC);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "PCMU")) {
|
||||
if (codec_inst.channels == 1) {
|
||||
return new ACMPCMU(kPCMU);
|
||||
} else {
|
||||
return new ACMPCMU(kPCMU_2ch);
|
||||
}
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "PCMA")) {
|
||||
if (codec_inst.channels == 1) {
|
||||
return new ACMPCMA(kPCMA);
|
||||
} else {
|
||||
return new ACMPCMA(kPCMA_2ch);
|
||||
}
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "ILBC")) {
|
||||
#ifdef WEBRTC_CODEC_ILBC
|
||||
return new ACMILBC(kILBC);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "AMR")) {
|
||||
#ifdef WEBRTC_CODEC_AMR
|
||||
return new ACMAMR(kGSMAMR);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "AMR-WB")) {
|
||||
#ifdef WEBRTC_CODEC_AMRWB
|
||||
return new ACMAMRwb(kGSMAMRWB);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "CELT")) {
|
||||
#ifdef WEBRTC_CODEC_CELT
|
||||
if (codec_inst.channels == 1) {
|
||||
return new ACMCELT(kCELT32);
|
||||
} else {
|
||||
return new ACMCELT(kCELT32_2ch);
|
||||
}
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "G722")) {
|
||||
#ifdef WEBRTC_CODEC_G722
|
||||
if (codec_inst.channels == 1) {
|
||||
return new ACMG722(kG722);
|
||||
} else {
|
||||
return new ACMG722(kG722_2ch);
|
||||
}
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "G7221")) {
|
||||
switch (codec_inst.plfreq) {
|
||||
case 16000: {
|
||||
#ifdef WEBRTC_CODEC_G722_1
|
||||
int codec_id;
|
||||
switch (codec_inst->rate) {
|
||||
case 16000 : {
|
||||
codec_id = kG722_1_16;
|
||||
break;
|
||||
}
|
||||
case 24000 : {
|
||||
codec_id = kG722_1_24;
|
||||
break;
|
||||
}
|
||||
case 32000 : {
|
||||
codec_id = kG722_1_32;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
return new ACMG722_1(codec_id);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
case 32000: {
|
||||
#ifdef WEBRTC_CODEC_G722_1C
|
||||
int codec_id;
|
||||
switch (codec_inst->rate) {
|
||||
case 24000 : {
|
||||
codec_id = kG722_1C_24;
|
||||
break;
|
||||
}
|
||||
case 32000 : {
|
||||
codec_id = kG722_1C_32;
|
||||
break;
|
||||
}
|
||||
case 48000 : {
|
||||
codec_id = kG722_1C_48;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
return new ACMG722_1C(codec_id);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "CN")) {
|
||||
// For CN we need to check sampling frequency to know what codec to create.
|
||||
int codec_id;
|
||||
switch (codec_inst.plfreq) {
|
||||
case 8000: {
|
||||
codec_id = kCNNB;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
codec_id = kCNWB;
|
||||
break;
|
||||
}
|
||||
case 32000: {
|
||||
codec_id = kCNSWB;
|
||||
break;
|
||||
}
|
||||
#ifdef ENABLE_48000_HZ
|
||||
case 48000: {
|
||||
codec_id = kCNFB;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return new ACMCNG(codec_id);
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "G729")) {
|
||||
#ifdef WEBRTC_CODEC_G729
|
||||
return new ACMG729(kG729);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "G7291")) {
|
||||
#ifdef WEBRTC_CODEC_G729_1
|
||||
return new ACMG729_1(kG729_1);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "opus")) {
|
||||
#ifdef WEBRTC_CODEC_OPUS
|
||||
return new ACMOpus(kOpus);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "speex")) {
|
||||
#ifdef WEBRTC_CODEC_SPEEX
|
||||
int codec_id;
|
||||
switch (codec_inst->plfreq) {
|
||||
case 8000: {
|
||||
codec_id = kSPEEX8;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
codec_id = kSPEEX16;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return new ACMSPEEX(codec_id);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "CN")) {
|
||||
// For CN we need to check sampling frequency to know what codec to create.
|
||||
int codec_id;
|
||||
switch (codec_inst.plfreq) {
|
||||
case 8000: {
|
||||
codec_id = kCNNB;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
codec_id = kCNWB;
|
||||
break;
|
||||
}
|
||||
case 32000: {
|
||||
codec_id = kCNSWB;
|
||||
break;
|
||||
}
|
||||
#ifdef ENABLE_48000_HZ
|
||||
case 48000: {
|
||||
codec_id = kCNFB;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
return new ACMCNG(codec_id);
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "L16")) {
|
||||
#ifdef WEBRTC_CODEC_PCM16
|
||||
// For L16 we need to check sampling frequency to know what codec to create.
|
||||
int codec_id;
|
||||
if (codec_inst.channels == 1) {
|
||||
switch (codec_inst.plfreq) {
|
||||
case 8000: {
|
||||
codec_id = kPCM16B;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
codec_id = kPCM16Bwb;
|
||||
break;
|
||||
}
|
||||
case 32000: {
|
||||
codec_id = kPCM16Bswb32kHz;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch (codec_inst.plfreq) {
|
||||
case 8000: {
|
||||
codec_id = kPCM16B_2ch;
|
||||
break;
|
||||
}
|
||||
case 16000: {
|
||||
codec_id = kPCM16Bwb_2ch;
|
||||
break;
|
||||
}
|
||||
case 32000: {
|
||||
codec_id = kPCM16Bswb32kHz_2ch;
|
||||
break;
|
||||
}
|
||||
default: {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
return new ACMPCM16B(codec_id);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "telephone-event")) {
|
||||
#ifdef WEBRTC_CODEC_AVT
|
||||
return new ACMDTMFPlayout(kAVT);
|
||||
#endif
|
||||
} else if (!STR_CASE_CMP(codec_inst.plname, "red")) {
|
||||
#ifdef WEBRTC_CODEC_RED
|
||||
return new ACMRED(kRED);
|
||||
#endif
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// Checks if the bitrate is valid for the codec.
|
||||
bool ACMCodecDB::IsRateValid(int codec_id, int rate) {
|
||||
if (database_[codec_id].rate == rate) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Checks if the bitrate is valid for iSAC.
|
||||
// Returns true iff |rate| is valid for iSAC: either -1 (adaptive mode) or a
// fixed rate within [10000, 56000] bps.
bool ACMCodecDB::IsISACRateValid(int rate) {
  return (rate == -1) || ((rate >= 10000) && (rate <= 56000));
}
|
||||
|
||||
// Checks if the bitrate is valid for iLBC.
|
||||
// Returns true iff the (rate, frame size) pair is a valid iLBC mode:
// 13300 bps with 30 ms frames (240/480 samples) or 15200 bps with 20 ms
// frames (160/320 samples).
bool ACMCodecDB::IsILBCRateValid(int rate, int frame_size_samples) {
  switch (rate) {
    case 13300:
      return (frame_size_samples == 240) || (frame_size_samples == 480);
    case 15200:
      return (frame_size_samples == 160) || (frame_size_samples == 320);
    default:
      return false;
  }
}
|
||||
|
||||
// Check if the bitrate is valid for the GSM-AMR.
|
||||
// Returns true iff |rate| is one of the eight fixed GSM-AMR mode bit-rates.
bool ACMCodecDB::IsAMRRateValid(int rate) {
  static const int kValidRates[] = {4750, 5150, 5900,  6700,
                                    7400, 7950, 10200, 12200};
  const int num_rates = sizeof(kValidRates) / sizeof(kValidRates[0]);
  for (int i = 0; i < num_rates; ++i) {
    if (rate == kValidRates[i]) {
      return true;
    }
  }
  return false;
}
|
||||
|
||||
// Check if the bitrate is valid for GSM-AMR-WB.
|
||||
// Returns true iff |rate| is one of the nine fixed GSM-AMR-WB mode bit-rates.
bool ACMCodecDB::IsAMRwbRateValid(int rate) {
  static const int kValidRates[] = {7000,  9000,  12000, 14000, 16000,
                                    18000, 20000, 23000, 24000};
  const int num_rates = sizeof(kValidRates) / sizeof(kValidRates[0]);
  for (int i = 0; i < num_rates; ++i) {
    if (rate == kValidRates[i]) {
      return true;
    }
  }
  return false;
}
|
||||
|
||||
// Check if the bitrate is valid for G.729.1.
|
||||
// Returns true iff |rate| is a valid G.729.1 layered bit-rate (8 kbps core
// plus 2 kbps layers up to 32 kbps; no 10 kbps layer exists).
bool ACMCodecDB::IsG7291RateValid(int rate) {
  static const int kValidRates[] = {8000,  12000, 14000, 16000, 18000, 20000,
                                    22000, 24000, 26000, 28000, 30000, 32000};
  const int num_rates = sizeof(kValidRates) / sizeof(kValidRates[0]);
  for (int i = 0; i < num_rates; ++i) {
    if (rate == kValidRates[i]) {
      return true;
    }
  }
  return false;
}
|
||||
|
||||
// Checks if the bitrate is valid for Speex.
|
||||
// Returns true iff |rate| is valid for Speex (anything above 2000 bps).
bool ACMCodecDB::IsSpeexRateValid(int rate) {
  return rate > 2000;
}
|
||||
|
||||
// Checks if the bitrate is valid for Opus.
|
||||
// Returns true iff |rate| lies within Opus' supported bit-rate range,
// [6000, 510000] bps.
bool ACMCodecDB::IsOpusRateValid(int rate) {
  return (rate >= 6000) && (rate <= 510000);
}
|
||||
|
||||
// Checks if the bitrate is valid for Celt.
|
||||
// Returns true iff |rate| lies within Celt's supported bit-rate range,
// [48000, 128000] bps.
bool ACMCodecDB::IsCeltRateValid(int rate) {
  return (rate >= 48000) && (rate <= 128000);
}
|
||||
|
||||
// Checks if the payload type is in the valid range.
|
||||
// Returns true iff |payload_type| is within the RTP payload-type range
// [0, 127].
bool ACMCodecDB::ValidPayloadType(int payload_type) {
  return (payload_type >= 0) && (payload_type <= 127);
}
|
||||
|
||||
// Returns true if the codec at |codec_id| must own its decoder instance
// (the case where encoder and decoder share one codec instance, e.g. iSAC).
bool ACMCodecDB::OwnsDecoder(int codec_id) {
  // Index is validated only in debug builds; an out-of-range |codec_id| in
  // release builds would read past codec_settings_.
  assert(codec_id >= 0 && codec_id < ACMCodecDB::kNumCodecs);
  return ACMCodecDB::codec_settings_[codec_id].owns_decoder;
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
359
jni/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
Normal file
359
jni/webrtc/modules/audio_coding/main/acm2/acm_codec_database.h
Normal file
@@ -0,0 +1,359 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file generates databases with information about all supported audio
|
||||
* codecs.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
|
||||
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// TODO(tlegrand): replace class ACMCodecDB with a namespace.
|
||||
class ACMCodecDB {
|
||||
public:
|
||||
// Enum with array indexes for the supported codecs. NOTE! The order MUST
|
||||
// be the same as when creating the database in acm_codec_database.cc.
|
||||
enum {
|
||||
kNone = -1
|
||||
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
|
||||
, kISAC
|
||||
# if (defined(WEBRTC_CODEC_ISAC))
|
||||
, kISACSWB
|
||||
, kISACFB
|
||||
# endif
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_PCM16
|
||||
// Mono
|
||||
, kPCM16B
|
||||
, kPCM16Bwb
|
||||
, kPCM16Bswb32kHz
|
||||
// Stereo
|
||||
, kPCM16B_2ch
|
||||
, kPCM16Bwb_2ch
|
||||
, kPCM16Bswb32kHz_2ch
|
||||
#endif
|
||||
// Mono
|
||||
, kPCMU
|
||||
, kPCMA
|
||||
// Stereo
|
||||
, kPCMU_2ch
|
||||
, kPCMA_2ch
|
||||
#ifdef WEBRTC_CODEC_ILBC
|
||||
, kILBC
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AMR
|
||||
, kGSMAMR
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AMRWB
|
||||
, kGSMAMRWB
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_CELT
|
||||
// Mono
|
||||
, kCELT32
|
||||
// Stereo
|
||||
, kCELT32_2ch
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722
|
||||
// Mono
|
||||
, kG722
|
||||
// Stereo
|
||||
, kG722_2ch
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722_1
|
||||
, kG722_1_32
|
||||
, kG722_1_24
|
||||
, kG722_1_16
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G722_1C
|
||||
, kG722_1C_48
|
||||
, kG722_1C_32
|
||||
, kG722_1C_24
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G729
|
||||
, kG729
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_G729_1
|
||||
, kG729_1
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_GSMFR
|
||||
, kGSMFR
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_OPUS
|
||||
// Mono and stereo
|
||||
, kOpus
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_SPEEX
|
||||
, kSPEEX8
|
||||
, kSPEEX16
|
||||
#endif
|
||||
, kCNNB
|
||||
, kCNWB
|
||||
, kCNSWB
|
||||
#ifdef ENABLE_48000_HZ
|
||||
, kCNFB
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_AVT
|
||||
, kAVT
|
||||
#endif
|
||||
#ifdef WEBRTC_CODEC_RED
|
||||
, kRED
|
||||
#endif
|
||||
, kNumCodecs
|
||||
};
|
||||
|
||||
// Set unsupported codecs to -1
|
||||
#ifndef WEBRTC_CODEC_ISAC
|
||||
enum {kISACSWB = -1};
|
||||
enum {kISACFB = -1};
|
||||
# ifndef WEBRTC_CODEC_ISACFX
|
||||
enum {kISAC = -1};
|
||||
# endif
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_PCM16
|
||||
// Mono
|
||||
enum {kPCM16B = -1};
|
||||
enum {kPCM16Bwb = -1};
|
||||
enum {kPCM16Bswb32kHz = -1};
|
||||
// Stereo
|
||||
enum {kPCM16B_2ch = -1};
|
||||
enum {kPCM16Bwb_2ch = -1};
|
||||
enum {kPCM16Bswb32kHz_2ch = -1};
|
||||
#endif
|
||||
// 48 kHz not supported, always set to -1.
|
||||
enum {kPCM16Bswb48kHz = -1};
|
||||
#ifndef WEBRTC_CODEC_ILBC
|
||||
enum {kILBC = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_AMR
|
||||
enum {kGSMAMR = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_AMRWB
|
||||
enum {kGSMAMRWB = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_CELT
|
||||
// Mono
|
||||
enum {kCELT32 = -1};
|
||||
// Stereo
|
||||
enum {kCELT32_2ch = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_G722
|
||||
// Mono
|
||||
enum {kG722 = -1};
|
||||
// Stereo
|
||||
enum {kG722_2ch = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_G722_1
|
||||
enum {kG722_1_32 = -1};
|
||||
enum {kG722_1_24 = -1};
|
||||
enum {kG722_1_16 = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_G722_1C
|
||||
enum {kG722_1C_48 = -1};
|
||||
enum {kG722_1C_32 = -1};
|
||||
enum {kG722_1C_24 = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_G729
|
||||
enum {kG729 = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_G729_1
|
||||
enum {kG729_1 = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_GSMFR
|
||||
enum {kGSMFR = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_SPEEX
|
||||
enum {kSPEEX8 = -1};
|
||||
enum {kSPEEX16 = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_OPUS
|
||||
// Mono and stereo
|
||||
enum {kOpus = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_AVT
|
||||
enum {kAVT = -1};
|
||||
#endif
|
||||
#ifndef WEBRTC_CODEC_RED
|
||||
enum {kRED = -1};
|
||||
#endif
|
||||
#ifndef ENABLE_48000_HZ
|
||||
enum { kCNFB = -1 };
|
||||
#endif
|
||||
|
||||
// kMaxNumCodecs - Maximum number of codecs that can be activated in one
|
||||
// build.
|
||||
// kMaxNumPacketSize - Maximum number of allowed packet sizes for one codec.
|
||||
// These might need to be increased if adding a new codec to the database
|
||||
static const int kMaxNumCodecs = 50;
|
||||
static const int kMaxNumPacketSize = 6;
|
||||
|
||||
// Codec specific settings
|
||||
//
|
||||
// num_packet_sizes - number of allowed packet sizes.
|
||||
// packet_sizes_samples - list of the allowed packet sizes.
|
||||
// basic_block_samples - assigned a value different from 0 if the codec
|
||||
// requires to be fed with a specific number of samples
|
||||
// that can be different from packet size.
|
||||
// channel_support - number of channels supported to encode;
|
||||
// 1 = mono, 2 = stereo, etc.
|
||||
// owns_decoder - if true, it means that the codec should own the
|
||||
// decoder instance. In this case, the codec should
|
||||
// implement ACMGenericCodec::Decoder(), which returns
|
||||
// a pointer to AudioDecoder. This pointer is injected
|
||||
// into NetEq when this codec is registered as receive
|
||||
// codec.
|
||||
struct CodecSettings {
|
||||
int num_packet_sizes;
|
||||
int packet_sizes_samples[kMaxNumPacketSize];
|
||||
int basic_block_samples;
|
||||
int channel_support;
|
||||
bool owns_decoder;
|
||||
};
|
||||
|
||||
// Gets codec information from database at the position in database given by
|
||||
// [codec_id].
|
||||
// Input:
|
||||
// [codec_id] - number that specifies at what position in the database to
|
||||
// get the information.
|
||||
// Output:
|
||||
// [codec_inst] - filled with information about the codec.
|
||||
// Return:
|
||||
// 0 if successful, otherwise -1.
|
||||
static int Codec(int codec_id, CodecInst* codec_inst);
|
||||
|
||||
// Returns codec id and mirror id from database, given the information
|
||||
// received in the input [codec_inst]. Mirror id is a number that tells
|
||||
// where to find the codec's memory (instance). The number is either the
|
||||
// same as codec id (most common), or a number pointing at a different
|
||||
// entry in the database, if the codec has several entries with different
|
||||
// payload types. This is used for codecs that must share one struct even if
|
||||
// the payload type differs.
|
||||
// One example is the codec iSAC which has the same struct for both 16 and
|
||||
// 32 khz, but they have different entries in the database. Let's say the
|
||||
// function is called with iSAC 32kHz. The function will return 1 as that is
|
||||
// the entry in the data base, and [mirror_id] = 0, as that is the entry for
|
||||
// iSAC 16 kHz, which holds the shared memory.
|
||||
// Input:
|
||||
// [codec_inst] - Information about the codec for which we require the
|
||||
// database id.
|
||||
// Output:
|
||||
// [mirror_id] - mirror id, which most often is the same as the return
|
||||
// value, see above.
|
||||
// [err_message] - if present, in the event of a mismatch found between the
|
||||
// input and the database, a descriptive error message is
|
||||
// written here.
|
||||
// [err_message] - if present, the length of error message is returned here.
|
||||
// Return:
|
||||
// codec id if successful, otherwise < 0.
|
||||
static int CodecNumber(const CodecInst& codec_inst, int* mirror_id,
|
||||
char* err_message, int max_message_len_byte);
|
||||
static int CodecNumber(const CodecInst& codec_inst, int* mirror_id);
|
||||
static int CodecId(const CodecInst& codec_inst);
|
||||
static int CodecId(const char* payload_name, int frequency, int channels);
|
||||
static int ReceiverCodecNumber(const CodecInst& codec_inst, int* mirror_id);
|
||||
|
||||
// Returns the codec sampling frequency for codec with id = "codec_id" in
|
||||
// database.
|
||||
// TODO(tlegrand): Check if function is needed, or if we can change
|
||||
// to access database directly.
|
||||
// Input:
|
||||
// [codec_id] - number that specifies at what position in the database to
|
||||
// get the information.
|
||||
// Return:
|
||||
// codec sampling frequency if successful, otherwise -1.
|
||||
static int CodecFreq(int codec_id);
|
||||
|
||||
// Return the codec's basic coding block size in samples.
|
||||
// TODO(tlegrand): Check if function is needed, or if we can change
|
||||
// to access database directly.
|
||||
// Input:
|
||||
// [codec_id] - number that specifies at what position in the database to
|
||||
// get the information.
|
||||
// Return:
|
||||
// codec basic block size if successful, otherwise -1.
|
||||
static int BasicCodingBlock(int codec_id);
|
||||
|
||||
// Returns the NetEQ decoder database.
|
||||
static const NetEqDecoder* NetEQDecoders();
|
||||
|
||||
// Returns mirror id, which is a number that tells where to find the codec's
|
||||
// memory (instance). It is either the same as codec id (most common), or a
|
||||
// number pointing at a different entry in the database, if the codec have
|
||||
// several entries with different payload types. This is used for codecs that
|
||||
// must share struct even if the payload type differs.
|
||||
// TODO(tlegrand): Check if function is needed, or if we can change
|
||||
// to access database directly.
|
||||
// Input:
|
||||
// [codec_id] - number that specifies codec's position in the database.
|
||||
// Return:
|
||||
// Mirror id on success, otherwise -1.
|
||||
static int MirrorID(int codec_id);
|
||||
|
||||
// Create memory/instance for storing codec state.
|
||||
// Input:
|
||||
// [codec_inst] - information about codec. Only name of codec, "plname", is
|
||||
// used in this function.
|
||||
static ACMGenericCodec* CreateCodecInstance(const CodecInst& codec_inst);
|
||||
|
||||
// Specifies if the codec specified by |codec_id| MUST own its own decoder.
|
||||
// This is the case for codecs which *should* share a single codec instance
|
||||
// between encoder and decoder. Or for codecs which ACM should have control
|
||||
// over the decoder. For instance iSAC is such a codec that encoder and
|
||||
// decoder share the same codec instance.
|
||||
static bool OwnsDecoder(int codec_id);
|
||||
|
||||
// Checks if the bitrate is valid for the codec.
|
||||
// Input:
|
||||
// [codec_id] - number that specifies codec's position in the database.
|
||||
// [rate] - bitrate to check.
|
||||
// [frame_size_samples] - (used for iLBC) specifies which frame size to go
|
||||
// with the rate.
|
||||
static bool IsRateValid(int codec_id, int rate);
|
||||
static bool IsISACRateValid(int rate);
|
||||
static bool IsILBCRateValid(int rate, int frame_size_samples);
|
||||
static bool IsAMRRateValid(int rate);
|
||||
static bool IsAMRwbRateValid(int rate);
|
||||
static bool IsG7291RateValid(int rate);
|
||||
static bool IsSpeexRateValid(int rate);
|
||||
static bool IsOpusRateValid(int rate);
|
||||
static bool IsCeltRateValid(int rate);
|
||||
|
||||
// Check if the payload type is valid, meaning that it is in the valid range
|
||||
// of 0 to 127.
|
||||
// Input:
|
||||
// [payload_type] - payload type.
|
||||
static bool ValidPayloadType(int payload_type);
|
||||
|
||||
// Databases with information about the supported codecs
|
||||
// database_ - stored information about all codecs: payload type, name,
|
||||
// sampling frequency, packet size in samples, default channel
|
||||
// support, and default rate.
|
||||
// codec_settings_ - stored codec settings: number of allowed packet sizes,
|
||||
// a vector with the allowed packet sizes, basic block
|
||||
// samples, and max number of channels that are supported.
|
||||
// neteq_decoders_ - list of supported decoders in NetEQ.
|
||||
static const CodecInst database_[kMaxNumCodecs];
|
||||
static const CodecSettings codec_settings_[kMaxNumCodecs];
|
||||
static const NetEqDecoder neteq_decoders_[kMaxNumCodecs];
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_CODEC_DATABASE_H_
|
||||
100
jni/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
Normal file
100
jni/webrtc/modules/audio_coding/main/acm2/acm_common_defs.h
Normal file
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/engine_configurations.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/typedefs.h"
|
||||
|
||||
// Checks for enabled codecs, we prevent enabling codecs which are not
|
||||
// compatible.
|
||||
#if ((defined WEBRTC_CODEC_ISAC) && (defined WEBRTC_CODEC_ISACFX))
|
||||
#error iSAC and iSACFX codecs cannot be enabled at the same time
|
||||
#endif
|
||||
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// 60 ms is the maximum block size we support. An extra 20 ms is considered
|
||||
// for safety if process() method is not called when it should be, i.e. we
|
||||
// accept 20 ms of jitter. 80 ms @ 48 kHz (full-band) stereo is 7680 samples.
|
||||
#define AUDIO_BUFFER_SIZE_W16 7680
|
||||
|
||||
// There is one timestamp per each 10 ms of audio
|
||||
// the audio buffer, at max, may contain 32 blocks of 10ms
|
||||
// audio if the sampling frequency is 8000 Hz (80 samples per block).
|
||||
// Therefore, The size of the buffer where we keep timestamps
|
||||
// is defined as follows
|
||||
#define TIMESTAMP_BUFFER_SIZE_W32 (AUDIO_BUFFER_SIZE_W16/80)
|
||||
|
||||
// The maximum size of a payload, that is 60 ms of PCM-16 @ 32 kHz stereo
|
||||
#define MAX_PAYLOAD_SIZE_BYTE 7680
|
||||
|
||||
// General codec specific defines
|
||||
const int kIsacWbDefaultRate = 32000;
|
||||
const int kIsacSwbDefaultRate = 56000;
|
||||
const int kIsacPacSize480 = 480;
|
||||
const int kIsacPacSize960 = 960;
|
||||
const int kIsacPacSize1440 = 1440;
|
||||
|
||||
// An encoded bit-stream is labeled by one of the following enumerators.
|
||||
//
|
||||
// kNoEncoding : There has been no encoding.
|
||||
// kActiveNormalEncoded : Active audio frame coded by the codec.
|
||||
// kPassiveNormalEncoded : Passive audio frame coded by the codec.
|
||||
// kPassiveDTXNB : Passive audio frame coded by narrow-band CN.
|
||||
// kPassiveDTXWB : Passive audio frame coded by wide-band CN.
|
||||
// kPassiveDTXSWB : Passive audio frame coded by super-wide-band CN.
|
||||
// kPassiveDTXFB : Passive audio frame coded by full-band CN.
|
||||
enum WebRtcACMEncodingType {
|
||||
kNoEncoding,
|
||||
kActiveNormalEncoded,
|
||||
kPassiveNormalEncoded,
|
||||
kPassiveDTXNB,
|
||||
kPassiveDTXWB,
|
||||
kPassiveDTXSWB,
|
||||
kPassiveDTXFB
|
||||
};
|
||||
|
||||
// A structure which contains codec parameters. For instance, used when
|
||||
// initializing encoder and decoder.
|
||||
//
|
||||
// codec_inst: c.f. common_types.h
|
||||
// enable_dtx: set true to enable DTX. If codec does not have
|
||||
// internal DTX, this will enable VAD.
|
||||
// enable_vad: set true to enable VAD.
|
||||
// vad_mode: VAD mode, c.f. audio_coding_module_typedefs.h
|
||||
// for possible values.
|
||||
struct WebRtcACMCodecParams {
|
||||
CodecInst codec_inst;
|
||||
bool enable_dtx;
|
||||
bool enable_vad;
|
||||
ACMVADMode vad_mode;
|
||||
};
|
||||
|
||||
// TODO(turajs): Remove when ACM1 is removed.
|
||||
// Holds buffered input audio and per-10 ms block timestamps for ACM1.
// TODO(turajs): Remove when ACM1 is removed.
struct WebRtcACMAudioBuff {
  int16_t in_audio[AUDIO_BUFFER_SIZE_W16];  // Buffered input samples.
  int16_t in_audio_ix_read;                 // Read index into in_audio.
  int16_t in_audio_ix_write;                // Write index into in_audio.
  // One timestamp per 10 ms block of buffered audio (see
  // TIMESTAMP_BUFFER_SIZE_W32 above).
  uint32_t in_timestamp[TIMESTAMP_BUFFER_SIZE_W32];
  int16_t in_timestamp_ix_write;  // Write index into in_timestamp.
  uint32_t last_timestamp;
  uint32_t last_in_timestamp;
};
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_COMMON_DEFS_H_
|
||||
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_dtmf_playout.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_AVT
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_AVT

// Stub implementation, compiled when the AVT (telephone-event) codec is
// disabled: every operation fails with -1 / NULL or is a no-op.

ACMDTMFPlayout::ACMDTMFPlayout(int16_t /* codec_id */) { return; }

ACMDTMFPlayout::~ACMDTMFPlayout() { return; }

int16_t ACMDTMFPlayout::InternalEncode(uint8_t* /* bitstream */,
                                       int16_t* /* bitstream_len_byte */) {
  return -1;
}

int16_t ACMDTMFPlayout::InternalInitEncoder(
    WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}

ACMGenericCodec* ACMDTMFPlayout::CreateInstance(void) { return NULL; }

int16_t ACMDTMFPlayout::InternalCreateEncoder() { return -1; }

void ACMDTMFPlayout::InternalDestructEncoderInst(void* /* ptr_inst */) {
  return;
}

void ACMDTMFPlayout::DestructEncoderSafe() {
  return;
}

#else  //===================== Actual Implementation =======================

// DTMF playout carries no encoder state of its own, so initialization and
// teardown are no-ops that report success.

ACMDTMFPlayout::ACMDTMFPlayout(int16_t codec_id) { codec_id_ = codec_id; }

ACMDTMFPlayout::~ACMDTMFPlayout() { return; }

int16_t ACMDTMFPlayout::InternalEncode(uint8_t* /* bitstream */,
                                       int16_t* /* bitstream_len_byte */) {
  return 0;
}

int16_t ACMDTMFPlayout::InternalInitEncoder(
    WebRtcACMCodecParams* /* codec_params */) {
  // This codec does not need initialization,
  // DTMFPlayout has no instance
  return 0;
}

ACMGenericCodec* ACMDTMFPlayout::CreateInstance(void) { return NULL; }

int16_t ACMDTMFPlayout::InternalCreateEncoder() {
  // DTMFPlayout has no instance
  return 0;
}

void ACMDTMFPlayout::InternalDestructEncoderInst(void* /* ptr_inst */) {
  // DTMFPlayout has no instance
  return;
}

void ACMDTMFPlayout::DestructEncoderSafe() {
  // DTMFPlayout has no instance
  return;
}

#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
44
jni/webrtc/modules/audio_coding/main/acm2/acm_dtmf_playout.h
Normal file
44
jni/webrtc/modules/audio_coding/main/acm2/acm_dtmf_playout.h
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DTMF_PLAYOUT_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DTMF_PLAYOUT_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMDTMFPlayout : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMDTMFPlayout(int16_t codec_id);
|
||||
~ACMDTMFPlayout();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_DTMF_PLAYOUT_H_
|
||||
201
jni/webrtc/modules/audio_coding/main/acm2/acm_g722.cc
Normal file
201
jni/webrtc/modules/audio_coding/main/acm2/acm_g722.cc
Normal file
@@ -0,0 +1,201 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g722.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_G722
|
||||
#include "webrtc/modules/audio_coding/codecs/g722/include/g722_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_G722
|
||||
|
||||
// Stub implementation, compiled when G.722 is disabled: every operation
// fails with -1 / NULL or is a no-op.

ACMG722::ACMG722(int16_t /* codec_id */)
    : ptr_enc_str_(NULL),
      encoder_inst_ptr_(NULL),
      encoder_inst_ptr_right_(NULL) {}

ACMG722::~ACMG722() {}

int32_t ACMG722::Add10MsDataSafe(const uint32_t /* timestamp */,
                                 const int16_t* /* data */,
                                 const uint16_t /* length_smpl */,
                                 const uint8_t /* audio_channel */) {
  return -1;
}

int16_t ACMG722::InternalEncode(uint8_t* /* bitstream */,
                                int16_t* /* bitstream_len_byte */) {
  return -1;
}

int16_t ACMG722::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}

ACMGenericCodec* ACMG722::CreateInstance(void) { return NULL; }

int16_t ACMG722::InternalCreateEncoder() { return -1; }

void ACMG722::DestructEncoderSafe() { return; }

void ACMG722::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
// Encoder and decoder memory
|
||||
struct ACMG722EncStr {
|
||||
G722EncInst* inst; // instance for left channel in case of stereo
|
||||
G722EncInst* inst_right; // instance for right channel in case of stereo
|
||||
};
|
||||
struct ACMG722DecStr {
|
||||
G722DecInst* inst; // instance for left channel in case of stereo
|
||||
G722DecInst* inst_right; // instance for right channel in case of stereo
|
||||
};
|
||||
|
||||
// Allocates the encoder holder struct; the G.722 encoder instances
// themselves are created later (see InternalInitEncoder, which creates the
// right-channel instance for stereo).
ACMG722::ACMG722(int16_t codec_id)
    : encoder_inst_ptr_(NULL), encoder_inst_ptr_right_(NULL) {
  ptr_enc_str_ = new ACMG722EncStr;
  // Allocation failure leaves ptr_enc_str_ NULL; later calls null-check it.
  if (ptr_enc_str_ != NULL) {
    ptr_enc_str_->inst = NULL;
    ptr_enc_str_->inst_right = NULL;
  }
  codec_id_ = codec_id;
  return;
}
|
||||
|
||||
// Frees both encoder channel instances (the right-channel instance exists
// only if stereo was configured) and releases the holder struct.
ACMG722::~ACMG722() {
  // Encoder
  if (ptr_enc_str_ != NULL) {
    if (ptr_enc_str_->inst != NULL) {
      WebRtcG722_FreeEncoder(ptr_enc_str_->inst);
      ptr_enc_str_->inst = NULL;
    }
    if (ptr_enc_str_->inst_right != NULL) {
      WebRtcG722_FreeEncoder(ptr_enc_str_->inst_right);
      ptr_enc_str_->inst_right = NULL;
    }
    delete ptr_enc_str_;
    ptr_enc_str_ = NULL;
  }
  return;
}
|
||||
|
||||
// Forwards 10 ms of input audio to the base class with the timestamp halved.
// NOTE(review): presumably because G.722's RTP clock rate (8 kHz) is half
// its 16 kHz sampling rate per RFC 3551 — confirm against the RTP packing.
int32_t ACMG722::Add10MsDataSafe(const uint32_t timestamp,
                                 const int16_t* data,
                                 const uint16_t length_smpl,
                                 const uint8_t audio_channel) {
  return ACMGenericCodec::Add10MsDataSafe(
      (timestamp >> 1), data, length_smpl, audio_channel);
}
|
||||
|
||||
// Encodes one frame from the internal input buffer into |bitstream|.
// For stereo, the two channels are encoded independently and the resulting
// 4-bit codewords are re-interleaved; for mono the encoder writes directly
// into |bitstream|. Returns the number of bytes written (also stored in
// *bitstream_len_byte).
int16_t ACMG722::InternalEncode(uint8_t* bitstream,
                                int16_t* bitstream_len_byte) {
  // If stereo, split input signal in left and right channel before encoding
  if (num_channels_ == 2) {
    // Fixed-size scratch buffers: sized for the maximum frame handled here
    // (960 interleaved samples in, 480 coded bytes out per channel).
    int16_t left_channel[960];
    int16_t right_channel[960];
    uint8_t out_left[480];
    uint8_t out_right[480];
    int16_t len_in_bytes;
    // De-interleave: even samples are left channel, odd samples are right.
    for (int i = 0, j = 0; i < frame_len_smpl_ * 2; i += 2, j++) {
      left_channel[j] = in_audio_[in_audio_ix_read_ + i];
      right_channel[j] = in_audio_[in_audio_ix_read_ + i + 1];
    }
    // Encode each channel separately; total length is the sum of both.
    len_in_bytes = WebRtcG722_Encode(
        encoder_inst_ptr_, left_channel, frame_len_smpl_,
        reinterpret_cast<int16_t*>(out_left));
    len_in_bytes += WebRtcG722_Encode(encoder_inst_ptr_right_,
                                      right_channel,
                                      frame_len_smpl_,
                                      reinterpret_cast<int16_t*>(out_right));
    *bitstream_len_byte = len_in_bytes;

    // Interleave the 4 bits per sample from left and right channel:
    // each output byte pairs a left nibble with a right nibble.
    for (int i = 0, j = 0; i < len_in_bytes; i += 2, j++) {
      bitstream[i] = (out_left[j] & 0xF0) + (out_right[j] >> 4);
      bitstream[i + 1] = ((out_left[j] & 0x0F) << 4) + (out_right[j] & 0x0F);
    }
  } else {
    // Mono: encode straight from the input buffer into |bitstream|.
    *bitstream_len_byte = WebRtcG722_Encode(
        encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], frame_len_smpl_,
        reinterpret_cast<int16_t*>(bitstream));
  }

  // Increment the read index; this tells the caller how far we have gone
  // forward in reading the audio buffer.
  in_audio_ix_read_ += frame_len_smpl_ * num_channels_;
  return *bitstream_len_byte;
}
|
||||
|
||||
// (Re)initialize the native encoder(s). In stereo mode the right-channel
// instance is created on demand and initialized first; the left/mono
// instance is always initialized last and supplies the return value.
int16_t ACMG722::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  const bool stereo = (codec_params->codec_inst.channels == 2);
  if (stereo) {
    if (ptr_enc_str_->inst_right == NULL) {
      WebRtcG722_CreateEncoder(&ptr_enc_str_->inst_right);
      if (ptr_enc_str_->inst_right == NULL) {
        return -1;
      }
    }
    encoder_inst_ptr_right_ = ptr_enc_str_->inst_right;
    if (WebRtcG722_EncoderInit(encoder_inst_ptr_right_) < 0) {
      return -1;
    }
  }
  return WebRtcG722_EncoderInit(encoder_inst_ptr_);
}
|
||||
|
||||
// FEC duplication hook; not supported for G.722.
ACMGenericCodec* ACMG722::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// Allocate the left/mono native encoder instance. The state holder must
// already exist -- it is created in the constructor; if it is still NULL
// something went wrong there and we do not continue.
int16_t ACMG722::InternalCreateEncoder() {
  if (ptr_enc_str_ == NULL) {
    return -1;
  }
  WebRtcG722_CreateEncoder(&ptr_enc_str_->inst);
  if (ptr_enc_str_->inst == NULL) {
    return -1;
  }
  encoder_inst_ptr_ = ptr_enc_str_->inst;
  return 0;
}
|
||||
|
||||
void ACMG722::DestructEncoderSafe() {
|
||||
if (ptr_enc_str_ != NULL) {
|
||||
if (ptr_enc_str_->inst != NULL) {
|
||||
WebRtcG722_FreeEncoder(ptr_enc_str_->inst);
|
||||
ptr_enc_str_->inst = NULL;
|
||||
}
|
||||
}
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
}
|
||||
|
||||
void ACMG722::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcG722_FreeEncoder(static_cast<G722EncInst*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g722.h
Normal file
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g722.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G722_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G722_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
|
||||
typedef struct WebRtcG722EncInst G722EncInst;
|
||||
typedef struct WebRtcG722DecInst G722DecInst;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// Forward declaration.
|
||||
struct ACMG722EncStr;
|
||||
struct ACMG722DecStr;
|
||||
|
||||
class ACMG722 : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMG722(int16_t codec_id);
|
||||
~ACMG722();
|
||||
|
||||
// For FEC.
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) OVERRIDE
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
int32_t Add10MsDataSafe(const uint32_t timestamp,
|
||||
const int16_t* data,
|
||||
const uint16_t length_smpl,
|
||||
const uint8_t audio_channel)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
void DestructEncoderSafe() OVERRIDE
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
ACMG722EncStr* ptr_enc_str_;
|
||||
|
||||
G722EncInst* encoder_inst_ptr_;
|
||||
G722EncInst* encoder_inst_ptr_right_; // Prepared for stereo
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G722_H_
|
||||
330
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221.cc
Normal file
330
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221.cc
Normal file
@@ -0,0 +1,330 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7221.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_G722_1
|
||||
// NOTE! G.722.1 is not included in the open-source package. The following
|
||||
// interface file is needed:
|
||||
#include "webrtc/modules/audio_coding/main/codecs/g7221/interface/g7221_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// The API in the header file should match the one below.
|
||||
//
|
||||
// int16_t WebRtcG7221_CreateEnc16(G722_1_16_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_CreateEnc24(G722_1_24_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_CreateEnc32(G722_1_32_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_CreateDec16(G722_1_16_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221_CreateDec24(G722_1_24_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221_CreateDec32(G722_1_32_decinst_t_** dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221_FreeEnc16(G722_1_16_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_FreeEnc24(G722_1_24_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_FreeEnc32(G722_1_32_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221_FreeDec16(G722_1_16_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221_FreeDec24(G722_1_24_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221_FreeDec32(G722_1_32_decinst_t_** dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221_EncoderInit16(G722_1_16_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221_EncoderInit24(G722_1_24_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221_EncoderInit32(G722_1_32_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221_DecoderInit16(G722_1_16_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcG7221_DecoderInit24(G722_1_24_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcG7221_DecoderInit32(G722_1_32_decinst_t_* dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221_Encode16(G722_1_16_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221_Encode24(G722_1_24_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221_Encode32(G722_1_32_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
//
|
||||
// int16_t WebRtcG7221_Decode16(G722_1_16_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221_Decode24(G722_1_24_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221_Decode32(G722_1_32_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
//
|
||||
// int16_t WebRtcG7221_DecodePlc16(G722_1_16_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
// int16_t WebRtcG7221_DecodePlc24(G722_1_24_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
// int16_t WebRtcG7221_DecodePlc32(G722_1_32_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_G722_1
|
||||
|
||||
ACMG722_1::ACMG722_1(int16_t /* codec_id */)
|
||||
: operational_rate_(-1),
|
||||
encoder_inst_ptr_(NULL),
|
||||
encoder_inst_ptr_right_(NULL),
|
||||
encoder_inst16_ptr_(NULL),
|
||||
encoder_inst16_ptr_right_(NULL),
|
||||
encoder_inst24_ptr_(NULL),
|
||||
encoder_inst24_ptr_right_(NULL),
|
||||
encoder_inst32_ptr_(NULL),
|
||||
encoder_inst32_ptr_right_(NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMG722_1::~ACMG722_1() { return; }
|
||||
|
||||
int16_t ACMG722_1::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMG722_1::InternalInitEncoder(
|
||||
WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMG722_1::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMG722_1::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMG722_1::DestructEncoderSafe() { return; }
|
||||
|
||||
void ACMG722_1::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
// Map the codec id to the operational bit rate. An unrecognized id leaves
// operational_rate_ at -1, which every later switch treats as an error.
ACMG722_1::ACMG722_1(int16_t codec_id)
    : encoder_inst_ptr_(NULL),
      encoder_inst_ptr_right_(NULL),
      encoder_inst16_ptr_(NULL),
      encoder_inst16_ptr_right_(NULL),
      encoder_inst24_ptr_(NULL),
      encoder_inst24_ptr_right_(NULL),
      encoder_inst32_ptr_(NULL),
      encoder_inst32_ptr_right_(NULL) {
  codec_id_ = codec_id;
  operational_rate_ = -1;
  if (codec_id_ == ACMCodecDB::kG722_1_16) {
    operational_rate_ = 16000;
  } else if (codec_id_ == ACMCodecDB::kG722_1_24) {
    operational_rate_ = 24000;
  } else if (codec_id_ == ACMCodecDB::kG722_1_32) {
    operational_rate_ = 32000;
  }
}
|
||||
|
||||
// Release the shared instance wrappers and drop the rate-specific pointers
// for the active rate.
// NOTE(review): G722_1_Inst_t_ is only forward-declared here, so `delete`
// frees memory without running a destructor -- confirm the type is trivially
// destructible in the proprietary G.722.1 package. Also, the rate-specific
// instances created with WebRtcG7221_CreateEnc* are never passed to a
// matching WebRtcG7221_FreeEnc* anywhere in this file -- verify ownership.
ACMG722_1::~ACMG722_1() {
  if (encoder_inst_ptr_ != NULL) {
    delete encoder_inst_ptr_;
    encoder_inst_ptr_ = NULL;
  }
  if (encoder_inst_ptr_right_ != NULL) {
    delete encoder_inst_ptr_right_;
    encoder_inst_ptr_right_ = NULL;
  }

  switch (operational_rate_) {
    case 16000:
      encoder_inst16_ptr_ = NULL;
      encoder_inst16_ptr_right_ = NULL;
      break;
    case 24000:
      encoder_inst24_ptr_ = NULL;
      encoder_inst24_ptr_right_ = NULL;
      break;
    case 32000:
      encoder_inst32_ptr_ = NULL;
      encoder_inst32_ptr_right_ = NULL;
      break;
    default:
      break;
  }
}
|
||||
|
||||
// Encode one 20 ms frame (320 samples per channel at 16 kHz) into
// |bitstream|; returns the payload length in bytes (also written to
// |bitstream_len_byte|), or -1 on a bad operational rate. Stereo input is
// deinterleaved and each channel encoded on its own instance, with the
// right-channel payload appended after the left.
int16_t ACMG722_1::InternalEncode(uint8_t* bitstream,
                                  int16_t* bitstream_len_byte) {
  int16_t left_channel[320];
  int16_t right_channel[320];
  int16_t len_in_bytes;
  int16_t out_bits[160];

  // If stereo, split input signal in left and right channel before encoding
  if (num_channels_ == 2) {
    for (int i = 0, j = 0; i < frame_len_smpl_ * 2; i += 2, j++) {
      left_channel[j] = in_audio_[in_audio_ix_read_ + i];
      right_channel[j] = in_audio_[in_audio_ix_read_ + i + 1];
    }
  } else {
    // BUG FIX: memcpy counts bytes, not samples. The original copied only
    // 320 bytes (160 int16_t samples), leaving the second half of the
    // 320-sample frame uninitialized before encoding.
    memcpy(left_channel, &in_audio_[in_audio_ix_read_],
           320 * sizeof(int16_t));
  }

  switch (operational_rate_) {
    case 16000: {
      len_in_bytes = WebRtcG7221_Encode16(encoder_inst16_ptr_, left_channel,
                                          320, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221_Encode16(encoder_inst16_ptr_right_,
                                             right_channel, 320,
                                             &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    case 24000: {
      len_in_bytes = WebRtcG7221_Encode24(encoder_inst24_ptr_, left_channel,
                                          320, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221_Encode24(encoder_inst24_ptr_right_,
                                             right_channel, 320,
                                             &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    case 32000: {
      len_in_bytes = WebRtcG7221_Encode32(encoder_inst32_ptr_, left_channel,
                                          320, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221_Encode32(encoder_inst32_ptr_right_,
                                             right_channel, 320,
                                             &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    default: {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalInitEncode: Wrong rate for G722_1.");
      return -1;
    }
  }
  memcpy(bitstream, out_bits, len_in_bytes);
  *bitstream_len_byte = len_in_bytes;

  // Advance the shared read index so the caller knows how much of the
  // audio buffer was consumed.
  in_audio_ix_read_ += 320 * num_channels_;
  return *bitstream_len_byte;
}
|
||||
|
||||
// Initialize the rate-specific native encoders. The right-channel instance
// is initialized first; on success the left/mono instance's init result is
// returned. Fails with -1 for an unrecognized operational rate.
// (|codec_params| is unused here; the rate was fixed at construction.)
int16_t ACMG722_1::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  int16_t status;

  switch (operational_rate_) {
    case 16000: {
      status = WebRtcG7221_EncoderInit16(encoder_inst16_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221_EncoderInit16(encoder_inst16_ptr_);
    }
    case 24000: {
      status = WebRtcG7221_EncoderInit24(encoder_inst24_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221_EncoderInit24(encoder_inst24_ptr_);
    }
    case 32000: {
      status = WebRtcG7221_EncoderInit32(encoder_inst32_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221_EncoderInit32(encoder_inst32_ptr_);
    }
    default: {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding,
                   unique_id_, "InternalInitEncoder: Wrong rate for G722_1.");
      return -1;
    }
  }
}
|
||||
|
||||
// FEC duplication hook; not supported for G.722.1.
ACMGenericCodec* ACMG722_1::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// Create both per-channel native encoder instances for the configured
// rate. Requires the shared instance wrappers to exist already (they are
// set up elsewhere -- presumably before this is called; verify against the
// base-class call sequence).
int16_t ACMG722_1::InternalCreateEncoder() {
  if ((encoder_inst_ptr_ == NULL) || (encoder_inst_ptr_right_ == NULL)) {
    return -1;
  }
  switch (operational_rate_) {
    case 16000:
      WebRtcG7221_CreateEnc16(&encoder_inst16_ptr_);
      WebRtcG7221_CreateEnc16(&encoder_inst16_ptr_right_);
      break;
    case 24000:
      WebRtcG7221_CreateEnc24(&encoder_inst24_ptr_);
      WebRtcG7221_CreateEnc24(&encoder_inst24_ptr_right_);
      break;
    case 32000:
      WebRtcG7221_CreateEnc32(&encoder_inst32_ptr_);
      WebRtcG7221_CreateEnc32(&encoder_inst32_ptr_right_);
      break;
    default:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalCreateEncoder: Wrong rate for G722_1.");
      return -1;
  }
  return 0;
}
|
||||
|
||||
void ACMG722_1::DestructEncoderSafe() {
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
delete encoder_inst_ptr_;
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
if (encoder_inst_ptr_right_ != NULL) {
|
||||
delete encoder_inst_ptr_right_;
|
||||
encoder_inst_ptr_right_ = NULL;
|
||||
}
|
||||
encoder_inst16_ptr_ = NULL;
|
||||
encoder_inst24_ptr_ = NULL;
|
||||
encoder_inst32_ptr_ = NULL;
|
||||
}
|
||||
|
||||
void ACMG722_1::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
delete ptr_inst;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221.h
Normal file
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct G722_1_16_encinst_t_;
|
||||
struct G722_1_16_decinst_t_;
|
||||
struct G722_1_24_encinst_t_;
|
||||
struct G722_1_24_decinst_t_;
|
||||
struct G722_1_32_encinst_t_;
|
||||
struct G722_1_32_decinst_t_;
|
||||
struct G722_1_Inst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMG722_1 : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMG722_1(int16_t codec_id);
|
||||
~ACMG722_1();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
int32_t operational_rate_;
|
||||
|
||||
G722_1_Inst_t_* encoder_inst_ptr_;
|
||||
G722_1_Inst_t_* encoder_inst_ptr_right_; // Used in stereo mode
|
||||
|
||||
// Only one set of these pointer is valid at any instance
|
||||
G722_1_16_encinst_t_* encoder_inst16_ptr_;
|
||||
G722_1_16_encinst_t_* encoder_inst16_ptr_right_;
|
||||
G722_1_24_encinst_t_* encoder_inst24_ptr_;
|
||||
G722_1_24_encinst_t_* encoder_inst24_ptr_right_;
|
||||
G722_1_32_encinst_t_* encoder_inst32_ptr_;
|
||||
G722_1_32_encinst_t_* encoder_inst32_ptr_right_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221_H_
|
||||
336
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221c.cc
Normal file
336
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221c.cc
Normal file
@@ -0,0 +1,336 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7221c.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_G722_1C
|
||||
// NOTE! G.722.1C is not included in the open-source package. The following
|
||||
// interface file is needed:
|
||||
#include "webrtc/modules/audio_coding/main/codecs/g7221c/interface/g7221c_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// The API in the header file should match the one below.
|
||||
//
|
||||
// int16_t WebRtcG7221C_CreateEnc24(G722_1C_24_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_CreateEnc32(G722_1C_32_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_CreateEnc48(G722_1C_48_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_CreateDec24(G722_1C_24_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221C_CreateDec32(G722_1C_32_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221C_CreateDec48(G722_1C_48_decinst_t_** dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221C_FreeEnc24(G722_1C_24_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_FreeEnc32(G722_1C_32_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_FreeEnc48(G722_1C_48_encinst_t_** enc_inst);
|
||||
// int16_t WebRtcG7221C_FreeDec24(G722_1C_24_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221C_FreeDec32(G722_1C_32_decinst_t_** dec_inst);
|
||||
// int16_t WebRtcG7221C_FreeDec48(G722_1C_48_decinst_t_** dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221C_EncoderInit24(G722_1C_24_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221C_EncoderInit32(G722_1C_32_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221C_EncoderInit48(G722_1C_48_encinst_t_* enc_inst);
|
||||
// int16_t WebRtcG7221C_DecoderInit24(G722_1C_24_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcG7221C_DecoderInit32(G722_1C_32_decinst_t_* dec_inst);
|
||||
// int16_t WebRtcG7221C_DecoderInit48(G722_1C_48_decinst_t_* dec_inst);
|
||||
//
|
||||
// int16_t WebRtcG7221C_Encode24(G722_1C_24_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221C_Encode32(G722_1C_32_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221C_Encode48(G722_1C_48_encinst_t_* enc_inst,
|
||||
// int16_t* input,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
//
|
||||
// int16_t WebRtcG7221C_Decode24(G722_1C_24_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221C_Decode32(G722_1C_32_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
// int16_t WebRtcG7221C_Decode48(G722_1C_48_decinst_t_* dec_inst,
|
||||
// int16_t* bitstream,
|
||||
// int16_t len,
|
||||
// int16_t* output);
|
||||
//
|
||||
// int16_t WebRtcG7221C_DecodePlc24(G722_1C_24_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
// int16_t WebRtcG7221C_DecodePlc32(G722_1C_32_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
// int16_t WebRtcG7221C_DecodePlc48(G722_1C_48_decinst_t_* dec_inst,
|
||||
// int16_t* output,
|
||||
// int16_t nr_lost_frames);
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_G722_1C
|
||||
|
||||
ACMG722_1C::ACMG722_1C(int16_t /* codec_id */)
|
||||
: operational_rate_(-1),
|
||||
encoder_inst_ptr_(NULL),
|
||||
encoder_inst_ptr_right_(NULL),
|
||||
encoder_inst24_ptr_(NULL),
|
||||
encoder_inst24_ptr_right_(NULL),
|
||||
encoder_inst32_ptr_(NULL),
|
||||
encoder_inst32_ptr_right_(NULL),
|
||||
encoder_inst48_ptr_(NULL),
|
||||
encoder_inst48_ptr_right_(NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMG722_1C::~ACMG722_1C() { return; }
|
||||
|
||||
int16_t ACMG722_1C::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMG722_1C::InternalInitEncoder(
|
||||
WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMG722_1C::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMG722_1C::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMG722_1C::DestructEncoderSafe() { return; }
|
||||
|
||||
void ACMG722_1C::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
// Map the codec id to the operational bit rate. An unrecognized id is
// logged and leaves operational_rate_ at -1, which every later switch
// treats as an error.
ACMG722_1C::ACMG722_1C(int16_t codec_id)
    : encoder_inst_ptr_(NULL),
      encoder_inst_ptr_right_(NULL),
      encoder_inst24_ptr_(NULL),
      encoder_inst24_ptr_right_(NULL),
      encoder_inst32_ptr_(NULL),
      encoder_inst32_ptr_right_(NULL),
      encoder_inst48_ptr_(NULL),
      encoder_inst48_ptr_right_(NULL) {
  codec_id_ = codec_id;
  if (codec_id_ == ACMCodecDB::kG722_1C_24) {
    operational_rate_ = 24000;
  } else if (codec_id_ == ACMCodecDB::kG722_1C_32) {
    operational_rate_ = 32000;
  } else if (codec_id_ == ACMCodecDB::kG722_1C_48) {
    operational_rate_ = 48000;
  } else {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Wrong codec id for G722_1c.");
    operational_rate_ = -1;
  }
}
|
||||
|
||||
// Release the shared instance wrappers and drop the rate-specific pointers
// for the active rate.
// NOTE(review): G722_1_Inst_t_ is only forward-declared here, so `delete`
// frees memory without running a destructor -- confirm the type is trivially
// destructible in the proprietary G.722.1C package. Also, the rate-specific
// instances created with WebRtcG7221C_CreateEnc* are never passed to a
// matching WebRtcG7221C_FreeEnc* anywhere in this file -- verify ownership.
ACMG722_1C::~ACMG722_1C() {
  if (encoder_inst_ptr_ != NULL) {
    delete encoder_inst_ptr_;
    encoder_inst_ptr_ = NULL;
  }
  if (encoder_inst_ptr_right_ != NULL) {
    delete encoder_inst_ptr_right_;
    encoder_inst_ptr_right_ = NULL;
  }

  switch (operational_rate_) {
    case 24000:
      encoder_inst24_ptr_ = NULL;
      encoder_inst24_ptr_right_ = NULL;
      break;
    case 32000:
      encoder_inst32_ptr_ = NULL;
      encoder_inst32_ptr_right_ = NULL;
      break;
    case 48000:
      encoder_inst48_ptr_ = NULL;
      encoder_inst48_ptr_right_ = NULL;
      break;
    default:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "Wrong rate for G722_1c.");
      break;
  }
}
|
||||
|
||||
// Encode one 20 ms frame (640 samples per channel at 32 kHz) into
// |bitstream|; returns the payload length in bytes (also written to
// |bitstream_len_byte|), or -1 on a bad operational rate. Stereo input is
// deinterleaved and each channel encoded on its own instance, with the
// right-channel payload appended after the left.
int16_t ACMG722_1C::InternalEncode(uint8_t* bitstream,
                                   int16_t* bitstream_len_byte) {
  int16_t left_channel[640];
  int16_t right_channel[640];
  int16_t len_in_bytes;
  int16_t out_bits[240];

  // If stereo, split input signal in left and right channel before encoding
  if (num_channels_ == 2) {
    for (int i = 0, j = 0; i < frame_len_smpl_ * 2; i += 2, j++) {
      left_channel[j] = in_audio_[in_audio_ix_read_ + i];
      right_channel[j] = in_audio_[in_audio_ix_read_ + i + 1];
    }
  } else {
    // BUG FIX: memcpy counts bytes, not samples. The original copied only
    // 640 bytes (320 int16_t samples), leaving the second half of the
    // 640-sample frame uninitialized before encoding.
    memcpy(left_channel, &in_audio_[in_audio_ix_read_],
           640 * sizeof(int16_t));
  }

  switch (operational_rate_) {
    case 24000: {
      len_in_bytes = WebRtcG7221C_Encode24(encoder_inst24_ptr_, left_channel,
                                           640, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221C_Encode24(encoder_inst24_ptr_right_,
                                              right_channel, 640,
                                              &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    case 32000: {
      len_in_bytes = WebRtcG7221C_Encode32(encoder_inst32_ptr_, left_channel,
                                           640, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221C_Encode32(encoder_inst32_ptr_right_,
                                              right_channel, 640,
                                              &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    case 48000: {
      len_in_bytes = WebRtcG7221C_Encode48(encoder_inst48_ptr_, left_channel,
                                           640, &out_bits[0]);
      if (num_channels_ == 2) {
        len_in_bytes += WebRtcG7221C_Encode48(encoder_inst48_ptr_right_,
                                              right_channel, 640,
                                              &out_bits[len_in_bytes / 2]);
      }
      break;
    }
    default: {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalEncode: Wrong rate for G722_1c.");
      return -1;
    }
  }

  memcpy(bitstream, out_bits, len_in_bytes);
  *bitstream_len_byte = len_in_bytes;

  // Advance the shared read index so the caller knows how much of the
  // audio buffer was consumed.
  in_audio_ix_read_ += 640 * num_channels_;

  return *bitstream_len_byte;
}
|
||||
|
||||
// Initialize the rate-specific native encoders. The right-channel instance
// is initialized first; on success the left/mono instance's init result is
// returned. Fails with -1 for an unrecognized operational rate.
// (|codec_params| is unused here; the rate was fixed at construction.)
int16_t ACMG722_1C::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  int16_t status;

  switch (operational_rate_) {
    case 24000: {
      status = WebRtcG7221C_EncoderInit24(encoder_inst24_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221C_EncoderInit24(encoder_inst24_ptr_);
    }
    case 32000: {
      status = WebRtcG7221C_EncoderInit32(encoder_inst32_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221C_EncoderInit32(encoder_inst32_ptr_);
    }
    case 48000: {
      status = WebRtcG7221C_EncoderInit48(encoder_inst48_ptr_right_);
      if (status < 0) {
        return status;
      }
      return WebRtcG7221C_EncoderInit48(encoder_inst48_ptr_);
    }
    default: {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalInitEncode: Wrong rate for G722_1c.");
      return -1;
    }
  }
}
|
||||
|
||||
// FEC duplication hook; not supported for G.722.1C.
ACMGenericCodec* ACMG722_1C::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// Create both per-channel native encoder instances for the configured
// rate. Requires the shared instance wrappers to exist already (they are
// set up elsewhere -- presumably before this is called; verify against the
// base-class call sequence).
int16_t ACMG722_1C::InternalCreateEncoder() {
  if ((encoder_inst_ptr_ == NULL) || (encoder_inst_ptr_right_ == NULL)) {
    return -1;
  }
  switch (operational_rate_) {
    case 24000:
      WebRtcG7221C_CreateEnc24(&encoder_inst24_ptr_);
      WebRtcG7221C_CreateEnc24(&encoder_inst24_ptr_right_);
      break;
    case 32000:
      WebRtcG7221C_CreateEnc32(&encoder_inst32_ptr_);
      WebRtcG7221C_CreateEnc32(&encoder_inst32_ptr_right_);
      break;
    case 48000:
      WebRtcG7221C_CreateEnc48(&encoder_inst48_ptr_);
      WebRtcG7221C_CreateEnc48(&encoder_inst48_ptr_right_);
      break;
    default:
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalCreateEncoder: Wrong rate for G722_1c.");
      return -1;
  }
  return 0;
}
|
||||
|
||||
void ACMG722_1C::DestructEncoderSafe() {
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
delete encoder_inst_ptr_;
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
if (encoder_inst_ptr_right_ != NULL) {
|
||||
delete encoder_inst_ptr_right_;
|
||||
encoder_inst_ptr_right_ = NULL;
|
||||
}
|
||||
encoder_inst24_ptr_ = NULL;
|
||||
encoder_inst32_ptr_ = NULL;
|
||||
encoder_inst48_ptr_ = NULL;
|
||||
}
|
||||
|
||||
void ACMG722_1C::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
delete ptr_inst;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221c.h
Normal file
66
jni/webrtc/modules/audio_coding/main/acm2/acm_g7221c.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221C_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221C_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct G722_1C_24_encinst_t_;
|
||||
struct G722_1C_24_decinst_t_;
|
||||
struct G722_1C_32_encinst_t_;
|
||||
struct G722_1C_32_decinst_t_;
|
||||
struct G722_1C_48_encinst_t_;
|
||||
struct G722_1C_48_decinst_t_;
|
||||
struct G722_1_Inst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for the G.722.1C codec (not part of the open-source package;
// the .cc compiles stubs unless the codec interface is provided).
class ACMG722_1C : public ACMGenericCodec {
 public:
  explicit ACMG722_1C(int16_t codec_id);
  ~ACMG722_1C();

  // For FEC; not implemented (the .cc returns NULL).
  ACMGenericCodec* CreateInstance(void);

  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  // Encoding rate currently configured; one of 24000/32000/48000 bps.
  int32_t operational_rate_;

  G722_1_Inst_t_* encoder_inst_ptr_;
  G722_1_Inst_t_* encoder_inst_ptr_right_;  // Used in stereo mode

  // Rate-specific encoder handles (left/right channel); only the pair
  // matching |operational_rate_| is valid at any instant.
  G722_1C_24_encinst_t_* encoder_inst24_ptr_;
  G722_1C_24_encinst_t_* encoder_inst24_ptr_right_;
  G722_1C_32_encinst_t_* encoder_inst32_ptr_;
  G722_1C_32_encinst_t_* encoder_inst32_ptr_right_;
  G722_1C_48_encinst_t_* encoder_inst48_ptr_;
  G722_1C_48_encinst_t_* encoder_inst48_ptr_right_;
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7221C_H_
|
||||
259
jni/webrtc/modules/audio_coding/main/acm2/acm_g729.cc
Normal file
259
jni/webrtc/modules/audio_coding/main/acm2/acm_g729.cc
Normal file
@@ -0,0 +1,259 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g729.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_G729
|
||||
// NOTE! G.729 is not included in the open-source package. Modify this file
|
||||
// or your codec API to match the function calls and names of used G.729 API
|
||||
// file.
|
||||
#include "webrtc/modules/audio_coding/main/codecs/g729/interface/g729_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_G729
|
||||
|
||||
// Stub implementations compiled when WEBRTC_CODEC_G729 is NOT defined (the
// G.729 codec is not included in the open-source package). Every operation
// fails with -1 / NULL or is a no-op.

ACMG729::ACMG729(int16_t /* codec_id */) : encoder_inst_ptr_(NULL) {}

ACMG729::~ACMG729() { return; }

int16_t ACMG729::InternalEncode(uint8_t* /* bitstream */,
                                int16_t* /* bitstream_len_byte */) {
  return -1;
}

int16_t ACMG729::EnableDTX() { return -1; }

int16_t ACMG729::DisableDTX() { return -1; }

int32_t ACMG729::ReplaceInternalDTXSafe(const bool /*replace_internal_dtx */) {
  return -1;
}

int32_t ACMG729::IsInternalDTXReplacedSafe(bool* /* internal_dtx_replaced */) {
  return -1;
}

int16_t ACMG729::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}

ACMGenericCodec* ACMG729::CreateInstance(void) { return NULL; }

int16_t ACMG729::InternalCreateEncoder() { return -1; }

void ACMG729::DestructEncoderSafe() { return; }

void ACMG729::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
// Constructs the real (codec-backed) wrapper. The encoder instance itself is
// allocated lazily by InternalCreateEncoder().
ACMG729::ACMG729(int16_t codec_id) : encoder_inst_ptr_(NULL) {
  // codec_id_ and has_internal_dtx_ are inherited from ACMGenericCodec; a
  // derived-class mem-initializer list may not name inherited data members
  // (ill-formed C++ — it slipped through because this branch only compiles
  // when WEBRTC_CODEC_G729 is defined). Assign them in the body instead,
  // matching the pattern used by ACMG729_1's constructor.
  codec_id_ = codec_id;
  // The original value-initialized this to false. TODO(review): G.729 has a
  // built-in DTX (used by InternalEncode/EnableDTX below) — confirm whether
  // this should instead be true.
  has_internal_dtx_ = false;
}
|
||||
|
||||
// Releases the encoder instance, if one was ever created.
ACMG729::~ACMG729() {
  if (encoder_inst_ptr_ == NULL)
    return;
  WebRtcG729_FreeEnc(encoder_inst_ptr_);
  encoder_inst_ptr_ = NULL;
}
|
||||
|
||||
// Encodes one ACM frame (|frame_len_smpl_| samples) in 10 ms sub-frames of
// 80 samples each (G.729 operates at 8 kHz). Handles the codec's built-in
// DTX: a 2-byte output is a SID frame and terminates the packet early, a
// 10-byte output is a normal speech frame, 0 bytes means DTX suppressed the
// sub-frame. Returns the number of bytes written to |bitstream| (via
// |bitstream_len_byte| as well), or -1 on an encoder error.
int16_t ACMG729::InternalEncode(uint8_t* bitstream,
                                int16_t* bitstream_len_byte) {
  // Initialize before entering the loop
  int16_t num_encoded_samples = 0;
  int16_t tmp_len_byte = 0;
  int16_t vad_decision = 0;  // 1 once any sub-frame produced a speech frame.
  *bitstream_len_byte = 0;
  while (num_encoded_samples < frame_len_smpl_) {
    // Call G.729 encoder with pointer to encoder memory, input
    // audio, number of samples and bitstream
    tmp_len_byte = WebRtcG729_Encode(
        encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], 80,
        reinterpret_cast<int16_t*>(&(bitstream[*bitstream_len_byte])));

    // Increment the read index to tell the caller how far we have gone
    // forward in reading the audio buffer.
    in_audio_ix_read_ += 80;

    // Sanity check: negative length means the encoder failed.
    if (tmp_len_byte < 0) {
      *bitstream_len_byte = 0;
      return -1;
    }

    // Increment number of written bytes.
    *bitstream_len_byte += tmp_len_byte;
    switch (tmp_len_byte) {
      case 0: {
        if (0 == num_encoded_samples) {
          // This is the first 10 ms in this packet and there is no data
          // generated; perhaps DTX is enabled and the codec is not
          // generating any bit-stream for this 10 ms. We do not continue
          // encoding this frame.
          return 0;
        }
        break;
      }
      case 2: {
        // 2 bytes == SID (comfort-noise descriptor) frame.
        if (has_internal_dtx_ && dtx_enabled_) {
          // Label the whole packet passive for the ACM's VAD bookkeeping.
          vad_decision = 0;
          for (int16_t n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
            vad_label_[n] = vad_decision;
          }
        }
        // We got a SID and have to send out this packet no matter
        // how much audio we have encoded.
        return *bitstream_len_byte;
      }
      case 10: {
        // 10 bytes == normal speech frame; mark the packet active.
        vad_decision = 1;
        // This is a valid length, just continue encoding.
        break;
      }
      default: {
        // Any other length is not produced by G.729 — treat as an error.
        return -1;
      }
    }

    // Update number of encoded samples.
    num_encoded_samples += 80;
  }

  // Update VAD decision vector: whole frame was passive under internal DTX.
  if (has_internal_dtx_ && !vad_decision && dtx_enabled_) {
    for (int16_t n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
      vad_label_[n] = vad_decision;
    }
  }

  // Done encoding, return number of encoded bytes.
  return *bitstream_len_byte;
}
|
||||
|
||||
// Turns on the codec's built-in DTX by re-initializing the encoder with the
// DTX flag set. Returns 0 on success (or if already enabled), -1 otherwise.
int16_t ACMG729::EnableDTX() {
  if (dtx_enabled_)
    return 0;  // Already on; nothing to do.
  if (!encoder_exist_)
    return -1;  // No encoder to reconfigure.
  // Re-init the G.729 encoder with DTX switched on.
  if (WebRtcG729_EncoderInit(encoder_inst_ptr_, 1) < 0)
    return -1;
  dtx_enabled_ = true;
  return 0;
}
|
||||
|
||||
// Turns off the codec's built-in DTX. Disabling when no encoder exists is
// harmless and reports success. Returns 0 on success, -1 on re-init failure.
int16_t ACMG729::DisableDTX() {
  if (!dtx_enabled_)
    return 0;  // Already off.
  if (!encoder_exist_)
    return 0;  // Nothing allocated; trivially disabled.
  // Re-init the G.729 encoder with DTX switched off.
  if (WebRtcG729_EncoderInit(encoder_inst_ptr_, 0) < 0)
    return -1;
  dtx_enabled_ = false;
  return 0;
}
|
||||
|
||||
// Switches between the G.729 built-in DTX and the external (WebRtc CNG) DTX.
// |replace_internal_dtx| == true means "use the external DTX".
// Returns 1 if the VAD went from inactive to active as a side effect, 0 on
// success / no change needed, -1 on failure (state is rolled back).
// Caller is expected to hold the codec lock ("Safe" suffix).
int32_t ACMG729::ReplaceInternalDTXSafe(const bool replace_internal_dtx) {
  // This function is used to disable the G.729 built in DTX and use an
  // external instead.

  // Equality means a switch is actually needed: has_internal_dtx_ == true
  // with a request to replace it (true), or == false with a request to
  // restore it (false). If they differ, the requested state already holds.
  if (replace_internal_dtx == has_internal_dtx_) {
    // Make sure we keep the DTX/VAD setting if possible.
    bool old_enable_dtx = dtx_enabled_;
    bool old_enable_vad = vad_enabled_;
    ACMVADMode old_mode = vad_mode_;
    if (replace_internal_dtx) {
      // Disable internal DTX before enabling external DTX.
      DisableDTX();
    } else {
      // Disable external DTX before enabling internal.
      ACMGenericCodec::DisableDTX();
    }
    has_internal_dtx_ = !replace_internal_dtx;
    // Re-apply the saved DTX/VAD configuration under the new DTX provider.
    int16_t status = SetVADSafe(old_enable_dtx, old_enable_vad, old_mode);
    // Check if VAD status has changed from inactive to active, or if error
    // was reported.
    if (status == 1) {
      vad_enabled_ = true;
      return status;
    } else if (status < 0) {
      // Roll back the DTX-provider flag on failure.
      has_internal_dtx_ = replace_internal_dtx;
      return -1;
    }
  }
  return 0;
}
|
||||
|
||||
// Reports whether the codec's built-in DTX has been replaced by the external
// one. Always succeeds (returns 0).
int32_t ACMG729::IsInternalDTXReplacedSafe(bool* internal_dtx_replaced) {
  *internal_dtx_replaced = !has_internal_dtx_;
  return 0;
}
|
||||
|
||||
// (Re)initializes the G.729 encoder; the second argument of the codec call
// switches its built-in DTX on or off per the caller's request.
// Returns the codec API's status code.
int16_t ACMG729::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  const int16_t dtx_flag = codec_params->enable_dtx ? 1 : 0;
  return WebRtcG729_EncoderInit(encoder_inst_ptr_, dtx_flag);
}
|
||||
|
||||
// Reserved for FEC; no per-call instance is created.
ACMGenericCodec* ACMG729::CreateInstance(void) {
  // Function not used.
  return NULL;
}
|
||||
|
||||
// Allocates the G.729 encoder state, forwarding the codec API's status code
// (negative values are treated as failure elsewhere in this file).
int16_t ACMG729::InternalCreateEncoder() {
  return WebRtcG729_CreateEnc(&encoder_inst_ptr_);
}
|
||||
|
||||
void ACMG729::DestructEncoderSafe() {
|
||||
// Free encoder memory
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcG729_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ACMG729::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcG729_FreeEnc(static_cast<G729_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
58
jni/webrtc/modules/audio_coding/main/acm2/acm_g729.h
Normal file
58
jni/webrtc/modules/audio_coding/main/acm2/acm_g729.h
Normal file
@@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G729_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G729_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct G729_encinst_t_;
|
||||
struct G729_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for the G.729 codec (not part of the open-source package; the
// .cc compiles stubs unless WEBRTC_CODEC_G729 is defined).
class ACMG729 : public ACMGenericCodec {
 public:
  explicit ACMG729(int16_t codec_id);
  ~ACMG729();

  // For FEC; not implemented (the .cc returns NULL).
  ACMGenericCodec* CreateInstance(void);

  // Encodes buffered audio; handles the codec's built-in DTX/SID frames.
  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  // Built-in DTX control (re-initializes the encoder with the DTX flag).
  int16_t EnableDTX();

  int16_t DisableDTX();

  // Switch between the codec's internal DTX and the external WebRtc DTX.
  int32_t ReplaceInternalDTXSafe(const bool replace_internal_dtx);

  int32_t IsInternalDTXReplacedSafe(bool* internal_dtx_replaced);

  G729_encinst_t_* encoder_inst_ptr_;  // Owned encoder state; NULL if none.
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G729_H_
|
||||
244
jni/webrtc/modules/audio_coding/main/acm2/acm_g7291.cc
Normal file
244
jni/webrtc/modules/audio_coding/main/acm2/acm_g7291.cc
Normal file
@@ -0,0 +1,244 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_g7291.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_G729_1
|
||||
// NOTE! G.729.1 is not included in the open-source package. Modify this file
|
||||
// or your codec API to match the function calls and names of used G.729.1 API
|
||||
// file.
|
||||
#include "webrtc/modules/audio_coding/main/codecs/g7291/interface/g7291_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_G729_1
|
||||
|
||||
// Stub implementations compiled when WEBRTC_CODEC_G729_1 is NOT defined
// (the G.729.1 codec is not included in the open-source package). Every
// operation fails with -1 / NULL or is a no-op.

ACMG729_1::ACMG729_1(int16_t /* codec_id */)
    : encoder_inst_ptr_(NULL),
      my_rate_(32000),
      flag_8khz_(0),
      flag_g729_mode_(0) {
  return;
}

ACMG729_1::~ACMG729_1() { return; }

int16_t ACMG729_1::InternalEncode(uint8_t* /* bitstream */,
                                  int16_t* /* bitstream_len_byte */) {
  return -1;
}

int16_t ACMG729_1::InternalInitEncoder(
    WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}

ACMGenericCodec* ACMG729_1::CreateInstance(void) { return NULL; }

int16_t ACMG729_1::InternalCreateEncoder() { return -1; }

void ACMG729_1::DestructEncoderSafe() { return; }

void ACMG729_1::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }

int16_t ACMG729_1::SetBitRateSafe(const int32_t /*rate*/) { return -1; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
struct G729_1_inst_t_;
|
||||
|
||||
// Constructs the real (codec-backed) wrapper; the codec instance itself is
// allocated lazily by InternalCreateEncoder().
ACMG729_1::ACMG729_1(int16_t codec_id)
    : encoder_inst_ptr_(NULL),
      my_rate_(32000),  // Default rate.
      flag_8khz_(0),
      flag_g729_mode_(0) {
  // TODO(tlegrand): We should add codec_id as a input variable to the
  // constructor of ACMGenericCodec.
  codec_id_ = codec_id;
  return;
}
|
||||
|
||||
// Releases the codec instance, if one was ever created.
ACMG729_1::~ACMG729_1() {
  if (encoder_inst_ptr_ == NULL)
    return;
  WebRtcG7291_Free(encoder_inst_ptr_);
  encoder_inst_ptr_ = NULL;
}
|
||||
|
||||
// Encodes one ACM packet of 1-3 G.729.1 frames (20 ms each, 320 samples).
// Returns the number of bytes written (also via |bitstream_len_byte|), or
// -1 on an encoder error or a length mismatch.
int16_t ACMG729_1::InternalEncode(uint8_t* bitstream,
                                  int16_t* bitstream_len_byte) {
  // Initialize before entering the loop.
  int16_t num_encoded_samples = 0;
  *bitstream_len_byte = 0;

  int16_t byte_length_frame = 0;

  // Derive number of 20ms frames per encoded packet.
  // [1,2,3] <=> [20,40,60]ms <=> [320,640,960] samples
  int16_t num_20ms_frames = (frame_len_smpl_ / 320);
  // Expected byte length for the packet: rate / (8 bits * 50 packets/s)
  // bytes per 20 ms frame. The extra (1 - flag_g729_mode_) byte carries
  // rate information.
  byte_length_frame =
      my_rate_ / (8 * 50) * num_20ms_frames + (1 - flag_g729_mode_);

  // The following might be revised if we have G729.1 Annex C (support for
  // DTX).
  do {
    *bitstream_len_byte = WebRtcG7291_Encode(
        encoder_inst_ptr_, &in_audio_[in_audio_ix_read_],
        reinterpret_cast<int16_t*>(bitstream), my_rate_, num_20ms_frames);

    // Increment the read index to tell the caller how far we have gone
    // forward in reading the audio buffer.
    // NOTE(review): this advances 160 samples per call while a 20 ms frame
    // is 320 samples — the encoder apparently buffers input internally and
    // only emits a packet once enough audio has accumulated (the loop exits
    // on a non-zero length). Confirm against the G.729.1 API.
    in_audio_ix_read_ += 160;

    // Sanity check: negative length means the encoder failed.
    if (*bitstream_len_byte < 0) {
      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                   "InternalEncode: Encode error for G729_1");
      *bitstream_len_byte = 0;
      return -1;
    }

    num_encoded_samples += 160;
  } while (*bitstream_len_byte == 0);

  // This criteria will change if we have Annex C.
  if (*bitstream_len_byte != byte_length_frame) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "InternalEncode: Encode error for G729_1");
    *bitstream_len_byte = 0;
    return -1;
  }

  // The packet must account for exactly one ACM frame of input.
  if (num_encoded_samples != frame_len_smpl_) {
    *bitstream_len_byte = 0;
    return -1;
  }

  return *bitstream_len_byte;
}
|
||||
|
||||
// Caches the requested bit rate, then validates it and (re)initializes the
// encoder through SetBitRateSafe(). Returns 0 on success, -1 on failure.
int16_t ACMG729_1::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  my_rate_ = codec_params->codec_inst.rate;
  // SetBitRateSafe() takes the rate as int32_t directly; the original's
  // C-style (uint32_t) cast was redundant (and misleading about signedness).
  return SetBitRateSafe(my_rate_);
}
|
||||
|
||||
// Reserved for FEC; no separate instance is created for G.729.1.
ACMGenericCodec* ACMG729_1::CreateInstance(void) { return NULL; }
|
||||
|
||||
// Allocates the G.729.1 codec instance; logs and reports failure.
// Returns 0 on success, -1 otherwise.
int16_t ACMG729_1::InternalCreateEncoder() {
  if (WebRtcG7291_Create(&encoder_inst_ptr_) >= 0)
    return 0;
  WEBRTC_TRACE(webrtc::kTraceError,
               webrtc::kTraceAudioCoding,
               unique_id_,
               "InternalCreateEncoder: create encoder failed for G729_1");
  return -1;
}
|
||||
|
||||
void ACMG729_1::DestructEncoderSafe() {
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcG7291_Free(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
// Conferencing path: should free an encoder instance handed out earlier.
void ACMG729_1::InternalDestructEncoderInst(void* ptr_inst) {
  if (ptr_inst != NULL) {
    // NOTE(review): the free call below is commented out, so this function
    // is a no-op and any instance passed in is leaked. Confirm whether
    // WebRtcG7291_Free() should be invoked here (cf. the G.729 version,
    // which does free its instance).
    // WebRtcG7291_Free((G729_1_inst_t*)ptrInst);
  }
  return;
}
|
||||
|
||||
// Validates |rate| and re-initializes the encoder with it.
// Allowed rates: 8000 plus every 2000 bps step in [12000, 32000], i.e.
// {8000, 12000, 14000, 16000, 18000, 20000, 22000, 24000, 26000, 28000,
//  30000, 32000} — the original 12-case switch is collapsed into one range
// check with identical accept/reject behavior.
// Returns 0 on success, -1 on an invalid rate or a failed re-init.
// TODO(tlegrand): This check exists in one other place too. Should be
// possible to reuse code.
int16_t ACMG729_1::SetBitRateSafe(const int32_t rate) {
  const bool rate_ok =
      (rate == 8000) ||
      ((rate >= 12000) && (rate <= 32000) && ((rate % 2000) == 0));
  if (!rate_ok) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "SetBitRateSafe: Invalid rate G729_1");
    return -1;
  }
  // All accepted values fit in the uint16_t member.
  my_rate_ = static_cast<uint16_t>(rate);

  // Re-init with new rate.
  if (WebRtcG7291_EncoderInit(encoder_inst_ptr_, my_rate_, flag_8khz_,
                              flag_g729_mode_) >= 0) {
    encoder_params_.codec_inst.rate = my_rate_;
    return 0;
  } else {
    return -1;
  }
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
56
jni/webrtc/modules/audio_coding/main/acm2/acm_g7291.h
Normal file
56
jni/webrtc/modules/audio_coding/main/acm2/acm_g7291.h
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7291_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7291_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct G729_1_inst_t_;
|
||||
struct G729_1_inst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for the G.729.1 codec (not part of the open-source package;
// the .cc compiles stubs unless WEBRTC_CODEC_G729_1 is defined).
class ACMG729_1 : public ACMGenericCodec {
 public:
  explicit ACMG729_1(int16_t codec_id);
  ~ACMG729_1();

  // For FEC; not implemented (the .cc returns NULL).
  ACMGenericCodec* CreateInstance(void);

  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  // Validates |rate| (8000, or 12000..32000 in 2000 bps steps) and
  // re-initializes the encoder.
  int16_t SetBitRateSafe(const int32_t rate);

  G729_1_inst_t_* encoder_inst_ptr_;  // Owned codec state; NULL if none.

  // Target bit rate in bps (8000..32000; fits in uint16_t).
  uint16_t my_rate_;
  // Flags forwarded to WebRtcG7291_EncoderInit; exact semantics are defined
  // by the (closed-source) codec API — TODO confirm. flag_g729_mode_ also
  // affects the expected packet byte length in InternalEncode().
  int16_t flag_8khz_;
  int16_t flag_g729_mode_;
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_G7291_H_
|
||||
1011
jni/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc
Normal file
1011
jni/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.cc
Normal file
File diff suppressed because it is too large
Load Diff
986
jni/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h
Normal file
986
jni/webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h
Normal file
@@ -0,0 +1,986 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
|
||||
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
#define MAX_FRAME_SIZE_10MSEC 6
|
||||
|
||||
// forward declaration
|
||||
struct WebRtcVadInst;
|
||||
struct WebRtcCngEncInst;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct WebRtcACMCodecParams;
|
||||
struct CodecInst;
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// forward declaration
|
||||
class AcmReceiver;
|
||||
|
||||
class ACMGenericCodec {
|
||||
public:
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Constructor of the class
|
||||
//
|
||||
ACMGenericCodec();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Destructor of the class.
|
||||
//
|
||||
virtual ~ACMGenericCodec();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// ACMGenericCodec* CreateInstance();
|
||||
// The function will be used for FEC. It is not implemented yet.
|
||||
//
|
||||
virtual ACMGenericCodec* CreateInstance() = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t Encode()
|
||||
// The function is called to perform an encoding of the audio stored in
|
||||
// audio buffer. An encoding is performed only if enough audio, i.e. equal
|
||||
// to the frame-size of the codec, exist. The audio frame will be processed
|
||||
// by VAD and CN/DTX if required. There are few different cases.
|
||||
//
|
||||
// A) Neither VAD nor DTX is active; the frame is encoded by the encoder.
|
||||
//
|
||||
// B) VAD is enabled but not DTX; in this case the audio is processed by VAD
|
||||
// and encoded by the encoder. The "*encoding_type" will be either
|
||||
// "kActiveNormalEncode" or "kPassiveNormalEncode" if frame is active or
|
||||
// passive, respectively.
|
||||
//
|
||||
// C) DTX is enabled; if the codec has internal VAD/DTX we just encode the
|
||||
// frame by the encoder. Otherwise, the frame is passed through VAD and
|
||||
// if identified as passive, then it will be processed by CN/DTX. If the
|
||||
// frame is active it will be encoded by the encoder.
|
||||
//
|
||||
// This function acquires the appropriate locks and calls EncodeSafe() for
|
||||
// the actual processing.
|
||||
//
|
||||
// Outputs:
|
||||
// -bitstream : a buffer where bit-stream will be written to.
|
||||
// -bitstream_len_byte : contains the length of the bit-stream in
|
||||
// bytes.
|
||||
// -timestamp : contains the RTP timestamp, this is the
|
||||
// sampling time of the first sample encoded
|
||||
// (measured in number of samples).
|
||||
// -encoding_type : contains the type of encoding applied on the
|
||||
// audio samples. The alternatives are
|
||||
// (c.f. acm_common_types.h)
|
||||
// -kNoEncoding:
|
||||
// there was not enough data to encode. or
|
||||
// some error has happened that we could
|
||||
// not do encoding.
|
||||
// -kActiveNormalEncoded:
|
||||
// the audio frame is active and encoded by
|
||||
// the given codec.
|
||||
// -kPassiveNormalEncoded:
|
||||
// the audio frame is passive but coded with
|
||||
// the given codec (NO DTX).
|
||||
// -kPassiveDTXWB:
|
||||
// The audio frame is passive and used
|
||||
// wide-band CN to encode.
|
||||
// -kPassiveDTXNB:
|
||||
// The audio frame is passive and used
|
||||
// narrow-band CN to encode.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if error is occurred, otherwise the length of the bit-stream in
|
||||
// bytes.
|
||||
//
|
||||
int16_t Encode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte,
|
||||
uint32_t* timestamp,
|
||||
WebRtcACMEncodingType* encoding_type);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// bool EncoderInitialized();
|
||||
//
|
||||
// Return value:
|
||||
// True if the encoder is successfully initialized,
|
||||
// false otherwise.
|
||||
//
|
||||
bool EncoderInitialized();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t EncoderParams()
|
||||
// It is called to get encoder parameters. It will call
|
||||
// EncoderParamsSafe() in turn.
|
||||
//
|
||||
// Output:
|
||||
// -enc_params : a buffer where the encoder parameters is
|
||||
// written to. If the encoder is not
|
||||
// initialized this buffer is filled with
|
||||
// invalid values
|
||||
// Return value:
|
||||
// -1 if the encoder is not initialized,
|
||||
// 0 otherwise.
|
||||
//
|
||||
int16_t EncoderParams(WebRtcACMCodecParams* enc_params);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t InitEncoder(...)
|
||||
// This function is called to initialize the encoder with the given
|
||||
// parameters.
|
||||
//
|
||||
// Input:
|
||||
// -codec_params : parameters of encoder.
|
||||
// -force_initialization: if false the initialization is invoked only if
|
||||
// the encoder is not initialized. If true the
|
||||
// encoder is forced to (re)initialize.
|
||||
//
|
||||
// Return value:
|
||||
// 0 if could initialize successfully,
|
||||
// -1 if failed to initialize.
|
||||
//
|
||||
//
|
||||
int16_t InitEncoder(WebRtcACMCodecParams* codec_params,
|
||||
bool force_initialization);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t Add10MsData(...)
|
||||
// This function is called to add 10 ms of audio to the audio buffer of
|
||||
// the codec.
|
||||
//
|
||||
// Inputs:
|
||||
// -timestamp : the timestamp of the 10 ms audio. the timestamp
|
||||
// is the sampling time of the
|
||||
// first sample measured in number of samples.
|
||||
// -data : a buffer that contains the audio. The codec
|
||||
// expects to get the audio in correct sampling
|
||||
// frequency
|
||||
// -length : the length of the audio buffer
|
||||
// -audio_channel : 0 for mono, 1 for stereo (not supported yet)
|
||||
//
|
||||
// Return values:
|
||||
// -1 if failed
|
||||
// 0 otherwise.
|
||||
//
|
||||
int32_t Add10MsData(const uint32_t timestamp,
|
||||
const int16_t* data,
|
||||
const uint16_t length,
|
||||
const uint8_t audio_channel);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// uint32_t NoMissedSamples()
|
||||
// This function returns the number of samples which are overwritten in
|
||||
// the audio buffer. The audio samples are overwritten if the input audio
|
||||
// buffer is full, but Add10MsData() is called. (We might remove this
|
||||
// function if it is not used)
|
||||
//
|
||||
// Return Value:
|
||||
// Number of samples which are overwritten.
|
||||
//
|
||||
uint32_t NoMissedSamples() const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// void ResetNoMissedSamples()
|
||||
// This function resets the number of overwritten samples to zero.
|
||||
// (We might remove this function if we remove NoMissedSamples())
|
||||
//
|
||||
void ResetNoMissedSamples();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t SetBitRate()
|
||||
// The function is called to set the encoding rate.
|
||||
//
|
||||
// Input:
|
||||
// -bitrate_bps : encoding rate in bits per second
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed to set the rate, due to invalid input or given
|
||||
// codec is not rate-adjustable.
|
||||
// 0 if the rate is adjusted successfully
|
||||
//
|
||||
int16_t SetBitRate(const int32_t bitrate_bps);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// DestructEncoderInst()
|
||||
// This API is used in conferencing. It will free the memory that is pointed
|
||||
// by |ptr_inst|. |ptr_inst| is a pointer to encoder instance, created and
|
||||
// filled up by calling EncoderInst(...).
|
||||
//
|
||||
// Inputs:
|
||||
// -ptr_inst : pointer to an encoder instance to be deleted.
|
||||
//
|
||||
//
|
||||
void DestructEncoderInst(void* ptr_inst);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// uint32_t EarliestTimestamp()
|
||||
// Returns the timestamp of the first 10 ms in audio buffer. This is used
|
||||
// to identify if a synchronization of two encoders is required.
|
||||
//
|
||||
// Return value:
|
||||
// timestamp of the first 10 ms audio in the audio buffer.
|
||||
//
|
||||
uint32_t EarliestTimestamp() const;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t SetVAD()
|
||||
// This is called to set VAD & DTX. If the codec has internal DTX, it will
|
||||
// be used. If DTX is enabled and the codec does not have internal DTX,
|
||||
// WebRtc-VAD will be used to decide if the frame is active. If DTX is
|
||||
// disabled but VAD is enabled, the audio is passed through VAD to label it
|
||||
// as active or passive, but the frame is encoded normally. However the
|
||||
// bit-stream is labeled properly so that ACM::Process() can use this
|
||||
// information. In case of failure, the previous states of the VAD & DTX
|
||||
// are kept.
|
||||
//
|
||||
// Inputs/Output:
|
||||
// -enable_dtx : if true DTX will be enabled otherwise the DTX is
|
||||
// disabled. If codec has internal DTX that will be
|
||||
// used, otherwise WebRtc-CNG is used. In the latter
|
||||
// case VAD is automatically activated.
|
||||
// -enable_vad : if true WebRtc-VAD is enabled, otherwise VAD is
|
||||
// disabled, except for the case that DTX is enabled
|
||||
// but codec doesn't have internal DTX. In this case
|
||||
// VAD is enabled regardless of the value of
|
||||
// |enable_vad|.
|
||||
// -mode : this specifies the aggressiveness of VAD.
|
||||
//
|
||||
// Return value
|
||||
// -1 if failed to set DTX & VAD as specified,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t SetVAD(bool* enable_dtx, bool* enable_vad, ACMVADMode* mode);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t ReplaceInternalDTX()
|
||||
// This is called to replace the codec internal DTX with WebRtc DTX.
|
||||
// This is only valid for G729 where the user has possibility to replace
|
||||
// AnnexB with WebRtc DTX. For other codecs this function has no effect.
|
||||
//
|
||||
// Input:
|
||||
// -replace_internal_dtx : if true the internal DTX is replaced with WebRtc.
|
||||
//
|
||||
// Return value
|
||||
// -1 if failed to replace internal DTX,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int32_t ReplaceInternalDTX(const bool replace_internal_dtx);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t IsInternalDTXReplaced()
|
||||
// This is called to check if the codec internal DTX is replaced by WebRtc
|
||||
// DTX. This is only valid for G729 where the user has possibility to replace
|
||||
// AnnexB with WebRtc DTX. For other codecs this function has no effect.
|
||||
//
|
||||
// Output:
|
||||
// -internal_dtx_replaced: if true the internal DTX is replaced with WebRtc.
|
||||
//
|
||||
// Return value
|
||||
// -1 if failed to check
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int32_t IsInternalDTXReplaced(bool* internal_dtx_replaced);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// bool HasInternalDTX()
|
||||
// Used to check if the codec has internal DTX.
|
||||
//
|
||||
// Return value:
|
||||
// true if the codec has an internal DTX, e.g. G729,
|
||||
// false otherwise.
|
||||
//
|
||||
bool HasInternalDTX() const {
|
||||
ReadLockScoped rl(codec_wrapper_lock_);
|
||||
return has_internal_dtx_;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t GetEstimatedBandwidth()
|
||||
// Used to get decoder estimated bandwidth. Only iSAC will provide a value.
|
||||
//
|
||||
//
|
||||
// Return value:
|
||||
// -1 if fails to get decoder estimated bandwidth,
|
||||
// >0 estimated bandwidth in bits/sec.
|
||||
//
|
||||
int32_t GetEstimatedBandwidth();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t SetEstimatedBandwidth()
|
||||
// Used to set estiamted bandwidth sent out of band from other side. Only
|
||||
// iSAC will have use for the value.
|
||||
//
|
||||
// Input:
|
||||
// -estimated_bandwidth: estimated bandwidth in bits/sec
|
||||
//
|
||||
// Return value:
|
||||
// -1 if fails to set estimated bandwidth,
|
||||
// 0 on success.
|
||||
//
|
||||
int32_t SetEstimatedBandwidth(int32_t estimated_bandwidth);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t GetRedPayload()
|
||||
// Used to get codec specific RED payload (if such is implemented).
|
||||
// Currently only done in iSAC.
|
||||
//
|
||||
// Outputs:
|
||||
// -red_payload : a pointer to the data for RED payload.
|
||||
// -payload_bytes : number of bytes in RED payload.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if fails to get codec specific RED,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int32_t GetRedPayload(uint8_t* red_payload, int16_t* payload_bytes);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t ResetEncoder()
|
||||
// By calling this function you would re-initialize the encoder with the
|
||||
// current parameters. All the settings, e.g. VAD/DTX, frame-size... should
|
||||
// remain unchanged. (In case of iSAC we don't want to lose BWE history.)
|
||||
//
|
||||
// Return value
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t ResetEncoder();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// void DestructEncoder()
|
||||
// This function is called to delete the encoder instance, if possible, to
|
||||
// have a fresh start. For codecs where encoder and decoder share the same
|
||||
// instance we cannot delete the encoder and instead we will initialize the
|
||||
// encoder. We also delete VAD and DTX if they have been created.
|
||||
//
|
||||
void DestructEncoder();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t SamplesLeftToEncode()
|
||||
// Returns the number of samples required to be able to do encoding.
|
||||
//
|
||||
// Return value:
|
||||
// Number of samples.
|
||||
//
|
||||
int16_t SamplesLeftToEncode();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// SetUniqueID()
|
||||
// Set a unique ID for the codec to be used for tracing and debugging
|
||||
//
|
||||
// Input
|
||||
// -id : A number to identify the codec.
|
||||
//
|
||||
void SetUniqueID(const uint32_t id);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// UpdateDecoderSampFreq()
|
||||
// For most of the codecs this function does nothing. It must be
|
||||
// implemented for those codecs that one codec instance serves as the
|
||||
// decoder for different flavors of the codec. One example is iSAC. there,
|
||||
// iSAC 16 kHz and iSAC 32 kHz are treated as two different codecs with
|
||||
// different payload types, however, there is only one iSAC instance to
|
||||
// decode. The reason for that is we would like to decode and encode with
|
||||
// the same codec instance for bandwidth estimator to work.
|
||||
//
|
||||
// Each time that we receive a new payload type, we call this function to
|
||||
// prepare the decoder associated with the new payload. Normally, decoders
|
||||
// doesn't have to do anything. For iSAC the decoder has to change it's
|
||||
// sampling rate. The input parameter specifies the current flavor of the
|
||||
// codec in codec database. For instance, if we just got a SWB payload then
|
||||
// the input parameter is ACMCodecDB::isacswb.
|
||||
//
|
||||
// Input:
|
||||
// -codec_id : the ID of the codec associated with the
|
||||
// payload type that we just received.
|
||||
//
|
||||
// Return value:
|
||||
// 0 if succeeded in updating the decoder.
|
||||
// -1 if failed to update.
|
||||
//
|
||||
virtual int16_t UpdateDecoderSampFreq(int16_t /* codec_id */) { return 0; }
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// UpdateEncoderSampFreq()
|
||||
// Call this function to update the encoder sampling frequency. This
|
||||
// is for codecs where one payload-name supports several encoder sampling
|
||||
// frequencies. Otherwise, to change the sampling frequency we need to
|
||||
// register new codec. ACM will consider that as registration of a new
|
||||
// codec, not a change in parameter. For iSAC, switching from WB to SWB
|
||||
// is treated as a change in parameter. Therefore, we need this function.
|
||||
//
|
||||
// Input:
|
||||
// -samp_freq_hz : encoder sampling frequency.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed, or if this is meaningless for the given codec.
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t UpdateEncoderSampFreq(uint16_t samp_freq_hz)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// EncoderSampFreq()
|
||||
// Get the sampling frequency that the encoder (WebRtc wrapper) expects.
|
||||
//
|
||||
// Output:
|
||||
// -samp_freq_hz : sampling frequency, in Hertz, which the encoder
|
||||
// should be fed with.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed to output sampling rate.
|
||||
// 0 if the sample rate is returned successfully.
|
||||
//
|
||||
virtual int16_t EncoderSampFreq(uint16_t* samp_freq_hz)
|
||||
SHARED_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int32_t ConfigISACBandwidthEstimator()
|
||||
// Call this function to configure the bandwidth estimator of ISAC.
|
||||
// During the adaptation of bit-rate, iSAC automatically adjusts the
|
||||
// frame-size (either 30 or 60 ms) to save on RTP header. The initial
|
||||
// frame-size can be specified by the first argument. The configuration also
|
||||
// regards the initial estimate of bandwidths. The estimator starts from
|
||||
// this point and converges to the actual bottleneck. This is given by the
|
||||
// second parameter. Furthermore, it is also possible to control the
|
||||
// adaptation of frame-size. This is specified by the last parameter.
|
||||
//
|
||||
// Input:
|
||||
// -init_frame_fize_ms : initial frame-size in milliseconds. For iSAC-wb
|
||||
// 30 ms and 60 ms (default) are acceptable values,
|
||||
// and for iSAC-swb 30 ms is the only acceptable
|
||||
// value. Zero indicates default value.
|
||||
// -init_rate_bps : initial estimate of the bandwidth. Values
|
||||
// between 10000 and 58000 are acceptable.
|
||||
// -enforce_frame_size : if true, the frame-size will not be adapted.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed to configure the bandwidth estimator,
|
||||
// 0 if the configuration was successfully applied.
|
||||
//
|
||||
virtual int32_t ConfigISACBandwidthEstimator(
|
||||
const uint8_t init_frame_size_msec,
|
||||
const uint16_t init_rate_bps,
|
||||
const bool enforce_frame_size);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// SetISACMaxPayloadSize()
|
||||
// Set the maximum payload size of iSAC packets. No iSAC payload,
|
||||
// regardless of its frame-size, may exceed the given limit. For
|
||||
// an iSAC payload of size B bits and frame-size T sec we have;
|
||||
// (B < max_payload_len_bytes * 8) and (B/T < max_rate_bit_per_sec), c.f.
|
||||
// SetISACMaxRate().
|
||||
//
|
||||
// Input:
|
||||
// -max_payload_len_bytes : maximum payload size in bytes.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed to set the maximum payload-size.
|
||||
// 0 if the given length is set successfully.
|
||||
//
|
||||
virtual int32_t SetISACMaxPayloadSize(const uint16_t max_payload_len_bytes);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// SetISACMaxRate()
|
||||
// Set the maximum instantaneous rate of iSAC. For a payload of B bits
|
||||
// with a frame-size of T sec the instantaneous rate is B/T bits per
|
||||
// second. Therefore, (B/T < max_rate_bit_per_sec) and
|
||||
// (B < max_payload_len_bytes * 8) are always satisfied for iSAC payloads,
|
||||
// c.f SetISACMaxPayloadSize().
|
||||
//
|
||||
// Input:
|
||||
// -max_rate_bps : maximum instantaneous bit-rate given in bits/sec.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed to set the maximum rate.
|
||||
// 0 if the maximum rate is set successfully.
|
||||
//
|
||||
virtual int32_t SetISACMaxRate(const uint32_t max_rate_bps);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// REDPayloadISAC()
|
||||
// This is an iSAC-specific function. The function is called to get RED
|
||||
// payload from a default-encoder.
|
||||
//
|
||||
// Inputs:
|
||||
// -isac_rate : the target rate of the main payload. A RED
|
||||
// payload is generated according to the rate of
|
||||
// main payload. Note that we are not specifying the
|
||||
// rate of RED payload, but the main payload.
|
||||
// -isac_bw_estimate : bandwidth information should be inserted in
|
||||
// RED payload.
|
||||
//
|
||||
// Output:
|
||||
// -payload : pointer to a buffer where the RED payload will
|
||||
// written to.
|
||||
// -payload_len_bytes : a place-holder to write the length of the RED
|
||||
// payload in Bytes.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if an error occurs, otherwise the length of the payload (in Bytes)
|
||||
// is returned.
|
||||
//
|
||||
virtual int16_t REDPayloadISAC(const int32_t isac_rate,
|
||||
const int16_t isac_bw_estimate,
|
||||
uint8_t* payload,
|
||||
int16_t* payload_len_bytes);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int SetOpusMaxBandwidth()
|
||||
// Sets maximum required encoding bandwidth for Opus. This is to tell Opus
|
||||
// that it is enough to code the input audio up to a bandwidth. A use case of
|
||||
// this is when the receiver cannot render the full band. Opus can take this
|
||||
// information to optimize the bit rate and increase the computation
|
||||
// efficiency.
|
||||
//
|
||||
// Input:
|
||||
// -max_bandwidth : maximum required bandwidth.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed or on codecs other than Opus
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int SetOpusMaxBandwidth(int /* max_bandwidth */);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// HasFrameToEncode()
|
||||
// Returns true if there is enough audio buffered for encoding, such that
|
||||
// calling Encode() will return a payload.
|
||||
//
|
||||
bool HasFrameToEncode() const;
|
||||
|
||||
//
|
||||
// Returns pointer to the AudioDecoder class of this codec. A codec which
|
||||
// should own its own decoder (e.g. iSAC which need same instance for encoding
|
||||
// and decoding, or a codec which should access decoder instance for specific
|
||||
// decoder setting) should implement this method. This method is called if
|
||||
// and only if the ACMCodecDB::codec_settings[codec_id].owns_decoder is true.
|
||||
//
|
||||
virtual AudioDecoder* Decoder(int /* codec_id */) { return NULL; }
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// bool HasInternalFEC()
|
||||
// Used to check if the codec has internal FEC.
|
||||
//
|
||||
// Return value:
|
||||
// true if the codec has an internal FEC, e.g. Opus.
|
||||
// false otherwise.
|
||||
//
|
||||
bool HasInternalFEC() const {
|
||||
ReadLockScoped rl(codec_wrapper_lock_);
|
||||
return has_internal_fec_;
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int SetFEC();
|
||||
// Sets the codec internal FEC. No effects on codecs that do not provide
|
||||
// internal FEC.
|
||||
//
|
||||
// Input:
|
||||
// -enable_fec : if true FEC will be enabled otherwise the FEC is
|
||||
// disabled.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed, or the codec does not support FEC
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int SetFEC(bool /* enable_fec */) { return -1; }
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int SetPacketLossRate()
|
||||
// Sets expected packet loss rate for encoding. Some encoders provide packet
|
||||
// loss gnostic encoding to make stream less sensitive to packet losses,
|
||||
// through e.g., FEC. No effects on codecs that do not provide such encoding.
|
||||
//
|
||||
// Input:
|
||||
// -loss_rate : expected packet loss rate (0 -- 100 inclusive).
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded or packet loss rate is ignored.
|
||||
//
|
||||
virtual int SetPacketLossRate(int /* loss_rate */) { return 0; }
|
||||
|
||||
protected:
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// All the functions with FunctionNameSafe(...) contain the actual
|
||||
// implementation of FunctionName(...). FunctionName() acquires an
|
||||
// appropriate lock and calls FunctionNameSafe() to do the actual work.
|
||||
// Therefore, for the description of functionality, input/output arguments
|
||||
// and return value we refer to FunctionName()
|
||||
//
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See Add10MsSafe() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
virtual int32_t Add10MsDataSafe(const uint32_t timestamp,
|
||||
const int16_t* data,
|
||||
const uint16_t length,
|
||||
const uint8_t audio_channel)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See EncoderParam() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
int16_t EncoderParamsSafe(WebRtcACMCodecParams* enc_params)
|
||||
SHARED_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See ResetEncoder() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
int16_t ResetEncoderSafe() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See InitEncoder() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
int16_t InitEncoderSafe(WebRtcACMCodecParams* codec_params,
|
||||
bool force_initialization)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See InitDecoder() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
int16_t InitDecoderSafe(WebRtcACMCodecParams* codec_params,
|
||||
bool force_initialization);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See DestructEncoder() for the description of function,
|
||||
// input(s)/output(s) and return value.
|
||||
//
|
||||
virtual void DestructEncoderSafe()
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_) = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See SetBitRate() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
// Any codec that can change the bit-rate has to implement this.
|
||||
//
|
||||
virtual int16_t SetBitRateSafe(const int32_t bitrate_bps)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See GetEstimatedBandwidth() for the description of function,
|
||||
// input(s)/output(s) and return value.
|
||||
//
|
||||
virtual int32_t GetEstimatedBandwidthSafe();
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See SetEstimatedBandwidth() for the description of function,
|
||||
// input(s)/output(s) and return value.
|
||||
//
|
||||
virtual int32_t SetEstimatedBandwidthSafe(int32_t estimated_bandwidth);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See GetRedPayload() for the description of function, input(s)/output(s)
|
||||
// and return value.
|
||||
//
|
||||
virtual int32_t GetRedPayloadSafe(uint8_t* red_payload,
|
||||
int16_t* payload_bytes);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See SetVAD() for the description of function, input(s)/output(s) and
|
||||
// return value.
|
||||
//
|
||||
int16_t SetVADSafe(bool* enable_dtx, bool* enable_vad, ACMVADMode* mode)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See ReplaceInternalDTX() for the description of function, input and
|
||||
// return value.
|
||||
//
|
||||
virtual int32_t ReplaceInternalDTXSafe(const bool replace_internal_dtx);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// See IsInternalDTXReplaced() for the description of function, input and
|
||||
// return value.
|
||||
//
|
||||
virtual int32_t IsInternalDTXReplacedSafe(bool* internal_dtx_replaced);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t CreateEncoder()
|
||||
// Creates the encoder instance.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t CreateEncoder() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t EnableVAD();
|
||||
// Enables VAD with the given mode. The VAD instance will be created if
|
||||
// it does not exists.
|
||||
//
|
||||
// Input:
|
||||
// -mode : VAD mode c.f. audio_coding_module_typedefs.h for
|
||||
// the options.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t EnableVAD(ACMVADMode mode)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t DisableVAD()
|
||||
// Disables VAD.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t DisableVAD() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t EnableDTX()
|
||||
// Enables DTX. This method should be overwritten for codecs which have
|
||||
// internal DTX.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t EnableDTX() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t DisableDTX()
|
||||
// Disables usage of DTX. This method should be overwritten for codecs which
|
||||
// have internal DTX.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t DisableDTX() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t InternalEncode()
|
||||
// This is a codec-specific function called in EncodeSafe() to actually
|
||||
// encode a frame of audio.
|
||||
//
|
||||
// Outputs:
|
||||
// -bitstream : pointer to a buffer where the bit-stream is
|
||||
// written to.
|
||||
// -bitstream_len_byte : the length of the bit-stream in bytes,
|
||||
// a negative value indicates error.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// otherwise the length of the bit-stream is returned.
|
||||
//
|
||||
virtual int16_t InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_) = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t InternalInitEncoder()
|
||||
// This is a codec-specific function called in InitEncoderSafe(), it has to
|
||||
// do all codec-specific operation to initialize the encoder given the
|
||||
// encoder parameters.
|
||||
//
|
||||
// Input:
|
||||
// -codec_params : pointer to a structure that contains parameters to
|
||||
// initialize encoder.
|
||||
// Set codec_params->codec_inst.rate to -1 for
|
||||
// iSAC to operate in adaptive mode.
|
||||
// (to do: if frame-length is -1 frame-length will be
|
||||
// automatically adjusted, otherwise, given
|
||||
// frame-length is forced)
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_) = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// void IncreaseNoMissedSamples()
|
||||
// This method is called to increase the number of samples that are
|
||||
// overwritten in the audio buffer.
|
||||
//
|
||||
// Input:
|
||||
// -num_samples : the number of overwritten samples is incremented
|
||||
// by this value.
|
||||
//
|
||||
void IncreaseNoMissedSamples(const int16_t num_samples)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t InternalCreateEncoder()
|
||||
// This is a codec-specific method called in CreateEncoderSafe() it is
|
||||
// supposed to perform all codec-specific operations to create encoder
|
||||
// instance.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t InternalCreateEncoder() = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// void InternalDestructEncoderInst()
|
||||
// This is a codec-specific method, used in conferencing, called from
|
||||
// DestructEncoderInst(). The input argument is pointer to encoder instance
|
||||
// (codec instance for codecs that encoder and decoder share the same
|
||||
// instance). This method is called to free the memory that |ptr_inst| is
|
||||
// pointing to.
|
||||
//
|
||||
// Input:
|
||||
// -ptr_inst : pointer to encoder instance.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual void InternalDestructEncoderInst(void* ptr_inst) = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t InternalResetEncoder()
|
||||
// This method is called to reset the states of encoder. However, the
|
||||
// current parameters, e.g. frame-length, should remain as they are. For
|
||||
// most of the codecs a re-initialization of the encoder is what needs to
|
||||
// be down. But for iSAC we like to keep the BWE history so we cannot
|
||||
// re-initialize. As soon as such an API is implemented in iSAC this method
|
||||
// has to be overwritten in ACMISAC class.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
virtual int16_t InternalResetEncoder()
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// int16_t ProcessFrameVADDTX()
|
||||
// This function is called when a full frame of audio is available. It will
|
||||
// break the audio frame into blocks such that each block could be processed
|
||||
// by VAD & CN/DTX. If a frame is divided into two blocks then there are two
|
||||
// cases. First, the first block is active, the second block will not be
|
||||
// processed by CN/DTX but only by VAD and return to caller with
|
||||
// '*samples_processed' set to zero. There, the audio frame will be encoded
|
||||
// by the encoder. Second, the first block is inactive and is processed by
|
||||
// CN/DTX, then we stop processing the next block and return to the caller
|
||||
// which is EncodeSafe(), with "*samples_processed" equal to the number of
|
||||
// samples in first block.
|
||||
//
|
||||
// Output:
|
||||
// -bitstream : pointer to a buffer where DTX frame, if
|
||||
// generated, will be written to.
|
||||
// -bitstream_len_byte : contains the length of bit-stream in bytes, if
|
||||
// generated. Zero if no bit-stream is generated.
|
||||
// -samples_processed : contains no of samples that actually CN has
|
||||
// processed. Those samples processed by CN will not
|
||||
// be encoded by the encoder, obviously. If
|
||||
// contains zero, it means that the frame has been
|
||||
// identified as active by VAD. Note that
|
||||
// "*samples_processed" might be non-zero but
|
||||
// "*bitstream_len_byte" be zero.
|
||||
//
|
||||
// Return value:
|
||||
// -1 if failed,
|
||||
// 0 if succeeded.
|
||||
//
|
||||
int16_t ProcessFrameVADDTX(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte,
|
||||
int16_t* samples_processed)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// CurrentRate()
|
||||
// Call to get the current encoding rate of the encoder. This function
|
||||
// should be overwritten for codecs which automatically change their
|
||||
// target rate. One example is iSAC. The output of the function is the
|
||||
// current target rate.
|
||||
//
|
||||
// Output:
|
||||
// -rate_bps : the current target rate of the codec.
|
||||
//
|
||||
virtual void CurrentRate(int32_t* /* rate_bps */) {}
|
||||
|
||||
// &in_audio_[in_audio_ix_write_] always point to where new audio can be
|
||||
// written to
|
||||
int16_t in_audio_ix_write_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// &in_audio_[in_audio_ix_read_] points to where audio has to be read from
|
||||
int16_t in_audio_ix_read_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
int16_t in_timestamp_ix_write_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// Where the audio is stored before encoding,
|
||||
// To save memory the following buffer can be allocated
|
||||
// dynamically for 80 ms depending on the sampling frequency
|
||||
// of the codec.
|
||||
int16_t* in_audio_ GUARDED_BY(codec_wrapper_lock_);
|
||||
uint32_t* in_timestamp_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
int16_t frame_len_smpl_ GUARDED_BY(codec_wrapper_lock_);
|
||||
uint16_t num_channels_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// This will point to a static database of the supported codecs
|
||||
int16_t codec_id_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// This will account for the number of samples were not encoded
|
||||
// the case is rare, either samples are missed due to overwrite
|
||||
// at input buffer or due to encoding error
|
||||
uint32_t num_missed_samples_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// True if the encoder instance created
|
||||
bool encoder_exist_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// True if the encoder instance initialized
|
||||
bool encoder_initialized_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
const bool registered_in_neteq_
|
||||
GUARDED_BY(codec_wrapper_lock_); // TODO(henrik.lundin) Remove?
|
||||
|
||||
// VAD/DTX
|
||||
bool has_internal_dtx_ GUARDED_BY(codec_wrapper_lock_);
|
||||
WebRtcVadInst* ptr_vad_inst_ GUARDED_BY(codec_wrapper_lock_);
|
||||
bool vad_enabled_ GUARDED_BY(codec_wrapper_lock_);
|
||||
ACMVADMode vad_mode_ GUARDED_BY(codec_wrapper_lock_);
|
||||
int16_t vad_label_[MAX_FRAME_SIZE_10MSEC] GUARDED_BY(codec_wrapper_lock_);
|
||||
bool dtx_enabled_ GUARDED_BY(codec_wrapper_lock_);
|
||||
WebRtcCngEncInst* ptr_dtx_inst_ GUARDED_BY(codec_wrapper_lock_);
|
||||
uint8_t num_lpc_params_ // TODO(henrik.lundin) Delete and
|
||||
GUARDED_BY(codec_wrapper_lock_); // replace with kNewCNGNumLPCParams.
|
||||
bool sent_cn_previous_ GUARDED_BY(codec_wrapper_lock_);
|
||||
int16_t prev_frame_cng_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// FEC.
|
||||
bool has_internal_fec_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
WebRtcACMCodecParams encoder_params_ GUARDED_BY(codec_wrapper_lock_);
|
||||
|
||||
// Used to lock wrapper internal data
|
||||
// such as buffers and state variables.
|
||||
RWLockWrapper& codec_wrapper_lock_;
|
||||
|
||||
uint32_t last_timestamp_ GUARDED_BY(codec_wrapper_lock_);
|
||||
uint32_t unique_id_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GENERIC_CODEC_H_
|
||||
161
jni/webrtc/modules/audio_coding/main/acm2/acm_gsmfr.cc
Normal file
161
jni/webrtc/modules/audio_coding/main/acm2/acm_gsmfr.cc
Normal file
@@ -0,0 +1,161 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_gsmfr.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_GSMFR
|
||||
// NOTE! GSM-FR is not included in the open-source package. Modify this file
|
||||
// or your codec API to match the function calls and names of used GSM-FR API
|
||||
// file.
|
||||
#include "webrtc/modules/audio_coding/main/codecs/gsmfr/interface/gsmfr_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_GSMFR
|
||||
|
||||
ACMGSMFR::ACMGSMFR(int16_t /* codec_id */) : encoder_inst_ptr_(NULL) {}
|
||||
|
||||
ACMGSMFR::~ACMGSMFR() { return; }
|
||||
|
||||
int16_t ACMGSMFR::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMGSMFR::EnableDTX() { return -1; }
|
||||
|
||||
int16_t ACMGSMFR::DisableDTX() { return -1; }
|
||||
|
||||
int16_t ACMGSMFR::InternalInitEncoder(
|
||||
WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMGSMFR::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMGSMFR::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMGSMFR::DestructEncoderSafe() { return; }
|
||||
|
||||
void ACMGSMFR::InternalDestructEncoderInst(void* /* ptr_inst */) {
|
||||
return;
|
||||
}
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
ACMGSMFR::ACMGSMFR(int16_t codec_id)
|
||||
: codec_id_(codec_id),
|
||||
has_internal_dtx_(true),
|
||||
encoder_inst_ptr_(NULL) {}
|
||||
|
||||
ACMGSMFR::~ACMGSMFR() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcGSMFR_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMGSMFR::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
*bitstream_len_byte = WebRtcGSMFR_Encode(
|
||||
encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], frame_len_smpl_,
|
||||
reinterpret_cast<int16_t*>(bitstream));
|
||||
|
||||
// increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer
|
||||
in_audio_ix_read_ += frame_len_smpl_;
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMGSMFR::EnableDTX() {
|
||||
if (dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) {
|
||||
if (WebRtcGSMFR_EncoderInit(encoder_inst_ptr_, 1) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"EnableDTX: cannot init encoder for GSMFR");
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = true;
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMGSMFR::DisableDTX() {
|
||||
if (!dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) {
|
||||
if (WebRtcGSMFR_EncoderInit(encoder_inst_ptr_, 0) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"DisableDTX: cannot init encoder for GSMFR");
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = false;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t ACMGSMFR::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
if (WebRtcGSMFR_EncoderInit(encoder_inst_ptr_,
|
||||
((codec_params->enable_dtx) ? 1 : 0)) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError,
|
||||
webrtc::kTraceAudioCoding,
|
||||
unique_id_,
|
||||
"InternalInitEncoder: cannot init encoder for GSMFR");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMGSMFR::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMGSMFR::InternalCreateEncoder() {
|
||||
if (WebRtcGSMFR_CreateEnc(&encoder_inst_ptr_) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError,
|
||||
webrtc::kTraceAudioCoding,
|
||||
unique_id_,
|
||||
"InternalCreateEncoder: cannot create instance for GSMFR "
|
||||
"encoder");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMGSMFR::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcGSMFR_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
}
|
||||
|
||||
void ACMGSMFR::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcGSMFR_FreeEnc(static_cast<GSMFR_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
54
jni/webrtc/modules/audio_coding/main/acm2/acm_gsmfr.h
Normal file
54
jni/webrtc/modules/audio_coding/main/acm2/acm_gsmfr.h
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GSMFR_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GSMFR_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct GSMFR_encinst_t_;
|
||||
struct GSMFR_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMGSMFR : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMGSMFR(int16_t codec_id);
|
||||
~ACMGSMFR();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
int16_t EnableDTX();
|
||||
|
||||
int16_t DisableDTX();
|
||||
|
||||
GSMFR_encinst_t_* encoder_inst_ptr_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_GSMFR_H_
|
||||
145
jni/webrtc/modules/audio_coding/main/acm2/acm_ilbc.cc
Normal file
145
jni/webrtc/modules/audio_coding/main/acm2/acm_ilbc.cc
Normal file
@@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_ilbc.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_ILBC
|
||||
#include "webrtc/modules/audio_coding/codecs/ilbc/interface/ilbc.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_ILBC
|
||||
|
||||
ACMILBC::ACMILBC(int16_t /* codec_id */) : encoder_inst_ptr_(NULL) {}
|
||||
|
||||
ACMILBC::~ACMILBC() { return; }
|
||||
|
||||
int16_t ACMILBC::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMILBC::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMILBC::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMILBC::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMILBC::DestructEncoderSafe() { return; }
|
||||
|
||||
void ACMILBC::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
int16_t ACMILBC::SetBitRateSafe(const int32_t /* rate */) { return -1; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
ACMILBC::ACMILBC(int16_t codec_id) : encoder_inst_ptr_(NULL) {
|
||||
codec_id_ = codec_id;
|
||||
return;
|
||||
}
|
||||
|
||||
ACMILBC::~ACMILBC() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcIlbcfix_EncoderFree(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMILBC::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
*bitstream_len_byte = WebRtcIlbcfix_Encode(
|
||||
encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], frame_len_smpl_,
|
||||
reinterpret_cast<int16_t*>(bitstream));
|
||||
if (*bitstream_len_byte < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError,
|
||||
webrtc::kTraceAudioCoding,
|
||||
unique_id_,
|
||||
"InternalEncode: error in encode for ILBC");
|
||||
return -1;
|
||||
}
|
||||
// increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer
|
||||
in_audio_ix_read_ += frame_len_smpl_;
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMILBC::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
// initialize with a correct processing block length
|
||||
if ((160 == (codec_params->codec_inst).pacsize) ||
|
||||
(320 == (codec_params->codec_inst).pacsize)) {
|
||||
// processing block of 20ms
|
||||
return WebRtcIlbcfix_EncoderInit(encoder_inst_ptr_, 20);
|
||||
} else if ((240 == (codec_params->codec_inst).pacsize) ||
|
||||
(480 == (codec_params->codec_inst).pacsize)) {
|
||||
// processing block of 30ms
|
||||
return WebRtcIlbcfix_EncoderInit(encoder_inst_ptr_, 30);
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"InternalInitEncoder: invalid processing block");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMILBC::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMILBC::InternalCreateEncoder() {
|
||||
if (WebRtcIlbcfix_EncoderCreate(&encoder_inst_ptr_) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError,
|
||||
webrtc::kTraceAudioCoding,
|
||||
unique_id_,
|
||||
"InternalCreateEncoder: cannot create instance for ILBC "
|
||||
"encoder");
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMILBC::DestructEncoderSafe() {
|
||||
encoder_initialized_ = false;
|
||||
encoder_exist_ = false;
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcIlbcfix_EncoderFree(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ACMILBC::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcIlbcfix_EncoderFree(static_cast<iLBC_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMILBC::SetBitRateSafe(const int32_t rate) {
|
||||
// Check that rate is valid. No need to store the value
|
||||
if (rate == 13300) {
|
||||
WebRtcIlbcfix_EncoderInit(encoder_inst_ptr_, 30);
|
||||
} else if (rate == 15200) {
|
||||
WebRtcIlbcfix_EncoderInit(encoder_inst_ptr_, 20);
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
encoder_params_.codec_inst.rate = rate;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
56
jni/webrtc/modules/audio_coding/main/acm2/acm_ilbc.h
Normal file
56
jni/webrtc/modules/audio_coding/main/acm2/acm_ilbc.h
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ILBC_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ILBC_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct iLBC_encinst_t_;
|
||||
struct iLBC_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMILBC : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMILBC(int16_t codec_id);
|
||||
~ACMILBC();
|
||||
|
||||
// for FEC
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) OVERRIDE
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
int16_t SetBitRateSafe(const int32_t rate) OVERRIDE
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
void DestructEncoderSafe() OVERRIDE
|
||||
EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
iLBC_encinst_t_* encoder_inst_ptr_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ILBC_H_
|
||||
840
jni/webrtc/modules/audio_coding/main/acm2/acm_isac.cc
Normal file
840
jni/webrtc/modules/audio_coding/main/acm2/acm_isac.cc
Normal file
@@ -0,0 +1,840 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_isac.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
|
||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_ISAC
|
||||
#include "webrtc/modules/audio_coding/codecs/isac/main/interface/isac.h"
|
||||
#endif
|
||||
|
||||
#ifdef WEBRTC_CODEC_ISACFX
|
||||
#include "webrtc/modules/audio_coding/codecs/isac/fix/interface/isacfix.h"
|
||||
#endif
|
||||
|
||||
#if defined (WEBRTC_CODEC_ISAC) || defined (WEBRTC_CODEC_ISACFX)
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_isac_macros.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// we need this otherwise we cannot use forward declaration
|
||||
// in the header file
|
||||
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
|
||||
struct ACMISACInst {
|
||||
ACM_ISAC_STRUCT* inst;
|
||||
};
|
||||
#endif
|
||||
|
||||
#define ISAC_MIN_RATE 10000
|
||||
#define ISAC_MAX_RATE 56000
|
||||
|
||||
// Tables for bandwidth estimates
|
||||
#define NR_ISAC_BANDWIDTHS 24
|
||||
static const int32_t kIsacRatesWb[NR_ISAC_BANDWIDTHS] = {
|
||||
10000, 11100, 12300, 13700, 15200, 16900, 18800, 20900, 23300, 25900, 28700,
|
||||
31900, 10100, 11200, 12400, 13800, 15300, 17000, 18900, 21000, 23400, 26000,
|
||||
28800, 32000};
|
||||
|
||||
static const int32_t kIsacRatesSwb[NR_ISAC_BANDWIDTHS] = {
|
||||
10000, 11000, 12400, 13800, 15300, 17000, 18900, 21000, 23200, 25400, 27600,
|
||||
29800, 32000, 34100, 36300, 38500, 40700, 42900, 45100, 47300, 49500, 51700,
|
||||
53900, 56000 };
|
||||
|
||||
#if (!defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX))
|
||||
|
||||
ACMISAC::ACMISAC(int16_t /* codec_id */)
|
||||
: codec_inst_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
|
||||
codec_inst_ptr_(NULL),
|
||||
is_enc_initialized_(false),
|
||||
isac_coding_mode_(CHANNEL_INDEPENDENT),
|
||||
enforce_frame_size_(false),
|
||||
isac_currentBN_(32000),
|
||||
samples_in10MsAudio_(160), // Initiates to 16 kHz mode.
|
||||
decoder_initialized_(false) {
|
||||
}
|
||||
|
||||
ACMISAC::~ACMISAC() {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMISAC::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMISAC::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalInitDecoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMISAC::DestructEncoderSafe() { return; }
|
||||
|
||||
void ACMISAC::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
int16_t ACMISAC::Transcode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */,
|
||||
int16_t /* q_bwe */,
|
||||
int32_t /* scale */,
|
||||
bool /* is_red */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::SetBitRateSafe(int32_t /* bit_rate */) { return -1; }
|
||||
|
||||
int32_t ACMISAC::GetEstimatedBandwidthSafe() { return -1; }
|
||||
|
||||
int32_t ACMISAC::SetEstimatedBandwidthSafe(int32_t /* estimated_bandwidth */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t ACMISAC::GetRedPayloadSafe(uint8_t* /* red_payload */,
|
||||
int16_t* /* payload_bytes */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::UpdateDecoderSampFreq(int16_t /* codec_id */) { return -1; }
|
||||
|
||||
int16_t ACMISAC::UpdateEncoderSampFreq(uint16_t /* encoder_samp_freq_hz */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::EncoderSampFreq(uint16_t* /* samp_freq_hz */) { return -1; }
|
||||
|
||||
int32_t ACMISAC::ConfigISACBandwidthEstimator(
|
||||
const uint8_t /* init_frame_size_msec */,
|
||||
const uint16_t /* init_rate_bit_per_sec */,
|
||||
const bool /* enforce_frame_size */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t ACMISAC::SetISACMaxPayloadSize(
|
||||
const uint16_t /* max_payload_len_bytes */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int32_t ACMISAC::SetISACMaxRate(const uint32_t /* max_rate_bit_per_sec */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
void ACMISAC::UpdateFrameLen() { return; }
|
||||
|
||||
void ACMISAC::CurrentRate(int32_t* /*rate_bit_per_sec */) { return; }
|
||||
|
||||
bool ACMISAC::DecoderParamsSafe(WebRtcACMCodecParams* /* dec_params */,
|
||||
const uint8_t /* payload_type */) {
|
||||
return false;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::REDPayloadISAC(const int32_t /* isac_rate */,
|
||||
const int16_t /* isac_bw_estimate */,
|
||||
uint8_t* /* payload */,
|
||||
int16_t* /* payload_len_bytes */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
AudioDecoder* ACMISAC::Decoder(int /* codec_id */) { return NULL; }
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
#ifdef WEBRTC_CODEC_ISACFX
|
||||
|
||||
// How the scaling is computed. iSAC computes a gain based on the
|
||||
// bottleneck. It follows the following expression for that
|
||||
//
|
||||
// G(BN_kbps) = pow(10, (a + b * BN_kbps + c * BN_kbps * BN_kbps) / 20.0)
|
||||
// / 3.4641;
|
||||
//
|
||||
// Where for 30 ms framelength we have,
|
||||
//
|
||||
// a = -23; b = 0.48; c = 0;
|
||||
//
|
||||
// As the default encoder is operating at 32kbps we have the scale as
|
||||
//
|
||||
// S(BN_kbps) = G(BN_kbps) / G(32);
|
||||
|
||||
#define ISAC_NUM_SUPPORTED_RATES 9
|
||||
|
||||
static const uint16_t kIsacSuportedRates[ISAC_NUM_SUPPORTED_RATES] = {
|
||||
32000, 30000, 26000, 23000, 21000, 19000, 17000, 15000, 12000};
|
||||
|
||||
static const float kIsacScale[ISAC_NUM_SUPPORTED_RATES] = {
|
||||
1.0f, 0.8954f, 0.7178f, 0.6081f, 0.5445f,
|
||||
0.4875f, 0.4365f, 0.3908f, 0.3311f
|
||||
};
|
||||
|
||||
enum IsacSamplingRate {
|
||||
kIsacWideband = 16,
|
||||
kIsacSuperWideband = 32
|
||||
};
|
||||
|
||||
static float ACMISACFixTranscodingScale(uint16_t rate) {
|
||||
// find the scale for transcoding, the scale is rounded
|
||||
// downward
|
||||
float scale = -1;
|
||||
for (int16_t n = 0; n < ISAC_NUM_SUPPORTED_RATES; n++) {
|
||||
if (rate >= kIsacSuportedRates[n]) {
|
||||
scale = kIsacScale[n];
|
||||
break;
|
||||
}
|
||||
}
|
||||
return scale;
|
||||
}
|
||||
|
||||
static void ACMISACFixGetSendBitrate(ACM_ISAC_STRUCT* inst,
|
||||
int32_t* bottleneck) {
|
||||
*bottleneck = WebRtcIsacfix_GetUplinkBw(inst);
|
||||
}
|
||||
|
||||
static int16_t ACMISACFixGetNewBitstream(ACM_ISAC_STRUCT* inst,
|
||||
int16_t bwe_index,
|
||||
int16_t /* jitter_index */,
|
||||
int32_t rate,
|
||||
int16_t* bitstream,
|
||||
bool is_red) {
|
||||
if (is_red) {
|
||||
// RED not supported with iSACFIX
|
||||
return -1;
|
||||
}
|
||||
float scale = ACMISACFixTranscodingScale((uint16_t)rate);
|
||||
return WebRtcIsacfix_GetNewBitStream(inst, bwe_index, scale, bitstream);
|
||||
}
|
||||
|
||||
static int16_t ACMISACFixGetSendBWE(ACM_ISAC_STRUCT* inst,
|
||||
int16_t* rate_index,
|
||||
int16_t* /* dummy */) {
|
||||
int16_t local_rate_index;
|
||||
int16_t status = WebRtcIsacfix_GetDownLinkBwIndex(inst, &local_rate_index);
|
||||
if (status < 0) {
|
||||
return -1;
|
||||
} else {
|
||||
*rate_index = local_rate_index;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int16_t ACMISACFixControlBWE(ACM_ISAC_STRUCT* inst,
|
||||
int32_t rate_bps,
|
||||
int16_t frame_size_ms,
|
||||
int16_t enforce_frame_size) {
|
||||
return WebRtcIsacfix_ControlBwe(
|
||||
inst, (int16_t)rate_bps, frame_size_ms, enforce_frame_size);
|
||||
}
|
||||
|
||||
static int16_t ACMISACFixControl(ACM_ISAC_STRUCT* inst,
|
||||
int32_t rate_bps,
|
||||
int16_t frame_size_ms) {
|
||||
return WebRtcIsacfix_Control(inst, (int16_t)rate_bps, frame_size_ms);
|
||||
}
|
||||
|
||||
// The following two function should have the same signature as their counter
|
||||
// part in iSAC floating-point, i.e. WebRtcIsac_EncSampRate &
|
||||
// WebRtcIsac_DecSampRate.
|
||||
static uint16_t ACMISACFixGetEncSampRate(ACM_ISAC_STRUCT* /* inst */) {
|
||||
return 16000;
|
||||
}
|
||||
|
||||
static uint16_t ACMISACFixGetDecSampRate(ACM_ISAC_STRUCT* /* inst */) {
|
||||
return 16000;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
ACMISAC::ACMISAC(int16_t codec_id)
|
||||
: AudioDecoder(ACMCodecDB::neteq_decoders_[codec_id]),
|
||||
codec_inst_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
|
||||
is_enc_initialized_(false),
|
||||
isac_coding_mode_(CHANNEL_INDEPENDENT),
|
||||
enforce_frame_size_(false),
|
||||
isac_current_bn_(32000),
|
||||
samples_in_10ms_audio_(160), // Initiates to 16 kHz mode.
|
||||
decoder_initialized_(false) {
|
||||
codec_id_ = codec_id;
|
||||
|
||||
// Create codec instance.
|
||||
codec_inst_ptr_ = new ACMISACInst;
|
||||
if (codec_inst_ptr_ == NULL) {
|
||||
return;
|
||||
}
|
||||
codec_inst_ptr_->inst = NULL;
|
||||
state_ = codec_inst_ptr_;
|
||||
}
|
||||
|
||||
ACMISAC::~ACMISAC() {
|
||||
if (codec_inst_ptr_ != NULL) {
|
||||
if (codec_inst_ptr_->inst != NULL) {
|
||||
ACM_ISAC_FREE(codec_inst_ptr_->inst);
|
||||
codec_inst_ptr_->inst = NULL;
|
||||
}
|
||||
delete codec_inst_ptr_;
|
||||
codec_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalInitDecoder(WebRtcACMCodecParams* codec_params) {
|
||||
// set decoder sampling frequency.
|
||||
if (codec_params->codec_inst.plfreq == 32000 ||
|
||||
codec_params->codec_inst.plfreq == 48000) {
|
||||
UpdateDecoderSampFreq(ACMCodecDB::kISACSWB);
|
||||
} else {
|
||||
UpdateDecoderSampFreq(ACMCodecDB::kISAC);
|
||||
}
|
||||
|
||||
// in a one-way communication we may never register send-codec.
|
||||
// However we like that the BWE to work properly so it has to
|
||||
// be initialized. The BWE is initialized when iSAC encoder is initialized.
|
||||
// Therefore, we need this.
|
||||
if (!encoder_initialized_) {
|
||||
// Since we don't require a valid rate or a valid packet size when
|
||||
// initializing the decoder, we set valid values before initializing encoder
|
||||
codec_params->codec_inst.rate = kIsacWbDefaultRate;
|
||||
codec_params->codec_inst.pacsize = kIsacPacSize960;
|
||||
if (InternalInitEncoder(codec_params) < 0) {
|
||||
return -1;
|
||||
}
|
||||
encoder_initialized_ = true;
|
||||
}
|
||||
|
||||
CriticalSectionScoped lock(codec_inst_crit_sect_.get());
|
||||
return ACM_ISAC_DECODERINIT(codec_inst_ptr_->inst);
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMISAC::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMISAC::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
// ISAC takes 10ms audio every time we call encoder, therefore,
|
||||
// it should be treated like codecs with 'basic coding block'
|
||||
// non-zero, and the following 'while-loop' should not be necessary.
|
||||
// However, due to a mistake in the codec the frame-size might change
|
||||
// at the first 10ms pushed in to iSAC if the bit-rate is low, this is
|
||||
// sort of a bug in iSAC. to address this we treat iSAC as the
|
||||
// following.
|
||||
CriticalSectionScoped lock(codec_inst_crit_sect_.get());
|
||||
if (codec_inst_ptr_ == NULL) {
|
||||
return -1;
|
||||
}
|
||||
*bitstream_len_byte = 0;
|
||||
while ((*bitstream_len_byte == 0) && (in_audio_ix_read_ < frame_len_smpl_)) {
|
||||
if (in_audio_ix_read_ > in_audio_ix_write_) {
|
||||
// something is wrong.
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"The actual frame-size of iSAC appears to be larger that "
|
||||
"expected. All audio pushed in but no bit-stream is "
|
||||
"generated.");
|
||||
return -1;
|
||||
}
|
||||
*bitstream_len_byte = ACM_ISAC_ENCODE(
|
||||
codec_inst_ptr_->inst, &in_audio_[in_audio_ix_read_],
|
||||
reinterpret_cast<int16_t*>(bitstream));
|
||||
// increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer
|
||||
in_audio_ix_read_ += samples_in_10ms_audio_;
|
||||
}
|
||||
if (*bitstream_len_byte == 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"ISAC Has encoded the whole frame but no bit-stream is "
|
||||
"generated.");
|
||||
}
|
||||
|
||||
// a packet is generated iSAC, is set in adaptive mode may change
|
||||
// the frame length and we like to update the bottleneck value as
|
||||
// well, although updating bottleneck is not crucial
|
||||
if ((*bitstream_len_byte > 0) && (isac_coding_mode_ == ADAPTIVE)) {
|
||||
ACM_ISAC_GETSENDBITRATE(codec_inst_ptr_->inst, &isac_current_bn_);
|
||||
}
|
||||
UpdateFrameLen();
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
// if rate is set to -1 then iSAC has to be in adaptive mode
|
||||
if (codec_params->codec_inst.rate == -1) {
|
||||
isac_coding_mode_ = ADAPTIVE;
|
||||
} else if ((codec_params->codec_inst.rate >= ISAC_MIN_RATE) &&
|
||||
(codec_params->codec_inst.rate <= ISAC_MAX_RATE)) {
|
||||
// sanity check that rate is in acceptable range
|
||||
isac_coding_mode_ = CHANNEL_INDEPENDENT;
|
||||
isac_current_bn_ = codec_params->codec_inst.rate;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// we need to set the encoder sampling frequency.
|
||||
if (UpdateEncoderSampFreq((uint16_t)codec_params->codec_inst.plfreq) < 0) {
|
||||
return -1;
|
||||
}
|
||||
CriticalSectionScoped lock(codec_inst_crit_sect_.get());
|
||||
if (ACM_ISAC_ENCODERINIT(codec_inst_ptr_->inst, isac_coding_mode_) < 0) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// apply the frame-size and rate if operating in
|
||||
// channel-independent mode
|
||||
if (isac_coding_mode_ == CHANNEL_INDEPENDENT) {
|
||||
if (ACM_ISAC_CONTROL(codec_inst_ptr_->inst,
|
||||
codec_params->codec_inst.rate,
|
||||
codec_params->codec_inst.pacsize /
|
||||
(codec_params->codec_inst.plfreq / 1000)) < 0) {
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
// We need this for adaptive case and has to be called
|
||||
// after initialization
|
||||
ACM_ISAC_GETSENDBITRATE(codec_inst_ptr_->inst, &isac_current_bn_);
|
||||
}
|
||||
frame_len_smpl_ = ACM_ISAC_GETNEWFRAMELEN(codec_inst_ptr_->inst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int16_t ACMISAC::InternalCreateEncoder() {
|
||||
CriticalSectionScoped lock(codec_inst_crit_sect_.get());
|
||||
if (codec_inst_ptr_ == NULL) {
|
||||
return -1;
|
||||
}
|
||||
decoder_initialized_ = false;
|
||||
int16_t status = ACM_ISAC_CREATE(&(codec_inst_ptr_->inst));
|
||||
|
||||
if (status < 0)
|
||||
codec_inst_ptr_->inst = NULL;
|
||||
return status;
|
||||
}
|
||||
|
||||
// Re-packs the most recent encoded frame into a new bitstream at the given
// target |rate| and bandwidth-estimate index |q_bwe|. When |is_red| is true
// the output is formatted as a redundant (RED) payload.
// On success returns the payload length in bytes (also stored in
// |*bitstream_len_byte|); on failure sets |*bitstream_len_byte| to 0 and
// returns -1.
int16_t ACMISAC::Transcode(uint8_t* bitstream,
                           int16_t* bitstream_len_byte,
                           int16_t q_bwe,
                           int32_t rate,
                           bool is_red) {
  int16_t jitter_info = 0;
  // transcode from a higher rate to lower rate sanity check
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  if (codec_inst_ptr_ == NULL) {
    return -1;
  }

  *bitstream_len_byte = ACM_ISAC_GETNEWBITSTREAM(
      codec_inst_ptr_->inst, q_bwe, jitter_info, rate,
      reinterpret_cast<int16_t*>(bitstream), (is_red) ? 1 : 0);

  if (*bitstream_len_byte < 0) {
    // error happened
    *bitstream_len_byte = 0;
    return -1;
  } else {
    return *bitstream_len_byte;
  }
}
|
||||
|
||||
// Refreshes the cached frame length (in samples) from the codec instance and
// mirrors it into the encoder parameters' packet size.
void ACMISAC::UpdateFrameLen() {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  frame_len_smpl_ = ACM_ISAC_GETNEWFRAMELEN(codec_inst_ptr_->inst);
  encoder_params_.codec_inst.pacsize = frame_len_smpl_;
}
|
||||
|
||||
// The iSAC encoder shares its instance with the decoder, so it cannot be
// deleted here; only mark the encoder side as uninitialized.
void ACMISAC::DestructEncoderSafe() {
  encoder_initialized_ = false;
}
|
||||
|
||||
void ACMISAC::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
ACM_ISAC_FREE(static_cast<ACM_ISAC_STRUCT *>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Sets the encoder target bit-rate (bottleneck).
//   bit_rate == -1                         -> channel-adaptive mode.
//   ISAC_MIN_RATE <= bit_rate <= ISAC_MAX_RATE -> channel-independent mode
//                                                 at the given rate.
// Any other value is rejected. Switching modes re-initializes the encoder.
// Returns 0 on success, -1 on failure.
int16_t ACMISAC::SetBitRateSafe(int32_t bit_rate) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  if (codec_inst_ptr_ == NULL) {
    return -1;
  }
  uint16_t encoder_samp_freq;
  // NOTE(review): EncoderSampFreq() re-acquires |codec_inst_crit_sect_|;
  // this relies on the critical section being recursive — confirm.
  EncoderSampFreq(&encoder_samp_freq);
  bool reinit = false;
  // change the BN of iSAC
  if (bit_rate == -1) {
    // ADAPTIVE MODE
    // Check if it was already in adaptive mode
    if (isac_coding_mode_ != ADAPTIVE) {
      // was not in adaptive, then set the mode to adaptive
      // and flag for re-initialization
      isac_coding_mode_ = ADAPTIVE;
      reinit = true;
    }
  } else if ((bit_rate >= ISAC_MIN_RATE) && (bit_rate <= ISAC_MAX_RATE)) {
    // Sanity check if the rate valid
    // check if it was in channel-independent mode before
    if (isac_coding_mode_ != CHANNEL_INDEPENDENT) {
      // was not in channel independent, set the mode to
      // channel-independent and flag for re-initialization
      isac_coding_mode_ = CHANNEL_INDEPENDENT;
      reinit = true;
    }
    // store the bottleneck
    isac_current_bn_ = (uint16_t)bit_rate;
  } else {
    // invalid rate
    return -1;
  }

  int16_t status = 0;
  if (reinit) {
    // initialize and check if it is successful
    if (ACM_ISAC_ENCODERINIT(codec_inst_ptr_->inst, isac_coding_mode_) < 0) {
      // failed initialization
      return -1;
    }
  }
  if (isac_coding_mode_ == CHANNEL_INDEPENDENT) {
    // At 32/48 kHz the frame size is fixed at 30 ms; otherwise derive the
    // frame duration (ms) from the frame length in samples at 16 kHz.
    status = ACM_ISAC_CONTROL(
        codec_inst_ptr_->inst, isac_current_bn_,
        (encoder_samp_freq == 32000 || encoder_samp_freq == 48000) ? 30 :
        (frame_len_smpl_ / 16));
    if (status < 0) {
      status = -1;
    }
  }

  // Update encoder parameters
  encoder_params_.codec_inst.rate = bit_rate;

  UpdateFrameLen();
  return status;
}
|
||||
|
||||
// Returns the receive-side estimated bandwidth in bits/s, looked up from the
// rate table matching the decoder sample rate, or -1 if the codec reports an
// out-of-range bandwidth index.
int32_t ACMISAC::GetEstimatedBandwidthSafe() {
  int16_t bandwidth_index = 0;
  int16_t delay_index = 0;
  int samp_rate;

  // Get bandwidth information
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  ACM_ISAC_GETSENDBWE(codec_inst_ptr_->inst, &bandwidth_index, &delay_index);

  // Validity check of index
  if ((bandwidth_index < 0) || (bandwidth_index >= NR_ISAC_BANDWIDTHS)) {
    return -1;
  }

  // Check sample frequency
  samp_rate = ACM_ISAC_GETDECSAMPRATE(codec_inst_ptr_->inst);
  if (samp_rate == 16000) {
    return kIsacRatesWb[bandwidth_index];
  } else {
    return kIsacRatesSwb[bandwidth_index];
  }
}
|
||||
|
||||
// Feeds an externally estimated bandwidth (bits/s) into the codec's bandwidth
// estimator by translating it to the nearest table index for the current
// encoder sample rate. Always returns 0.
int32_t ACMISAC::SetEstimatedBandwidthSafe(int32_t estimated_bandwidth) {
  int samp_rate;
  int16_t bandwidth_index;

  // Check sample frequency and choose appropriate table
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  samp_rate = ACM_ISAC_GETENCSAMPRATE(codec_inst_ptr_->inst);

  if (samp_rate == 16000) {
    // Search through the WB rate table to find the index.
    // The WB table is split in two halves; fall back to the last index of the
    // first half if no entry matches.
    bandwidth_index = NR_ISAC_BANDWIDTHS / 2 - 1;
    for (int i = 0; i < (NR_ISAC_BANDWIDTHS / 2); i++) {
      if (estimated_bandwidth == kIsacRatesWb[i]) {
        bandwidth_index = i;
        break;
      } else if (estimated_bandwidth
          == kIsacRatesWb[i + NR_ISAC_BANDWIDTHS / 2]) {
        bandwidth_index = i + NR_ISAC_BANDWIDTHS / 2;
        break;
      } else if (estimated_bandwidth < kIsacRatesWb[i]) {
        bandwidth_index = i;
        break;
      }
    }
  } else {
    // Search through the SWB rate table to find the index
    bandwidth_index = NR_ISAC_BANDWIDTHS - 1;
    for (int i = 0; i < NR_ISAC_BANDWIDTHS; i++) {
      if (estimated_bandwidth <= kIsacRatesSwb[i]) {
        bandwidth_index = i;
        break;
      }
    }
  }

  // Set iSAC Bandwidth Estimate
  ACM_ISAC_SETBWE(codec_inst_ptr_->inst, bandwidth_index);

  return 0;
}
|
||||
|
||||
// Fetches the lower-rate redundant (RED) version of the last encoded frame
// into |red_payload| and stores its length in |*payload_bytes|.
// Returns 0 on success, -1 on failure (always -1 when the floating-point
// iSAC codec is not compiled in).
int32_t ACMISAC::GetRedPayloadSafe(
#if (!defined(WEBRTC_CODEC_ISAC))
    uint8_t* /* red_payload */,
    int16_t* /* payload_bytes */) {
  return -1;
#else
    uint8_t* red_payload, int16_t* payload_bytes) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  int16_t bytes =
      WebRtcIsac_GetRedPayload(
          codec_inst_ptr_->inst, reinterpret_cast<int16_t*>(red_payload));
  if (bytes < 0) {
    return -1;
  }
  *payload_bytes = bytes;
  return 0;
#endif
}
|
||||
|
||||
// Switches the decoder sample rate to match the given codec database id:
// wideband (16 kHz) for kISAC, super-wideband (32 kHz) for kISACSWB/kISACFB.
// Returns the codec's status, or -1 for an unknown id. Without the
// floating-point iSAC build the function is a no-op returning 0.
int16_t ACMISAC::UpdateDecoderSampFreq(
#ifdef WEBRTC_CODEC_ISAC
    int16_t codec_id) {
  // The decoder supports only wideband and super-wideband.
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  if (ACMCodecDB::kISAC == codec_id) {
    return WebRtcIsac_SetDecSampRate(codec_inst_ptr_->inst, 16000);
  } else if (ACMCodecDB::kISACSWB == codec_id ||
             ACMCodecDB::kISACFB == codec_id) {
    return WebRtcIsac_SetDecSampRate(codec_inst_ptr_->inst, 32000);
  } else {
    return -1;
  }
#else
    int16_t /* codec_id */) {
  return 0;
#endif
}
|
||||
|
||||
// Changes the encoder sample rate to |encoder_samp_freq_hz| (16, 32 or
// 48 kHz). A rate change resets the input audio/timestamp buffers and
// refreshes the cached frame length and encoder parameters. Returns 0 on
// success (including when the rate is unchanged), -1 on invalid rate or
// codec failure. Without the floating-point iSAC build this is a no-op.
int16_t ACMISAC::UpdateEncoderSampFreq(
#ifdef WEBRTC_CODEC_ISAC
    uint16_t encoder_samp_freq_hz) {
  uint16_t current_samp_rate_hz;
  EncoderSampFreq(&current_samp_rate_hz);

  if (current_samp_rate_hz != encoder_samp_freq_hz) {
    if ((encoder_samp_freq_hz != 16000) && (encoder_samp_freq_hz != 32000) &&
        (encoder_samp_freq_hz != 48000)) {
      return -1;
    } else {
      // Discard any buffered input audio; it was captured at the old rate.
      in_audio_ix_read_ = 0;
      in_audio_ix_write_ = 0;
      in_timestamp_ix_write_ = 0;
      CriticalSectionScoped lock(codec_inst_crit_sect_.get());
      if (WebRtcIsac_SetEncSampRate(codec_inst_ptr_->inst,
                                    encoder_samp_freq_hz) < 0) {
        return -1;
      }
      samples_in_10ms_audio_ = encoder_samp_freq_hz / 100;
      frame_len_smpl_ = ACM_ISAC_GETNEWFRAMELEN(codec_inst_ptr_->inst);
      encoder_params_.codec_inst.pacsize = frame_len_smpl_;
      encoder_params_.codec_inst.plfreq = encoder_samp_freq_hz;
      return 0;
    }
  }
#else
    uint16_t /* encoder_samp_freq_hz */) {
#endif
  return 0;
}
|
||||
|
||||
// Writes the encoder's current sample rate (Hz) to |*samp_freq_hz|.
// Always returns 0.
int16_t ACMISAC::EncoderSampFreq(uint16_t* samp_freq_hz) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  *samp_freq_hz = ACM_ISAC_GETENCSAMPRATE(codec_inst_ptr_->inst);
  return 0;
}
|
||||
|
||||
// Initializes the codec's bandwidth estimator with a starting frame size
// (ms) and rate (bits/s). At 32/48 kHz the frame size is forced to 30 ms
// regardless of the arguments (see TODO below). On success the cached frame
// length and current bottleneck are refreshed. Returns 0 on success, -1 on
// failure.
int32_t ACMISAC::ConfigISACBandwidthEstimator(
    const uint8_t init_frame_size_msec,
    const uint16_t init_rate_bit_per_sec,
    const bool enforce_frame_size) {
  int16_t status;
  {
    uint16_t samp_freq_hz;
    EncoderSampFreq(&samp_freq_hz);
    CriticalSectionScoped lock(codec_inst_crit_sect_.get());
    // TODO(turajs): at 32kHz we hardcode calling with 30ms and enforce
    // the frame-size otherwise we might get error. Revise if
    // control-bwe is changed.
    if (samp_freq_hz == 32000 || samp_freq_hz == 48000) {
      status = ACM_ISAC_CONTROL_BWE(codec_inst_ptr_->inst,
                                    init_rate_bit_per_sec, 30, 1);
    } else {
      status = ACM_ISAC_CONTROL_BWE(codec_inst_ptr_->inst,
                                    init_rate_bit_per_sec,
                                    init_frame_size_msec,
                                    enforce_frame_size ? 1 : 0);
    }
  }
  if (status < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Couldn't config iSAC BWE.");
    return -1;
  }
  {
    // UpdateFrameLen() requires the wrapper write lock.
    WriteLockScoped wl(codec_wrapper_lock_);
    UpdateFrameLen();
  }
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  ACM_ISAC_GETSENDBITRATE(codec_inst_ptr_->inst, &isac_current_bn_);
  return 0;
}
|
||||
|
||||
// Caps the size (bytes) of payloads the encoder may produce.
// Returns the codec's status code.
int32_t ACMISAC::SetISACMaxPayloadSize(const uint16_t max_payload_len_bytes) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  const int32_t status =
      ACM_ISAC_SETMAXPAYLOADSIZE(codec_inst_ptr_->inst, max_payload_len_bytes);
  return status;
}
|
||||
|
||||
// Caps the instantaneous rate (bits/s) the encoder may produce.
// Returns the codec's status code.
int32_t ACMISAC::SetISACMaxRate(const uint32_t max_rate_bit_per_sec) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  const int32_t status =
      ACM_ISAC_SETMAXRATE(codec_inst_ptr_->inst, max_rate_bit_per_sec);
  return status;
}
|
||||
|
||||
// Reports the current send rate, but only when running in channel-adaptive
// mode; in channel-independent mode |*rate_bit_per_sec| is left untouched.
void ACMISAC::CurrentRate(int32_t* rate_bit_per_sec) {
  if (isac_coding_mode_ != ADAPTIVE)
    return;
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  ACM_ISAC_GETSENDBITRATE(codec_inst_ptr_->inst, rate_bit_per_sec);
}
|
||||
|
||||
// Produces a redundant (RED) payload at |isac_rate| by transcoding the last
// encoded frame under the wrapper read lock. Returns the payload length in
// bytes, or -1 on failure.
int16_t ACMISAC::REDPayloadISAC(const int32_t isac_rate,
                                const int16_t isac_bw_estimate,
                                uint8_t* payload,
                                int16_t* payload_len_bytes) {
  ReadLockScoped rl(codec_wrapper_lock_);
  return Transcode(payload, payload_len_bytes, isac_bw_estimate, isac_rate,
                   true);
}
|
||||
|
||||
// AudioDecoder interface: decodes |encoded_len| bytes into |decoded| PCM and
// reports whether the frame was speech or CNG via |*speech_type|.
// Returns the codec's sample count / status code.
int ACMISAC::Decode(const uint8_t* encoded,
                    size_t encoded_len,
                    int16_t* decoded,
                    SpeechType* speech_type) {
  int16_t temp_type;
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  int ret =
      ACM_ISAC_DECODE_B(static_cast<ACM_ISAC_STRUCT*>(codec_inst_ptr_->inst),
                        reinterpret_cast<const uint16_t*>(encoded),
                        static_cast<int16_t>(encoded_len),
                        decoded,
                        &temp_type);
  *speech_type = ConvertSpeechType(temp_type);
  return ret;
}
|
||||
|
||||
// AudioDecoder interface: generates |num_frames| frames of packet-loss
// concealment audio into |decoded|. Returns the codec's status code.
int ACMISAC::DecodePlc(int num_frames, int16_t* decoded) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  return ACM_ISAC_DECODEPLC(
      static_cast<ACM_ISAC_STRUCT*>(codec_inst_ptr_->inst),
      decoded,
      static_cast<int16_t>(num_frames));
}
|
||||
|
||||
// AudioDecoder interface: feeds an incoming RTP packet to the codec's
// bandwidth estimator (the payload is not decoded here).
// Returns the codec's status code.
int ACMISAC::IncomingPacket(const uint8_t* payload,
                            size_t payload_len,
                            uint16_t rtp_sequence_number,
                            uint32_t rtp_timestamp,
                            uint32_t arrival_timestamp) {
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  return ACM_ISAC_DECODE_BWE(
      static_cast<ACM_ISAC_STRUCT*>(codec_inst_ptr_->inst),
      reinterpret_cast<const uint16_t*>(payload),
      static_cast<uint32_t>(payload_len),
      rtp_sequence_number,
      rtp_timestamp,
      arrival_timestamp);
}
|
||||
|
||||
// AudioDecoder interface: decodes a redundant (RED) payload. With the
// fixed-point codec there is no dedicated RCU decoder, so this falls back to
// normal decoding (see ACM_ISAC_DECODERCU in acm_isac_macros.h).
int ACMISAC::DecodeRedundant(const uint8_t* encoded,
                             size_t encoded_len,
                             int16_t* decoded,
                             SpeechType* speech_type) {
  int16_t temp_type = 1;  // Default is speech.
  CriticalSectionScoped lock(codec_inst_crit_sect_.get());
  int16_t ret =
      ACM_ISAC_DECODERCU(static_cast<ACM_ISAC_STRUCT*>(codec_inst_ptr_->inst),
                         reinterpret_cast<const uint16_t*>(encoded),
                         static_cast<int16_t>(encoded_len),
                         decoded,
                         &temp_type);
  *speech_type = ConvertSpeechType(temp_type);
  return ret;
}
|
||||
|
||||
int ACMISAC::ErrorCode() {
|
||||
CriticalSectionScoped lock(codec_inst_crit_sect_.get());
|
||||
return ACM_ISAC_GETERRORCODE(
|
||||
static_cast<ACM_ISAC_STRUCT*>(codec_inst_ptr_->inst));
|
||||
}
|
||||
|
||||
// Returns this object as an AudioDecoder, lazily creating and initializing
// the shared iSAC instance on first use. The encoder must be initialized
// before the decoder so that the bandwidth estimator works.
// Returns NULL if creation or initialization fails.
AudioDecoder* ACMISAC::Decoder(int codec_id) {
  // Create iSAC instance if it does not exist.
  WriteLockScoped wl(codec_wrapper_lock_);
  if (!encoder_exist_) {
    CriticalSectionScoped lock(codec_inst_crit_sect_.get());
    assert(codec_inst_ptr_->inst == NULL);
    encoder_initialized_ = false;
    decoder_initialized_ = false;
    if (ACM_ISAC_CREATE(&(codec_inst_ptr_->inst)) < 0) {
      codec_inst_ptr_->inst = NULL;
      return NULL;
    }
    encoder_exist_ = true;
  }

  WebRtcACMCodecParams codec_params;
  if (!encoder_initialized_ || !decoder_initialized_) {
    ACMCodecDB::Codec(codec_id, &codec_params.codec_inst);
    // The following three values are not used but we set them to valid values.
    codec_params.enable_dtx = false;
    codec_params.enable_vad = false;
    codec_params.vad_mode = VADNormal;
  }

  if (!encoder_initialized_) {
    // Initialize encoder to make sure bandwidth estimator works.
    if (InternalInitEncoder(&codec_params) < 0)
      return NULL;
    encoder_initialized_ = true;
  }

  if (!decoder_initialized_) {
    if (InternalInitDecoder(&codec_params) < 0)
      return NULL;
    decoder_initialized_ = true;
  }

  return this;
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
141
jni/webrtc/modules/audio_coding/main/acm2/acm_isac.h
Normal file
141
jni/webrtc/modules/audio_coding/main/acm2/acm_isac.h
Normal file
@@ -0,0 +1,141 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class CriticalSectionWrapper;
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
struct ACMISACInst;
|
||||
|
||||
enum IsacCodingMode {
|
||||
ADAPTIVE,
|
||||
CHANNEL_INDEPENDENT
|
||||
};
|
||||
|
||||
// ACM wrapper around the iSAC codec. Implements both the ACMGenericCodec
// encoder interface and the NetEq AudioDecoder interface; encoder and
// decoder share one codec instance, guarded by |codec_inst_crit_sect_|.
class ACMISAC : public ACMGenericCodec, AudioDecoder {
 public:
  explicit ACMISAC(int16_t codec_id);
  ~ACMISAC();

  // Initializes the decoder side of the shared instance.
  int16_t InternalInitDecoder(WebRtcACMCodecParams* codec_params)
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  // Methods below are inherited from ACMGenericCodec.
  ACMGenericCodec* CreateInstance(void) OVERRIDE;

  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  // Switches decoder/encoder sample rates (WB/SWB).
  int16_t UpdateDecoderSampFreq(int16_t codec_id) OVERRIDE;

  int16_t UpdateEncoderSampFreq(uint16_t samp_freq_hz) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t EncoderSampFreq(uint16_t* samp_freq_hz) OVERRIDE;

  // Configures the codec's bandwidth estimator.
  int32_t ConfigISACBandwidthEstimator(const uint8_t init_frame_size_msec,
                                       const uint16_t init_rate_bit_per_sec,
                                       const bool enforce_frame_size) OVERRIDE;

  int32_t SetISACMaxPayloadSize(const uint16_t max_payload_len_bytes) OVERRIDE;

  int32_t SetISACMaxRate(const uint32_t max_rate_bit_per_sec) OVERRIDE;

  // Produces a redundant (RED) payload by transcoding the last frame.
  int16_t REDPayloadISAC(const int32_t isac_rate,
                         const int16_t isac_bw_estimate,
                         uint8_t* payload,
                         int16_t* payload_len_bytes) OVERRIDE;

  // Methods below are inherited from AudioDecoder.
  virtual int Decode(const uint8_t* encoded,
                     size_t encoded_len,
                     int16_t* decoded,
                     SpeechType* speech_type) OVERRIDE;

  virtual bool HasDecodePlc() const OVERRIDE { return true; }

  virtual int DecodePlc(int num_frames, int16_t* decoded) OVERRIDE;

  virtual int Init() OVERRIDE { return 0; }

  virtual int IncomingPacket(const uint8_t* payload,
                             size_t payload_len,
                             uint16_t rtp_sequence_number,
                             uint32_t rtp_timestamp,
                             uint32_t arrival_timestamp) OVERRIDE;

  virtual int DecodeRedundant(const uint8_t* encoded,
                              size_t encoded_len,
                              int16_t* decoded,
                              SpeechType* speech_type) OVERRIDE;

  virtual int ErrorCode() OVERRIDE;

 protected:
  // Re-packs the last encoded frame at a new rate/bandwidth index.
  int16_t Transcode(uint8_t* bitstream,
                    int16_t* bitstream_len_byte,
                    int16_t q_bwe,
                    int32_t rate,
                    bool is_red);

  void UpdateFrameLen() EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  // Methods below are inherited from ACMGenericCodec.
  void DestructEncoderSafe() OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t SetBitRateSafe(const int32_t bit_rate) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int32_t GetEstimatedBandwidthSafe() OVERRIDE;

  int32_t SetEstimatedBandwidthSafe(int32_t estimated_bandwidth) OVERRIDE;

  int32_t GetRedPayloadSafe(uint8_t* red_payload,
                            int16_t* payload_bytes) OVERRIDE;

  int16_t InternalCreateEncoder() OVERRIDE;

  void InternalDestructEncoderInst(void* ptr_inst) OVERRIDE;

  void CurrentRate(int32_t* rate_bit_per_sec) OVERRIDE;

  virtual AudioDecoder* Decoder(int codec_id) OVERRIDE;

  // |codec_inst_crit_sect_| protects |codec_inst_ptr_|.
  const scoped_ptr<CriticalSectionWrapper> codec_inst_crit_sect_;
  ACMISACInst* codec_inst_ptr_ GUARDED_BY(codec_inst_crit_sect_);
  bool is_enc_initialized_;
  // ADAPTIVE (channel-adaptive rate) or CHANNEL_INDEPENDENT (fixed rate).
  IsacCodingMode isac_coding_mode_;
  bool enforce_frame_size_;
  // Current bottleneck (target rate) in bits/s.
  int32_t isac_current_bn_;
  uint16_t samples_in_10ms_audio_;
  bool decoder_initialized_;
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_H_
|
||||
80
jni/webrtc/modules/audio_coding/main/acm2/acm_isac_macros.h
Normal file
80
jni/webrtc/modules/audio_coding/main/acm2/acm_isac_macros.h
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_MACROS_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_MACROS_H_
|
||||
|
||||
#include "webrtc/engine_configurations.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifdef WEBRTC_CODEC_ISAC
// Map the generic ACM_ISAC_* names onto the floating-point iSAC API.
#define ACM_ISAC_CREATE WebRtcIsac_Create
#define ACM_ISAC_FREE WebRtcIsac_Free
#define ACM_ISAC_ENCODERINIT WebRtcIsac_EncoderInit
#define ACM_ISAC_ENCODE WebRtcIsac_Encode
#define ACM_ISAC_DECODERINIT WebRtcIsac_DecoderInit
#define ACM_ISAC_DECODE_BWE WebRtcIsac_UpdateBwEstimate
#define ACM_ISAC_DECODE_B WebRtcIsac_Decode
#define ACM_ISAC_DECODEPLC WebRtcIsac_DecodePlc
#define ACM_ISAC_CONTROL WebRtcIsac_Control
#define ACM_ISAC_CONTROL_BWE WebRtcIsac_ControlBwe
#define ACM_ISAC_GETFRAMELEN WebRtcIsac_ReadFrameLen
#define ACM_ISAC_GETERRORCODE WebRtcIsac_GetErrorCode
#define ACM_ISAC_GETSENDBITRATE WebRtcIsac_GetUplinkBw
#define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsac_SetMaxPayloadSize
#define ACM_ISAC_SETMAXRATE WebRtcIsac_SetMaxRate
#define ACM_ISAC_GETNEWBITSTREAM WebRtcIsac_GetNewBitStream
#define ACM_ISAC_GETSENDBWE WebRtcIsac_GetDownLinkBwIndex
#define ACM_ISAC_SETBWE WebRtcIsac_UpdateUplinkBw
#define ACM_ISAC_GETBWE WebRtcIsac_ReadBwIndex
#define ACM_ISAC_GETNEWFRAMELEN WebRtcIsac_GetNewFrameLen
#define ACM_ISAC_STRUCT ISACStruct
#define ACM_ISAC_GETENCSAMPRATE WebRtcIsac_EncSampRate
#define ACM_ISAC_GETDECSAMPRATE WebRtcIsac_DecSampRate
#define ACM_ISAC_DECODERCU WebRtcIsac_DecodeRcu
#endif

#ifdef WEBRTC_CODEC_ISACFX
// Map the same generic names onto the fixed-point iSAC API. Entries marked
// "Local Impl" are implemented in the ACM wrapper itself because the
// fixed-point codec has no direct equivalent.
#define ACM_ISAC_CREATE WebRtcIsacfix_Create
#define ACM_ISAC_FREE WebRtcIsacfix_Free
#define ACM_ISAC_ENCODERINIT WebRtcIsacfix_EncoderInit
#define ACM_ISAC_ENCODE WebRtcIsacfix_Encode
#define ACM_ISAC_DECODERINIT WebRtcIsacfix_DecoderInit
#define ACM_ISAC_DECODE_BWE WebRtcIsacfix_UpdateBwEstimate
#define ACM_ISAC_DECODE_B WebRtcIsacfix_Decode
#define ACM_ISAC_DECODEPLC WebRtcIsacfix_DecodePlc
#define ACM_ISAC_CONTROL ACMISACFixControl  // Local Impl
#define ACM_ISAC_CONTROL_BWE ACMISACFixControlBWE  // Local Impl
#define ACM_ISAC_GETFRAMELEN WebRtcIsacfix_ReadFrameLen
#define ACM_ISAC_GETERRORCODE WebRtcIsacfix_GetErrorCode
#define ACM_ISAC_GETSENDBITRATE ACMISACFixGetSendBitrate  // Local Impl
#define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsacfix_SetMaxPayloadSize
#define ACM_ISAC_SETMAXRATE WebRtcIsacfix_SetMaxRate
#define ACM_ISAC_GETNEWBITSTREAM ACMISACFixGetNewBitstream  // Local Impl
#define ACM_ISAC_GETSENDBWE ACMISACFixGetSendBWE  // Local Impl
#define ACM_ISAC_SETBWE WebRtcIsacfix_UpdateUplinkBw
#define ACM_ISAC_GETBWE WebRtcIsacfix_ReadBwIndex
#define ACM_ISAC_GETNEWFRAMELEN WebRtcIsacfix_GetNewFrameLen
#define ACM_ISAC_STRUCT ISACFIX_MainStruct
#define ACM_ISAC_GETENCSAMPRATE ACMISACFixGetEncSampRate  // Local Impl
#define ACM_ISAC_GETDECSAMPRATE ACMISACFixGetDecSampRate  // Local Impl
#define ACM_ISAC_DECODERCU WebRtcIsacfix_Decode  // No special RCU
                                                 // decoder
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_ISAC_MACROS_H_
|
||||
|
||||
@@ -0,0 +1,15 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
// This file contains unit tests for ACM's NetEQ wrapper (class ACMNetEQ).
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {} // namespace
|
||||
275
jni/webrtc/modules/audio_coding/main/acm2/acm_opus.cc
Normal file
275
jni/webrtc/modules/audio_coding/main/acm2/acm_opus.cc
Normal file
@@ -0,0 +1,275 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_opus.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_OPUS
|
||||
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_OPUS
|
||||
|
||||
// Stub constructor used when Opus is not compiled in; leaves the object in
// an inert state.
ACMOpus::ACMOpus(int16_t /* codec_id */)
    : encoder_inst_ptr_(NULL),
      sample_freq_(0),
      bitrate_(0),
      channels_(1),
      fec_enabled_(false),
      packet_loss_rate_(0) {
  return;
}
|
||||
|
||||
// Stub destructor: nothing to release when Opus is not compiled in.
ACMOpus::~ACMOpus() {
  return;
}
|
||||
|
||||
// Stub: encoding is unavailable without the Opus codec; always fails.
int16_t ACMOpus::InternalEncode(uint8_t* /* bitstream */,
                                int16_t* /* bitstream_len_byte */) {
  return -1;
}
|
||||
|
||||
// Stub: initialization always fails without the Opus codec.
int16_t ACMOpus::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}
|
||||
|
||||
// Stub: no codec instance can be created without the Opus codec.
ACMGenericCodec* ACMOpus::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// Stub: encoder creation always fails without the Opus codec.
int16_t ACMOpus::InternalCreateEncoder() {
  return -1;
}
|
||||
|
||||
// Stub: nothing to destruct without the Opus codec.
void ACMOpus::DestructEncoderSafe() {
  return;
}
|
||||
|
||||
// Stub: nothing to free without the Opus codec.
void ACMOpus::InternalDestructEncoderInst(void* /* ptr_inst */) {
  return;
}
|
||||
|
||||
// Stub: rate changes always fail without the Opus codec.
int16_t ACMOpus::SetBitRateSafe(const int32_t /*rate*/) {
  return -1;
}
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
// Constructs the ACM Opus wrapper. The actual encoder is created lazily in
// InternalInitEncoder(). An unexpected |codec_id| puts the object into an
// invalid state (sample_freq_ = 0xFFFF, bitrate_ = -1).
ACMOpus::ACMOpus(int16_t codec_id)
    : encoder_inst_ptr_(NULL),
      sample_freq_(32000),  // Default sampling frequency.
      bitrate_(20000),  // Default bit-rate.
      channels_(1),  // Default mono.
      fec_enabled_(false),  // Default FEC is off.
      packet_loss_rate_(0) {  // Initial packet loss rate.
  codec_id_ = codec_id;
  // Opus has internal DTX, but we don't use it for now.
  has_internal_dtx_ = false;

  // Opus provides in-band FEC.
  has_internal_fec_ = true;

  if (codec_id_ != ACMCodecDB::kOpus) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Wrong codec id for Opus.");
    sample_freq_ = 0xFFFF;
    bitrate_ = -1;
  }
  return;
}
|
||||
|
||||
// Releases the Opus encoder instance, if one was created.
ACMOpus::~ACMOpus() {
  if (encoder_inst_ptr_ == NULL)
    return;
  WebRtcOpus_EncoderFree(encoder_inst_ptr_);
  encoder_inst_ptr_ = NULL;
}
|
||||
|
||||
// Encodes one frame from the internal input buffer into |bitstream|.
// On success returns the payload length in bytes (also stored in
// |*bitstream_len_byte|) and advances the input read index; on failure sets
// the length to 0 and returns -1.
int16_t ACMOpus::InternalEncode(uint8_t* bitstream,
                                int16_t* bitstream_len_byte) {
  // Call Encoder.
  *bitstream_len_byte = WebRtcOpus_Encode(encoder_inst_ptr_,
                                          &in_audio_[in_audio_ix_read_],
                                          frame_len_smpl_,
                                          MAX_PAYLOAD_SIZE_BYTE, bitstream);
  // Check for error reported from encoder.
  if (*bitstream_len_byte < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "InternalEncode: Encode error for Opus");
    *bitstream_len_byte = 0;
    return -1;
  }

  // Increment the read index. This tells the caller how far
  // we have gone forward in reading the audio buffer.
  // Note: the buffer is interleaved, hence the |channels_| factor.
  in_audio_ix_read_ += frame_len_smpl_ * channels_;

  return *bitstream_len_byte;
}
|
||||
|
||||
// (Re)creates and configures the Opus encoder from |codec_params|: channel
// count, bit-rate, and (on low-power targets) a reduced complexity setting.
// Returns 0 on success, a negative error code on failure.
int16_t ACMOpus::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
  int16_t ret;
  // Destroy any existing encoder before creating a new one.
  if (encoder_inst_ptr_ != NULL) {
    WebRtcOpus_EncoderFree(encoder_inst_ptr_);
    encoder_inst_ptr_ = NULL;
  }
  ret = WebRtcOpus_EncoderCreate(&encoder_inst_ptr_,
                                 codec_params->codec_inst.channels);
  // Store number of channels.
  channels_ = codec_params->codec_inst.channels;

  if (ret < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Encoder creation failed for Opus");
    return ret;
  }
  ret = WebRtcOpus_SetBitRate(encoder_inst_ptr_,
                              codec_params->codec_inst.rate);
  if (ret < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Setting initial bitrate failed for Opus");
    return ret;
  }

  // Store bitrate.
  bitrate_ = codec_params->codec_inst.rate;

  // TODO(tlegrand): Remove this code when we have proper APIs to set the
  // complexity at a higher level.
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
  // If we are on Android, iOS and/or ARM, use a lower complexity setting as
  // default, to save encoder complexity.
  const int kOpusComplexity5 = 5;
  // Bug fix: the return value was previously discarded, so a failure here
  // was never detected (the subsequent check tested a stale |ret|).
  ret = WebRtcOpus_SetComplexity(encoder_inst_ptr_, kOpusComplexity5);
  if (ret < 0) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "Setting complexity failed for Opus");
    return ret;
  }
#endif

  return 0;
}
|
||||
|
||||
// Instance cloning is not supported for Opus.
ACMGenericCodec* ACMOpus::CreateInstance(void) {
  return NULL;
}
|
||||
|
||||
// No-op success: the real encoder is created in InternalInitEncoder,
// where the channel count is known.
int16_t ACMOpus::InternalCreateEncoder() {
  // Real encoder will be created in InternalInitEncoder.
  return 0;
}
|
||||
|
||||
void ACMOpus::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_) {
|
||||
WebRtcOpus_EncoderFree(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void ACMOpus::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcOpus_EncoderFree(static_cast<OpusEncInst*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Sets the encoder target bit-rate. Valid range is 6000-510000 bits/s
// (the range Opus supports). Returns 0 on success, -1 on an out-of-range
// rate or encoder failure.
int16_t ACMOpus::SetBitRateSafe(const int32_t rate) {
  if (rate < 6000 || rate > 510000) {
    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
                 "SetBitRateSafe: Invalid rate Opus");
    return -1;
  }

  bitrate_ = rate;

  // Ask the encoder for the new rate.
  if (WebRtcOpus_SetBitRate(encoder_inst_ptr_, bitrate_) >= 0) {
    encoder_params_.codec_inst.rate = bitrate_;
    return 0;
  }

  return -1;
}
|
||||
|
||||
int ACMOpus::SetFEC(bool enable_fec) {
|
||||
// Ask the encoder to enable FEC.
|
||||
if (enable_fec) {
|
||||
if (WebRtcOpus_EnableFec(encoder_inst_ptr_) == 0) {
|
||||
fec_enabled_ = true;
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
if (WebRtcOpus_DisableFec(encoder_inst_ptr_) == 0) {
|
||||
fec_enabled_ = false;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Informs the encoder of the expected packet loss rate (percent) so it can
// adapt its FEC. The raw rate is quantized to the levels {0, 1, 5, 10, 20}
// with hysteresis margins to avoid toggling between adjacent levels.
// Returns 0 on success, -1 if the encoder rejects the update.
int ACMOpus::SetPacketLossRate(int loss_rate) {
  // Optimize the loss rate to configure Opus. Basically, optimized loss rate is
  // the input loss rate rounded down to various levels, because a robustly good
  // audio quality is achieved by lowering the packet loss down.
  // Additionally, to prevent toggling, margins are used, i.e., when jumping to
  // a loss rate from below, a higher threshold is used than jumping to the same
  // level from above.
  const int kPacketLossRate20 = 20;
  const int kPacketLossRate10 = 10;
  const int kPacketLossRate5 = 5;
  const int kPacketLossRate1 = 1;
  const int kLossRate20Margin = 2;
  const int kLossRate10Margin = 1;
  const int kLossRate5Margin = 1;
  int opt_loss_rate;
  // The margin is added when approaching a level from below (current rate
  // lower than the level) and subtracted when approaching from above.
  if (loss_rate >= kPacketLossRate20 + kLossRate20Margin *
      (kPacketLossRate20 - packet_loss_rate_ > 0 ? 1 : -1)) {
    opt_loss_rate = kPacketLossRate20;
  } else if (loss_rate >= kPacketLossRate10 + kLossRate10Margin *
      (kPacketLossRate10 - packet_loss_rate_ > 0 ? 1 : -1)) {
    opt_loss_rate = kPacketLossRate10;
  } else if (loss_rate >= kPacketLossRate5 + kLossRate5Margin *
      (kPacketLossRate5 - packet_loss_rate_ > 0 ? 1 : -1)) {
    opt_loss_rate = kPacketLossRate5;
  } else if (loss_rate >= kPacketLossRate1) {
    opt_loss_rate = kPacketLossRate1;
  } else {
    opt_loss_rate = 0;
  }

  // Nothing to do if the quantized level is unchanged.
  if (packet_loss_rate_ == opt_loss_rate) {
    return 0;
  }

  // Ask the encoder to change the target packet loss rate.
  if (WebRtcOpus_SetPacketLossRate(encoder_inst_ptr_, opt_loss_rate) == 0) {
    packet_loss_rate_ = opt_loss_rate;
    return 0;
  }

  return -1;
}
|
||||
|
||||
int ACMOpus::SetOpusMaxBandwidth(int max_bandwidth) {
|
||||
// Ask the encoder to change the maximum required bandwidth.
|
||||
return WebRtcOpus_SetMaxBandwidth(encoder_inst_ptr_, max_bandwidth);
|
||||
}
|
||||
|
||||
#endif // WEBRTC_CODEC_OPUS
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
66
jni/webrtc/modules/audio_coding/main/acm2/acm_opus.h
Normal file
66
jni/webrtc/modules/audio_coding/main/acm2/acm_opus.h
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_OPUS_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_OPUS_H_
|
||||
|
||||
#include "webrtc/common_audio/resampler/include/resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
struct WebRtcOpusEncInst;
|
||||
struct WebRtcOpusDecInst;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper around the Opus encoder. Only the send side lives here
// (decoding goes through NetEq); the class owns the encoder instance and
// its runtime settings: FEC, expected packet loss and maximum bandwidth.
class ACMOpus : public ACMGenericCodec {
 public:
  explicit ACMOpus(int16_t codec_id);
  ~ACMOpus();

  ACMGenericCodec* CreateInstance(void);

  // Encodes one frame; the number of bytes written to |bitstream| is both
  // stored in |*bitstream_len_byte| and returned.
  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalInitEncoder(WebRtcACMCodecParams *codec_params);

  // Enables/disables Opus in-band FEC. Returns 0 on success, -1 on failure.
  virtual int SetFEC(bool enable_fec) OVERRIDE;

  // Tells the encoder the expected packet loss rate (percent); the value is
  // quantized with hysteresis — see the implementation.
  virtual int SetPacketLossRate(int loss_rate) OVERRIDE;

  // Caps the audio bandwidth the encoder must support.
  virtual int SetOpusMaxBandwidth(int max_bandwidth) OVERRIDE;

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  int16_t SetBitRateSafe(const int32_t rate) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  WebRtcOpusEncInst* encoder_inst_ptr_;  // Underlying Opus encoder state.
  uint16_t sample_freq_;                 // Sampling frequency.
  int32_t bitrate_;                      // Target bitrate.
  int channels_;                         // Number of channels encoded.

  bool fec_enabled_;       // True when in-band FEC is active in the encoder.
  int packet_loss_rate_;   // Last (quantized) loss level sent to the encoder.
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_OPUS_H_
|
||||
@@ -0,0 +1,94 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_opus.h"
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
namespace {
|
||||
const CodecInst kOpusCodecInst = {105, "opus", 48000, 960, 1, 32000};
|
||||
// These constants correspond to those used in ACMOpus::SetPacketLossRate().
|
||||
const int kPacketLossRate20 = 20;
|
||||
const int kPacketLossRate10 = 10;
|
||||
const int kPacketLossRate5 = 5;
|
||||
const int kPacketLossRate1 = 1;
|
||||
const int kLossRate20Margin = 2;
|
||||
const int kLossRate10Margin = 1;
|
||||
const int kLossRate5Margin = 1;
|
||||
} // namespace
|
||||
|
||||
// Test subclass that exposes ACMOpus's protected |packet_loss_rate_| and
// provides a helper to sweep SetPacketLossRate() over a range of values.
class AcmOpusTest : public ACMOpus {
 public:
  explicit AcmOpusTest(int16_t codec_id)
      : ACMOpus(codec_id) {}
  ~AcmOpusTest() {}
  // Accessor for the loss level cached by ACMOpus.
  int packet_loss_rate() { return packet_loss_rate_; }

  // Calls SetPacketLossRate() for each value from |from| to |to| (in either
  // direction) and expects the cached level to equal |expected_return|.
  void TestSetPacketLossRate(int from, int to, int expected_return);
};
|
||||
|
||||
#ifdef WEBRTC_CODEC_OPUS
|
||||
void AcmOpusTest::TestSetPacketLossRate(int from, int to, int expected_return) {
|
||||
for (int loss = from; loss <= to; (to >= from) ? ++loss : --loss) {
|
||||
EXPECT_EQ(0, SetPacketLossRate(loss));
|
||||
EXPECT_EQ(expected_return, packet_loss_rate());
|
||||
}
|
||||
}
|
||||
|
||||
// Drives SetPacketLossRate() up and then down the 0-100 range and verifies
// the hysteresis: a new level is entered from below only at threshold+margin
// and left from above only below threshold-margin.
TEST(AcmOpusTest, PacketLossRateOptimized) {
  AcmOpusTest opus(ACMCodecDB::kOpus);
  WebRtcACMCodecParams params;
  memcpy(&(params.codec_inst), &kOpusCodecInst, sizeof(CodecInst));
  EXPECT_EQ(0, opus.InitEncoder(&params, true));
  EXPECT_EQ(0, opus.SetFEC(true));

  // Note that the order of the following calls is critical: each sweep's
  // expected level depends on the level reached by the previous sweep.
  opus.TestSetPacketLossRate(0, 0, 0);
  // Ascending sweeps.
  opus.TestSetPacketLossRate(kPacketLossRate1,
                             kPacketLossRate5 + kLossRate5Margin - 1,
                             kPacketLossRate1);
  opus.TestSetPacketLossRate(kPacketLossRate5 + kLossRate5Margin,
                             kPacketLossRate10 + kLossRate10Margin - 1,
                             kPacketLossRate5);
  opus.TestSetPacketLossRate(kPacketLossRate10 + kLossRate10Margin,
                             kPacketLossRate20 + kLossRate20Margin - 1,
                             kPacketLossRate10);
  opus.TestSetPacketLossRate(kPacketLossRate20 + kLossRate20Margin,
                             100,
                             kPacketLossRate20);
  // Descending sweeps.
  opus.TestSetPacketLossRate(kPacketLossRate20 + kLossRate20Margin,
                             kPacketLossRate20 - kLossRate20Margin,
                             kPacketLossRate20);
  opus.TestSetPacketLossRate(kPacketLossRate20 - kLossRate20Margin - 1,
                             kPacketLossRate10 - kLossRate10Margin,
                             kPacketLossRate10);
  opus.TestSetPacketLossRate(kPacketLossRate10 - kLossRate10Margin - 1,
                             kPacketLossRate5 - kLossRate5Margin,
                             kPacketLossRate5);
  opus.TestSetPacketLossRate(kPacketLossRate5 - kLossRate5Margin - 1,
                             kPacketLossRate1,
                             kPacketLossRate1);
  opus.TestSetPacketLossRate(0, 0, 0);
}
|
||||
#else
|
||||
// Stub used when Opus is compiled out: the sweep becomes a no-op.
// Bug fix: the original wrote `AcmOpusTest:` (single colon), which is not a
// valid scope-resolution operator and does not compile; it must be `::`.
void AcmOpusTest::TestSetPacketLossRate(int /* from */, int /* to */,
                                        int /* expected_return */) {
  return;
}
|
||||
#endif // WEBRTC_CODEC_OPUS
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
96
jni/webrtc/modules/audio_coding/main/acm2/acm_pcm16b.cc
Normal file
96
jni/webrtc/modules/audio_coding/main/acm2/acm_pcm16b.cc
Normal file
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcm16b.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_PCM16
|
||||
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_PCM16

// Stub implementation compiled when PCM16B support is disabled: construction
// is a no-op and every operation fails (-1) or does nothing.

ACMPCM16B::ACMPCM16B(int16_t /* codec_id */) { return; }

ACMPCM16B::~ACMPCM16B() { return; }

int16_t ACMPCM16B::InternalEncode(uint8_t* /* bitstream */,
                                  int16_t* /* bitstream_len_byte */) {
  return -1;
}

int16_t ACMPCM16B::InternalInitEncoder(
    WebRtcACMCodecParams* /* codec_params */) {
  return -1;
}

ACMGenericCodec* ACMPCM16B::CreateInstance(void) { return NULL; }

int16_t ACMPCM16B::InternalCreateEncoder() { return -1; }

void ACMPCM16B::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }

void ACMPCM16B::DestructEncoderSafe() { return; }

#else  //===================== Actual Implementation =======================
|
||||
ACMPCM16B::ACMPCM16B(int16_t codec_id) {
  codec_id_ = codec_id;
  // PCM16B is registered at several clock rates; resolve the rate for this
  // particular codec id from the codec database.
  sampling_freq_hz_ = ACMCodecDB::CodecFreq(codec_id_);
}

ACMPCM16B::~ACMPCM16B() { return; }

// Encodes one frame of linear PCM into |bitstream|; the byte count is stored
// in |*bitstream_len_byte| and returned.
int16_t ACMPCM16B::InternalEncode(uint8_t* bitstream,
                                  int16_t* bitstream_len_byte) {
  *bitstream_len_byte = WebRtcPcm16b_Encode(&in_audio_[in_audio_ix_read_],
                                            frame_len_smpl_ * num_channels_,
                                            bitstream);
  // Increment the read index to tell the caller how far we have gone forward
  // in reading the audio buffer.
  in_audio_ix_read_ += frame_len_smpl_ * num_channels_;
  return *bitstream_len_byte;
}

int16_t ACMPCM16B::InternalInitEncoder(
    WebRtcACMCodecParams* /* codec_params */) {
  // This codec does not need initialization, PCM has no instance.
  return 0;
}

// No secondary (RED) encoder instance is ever created for PCM16B.
ACMGenericCodec* ACMPCM16B::CreateInstance(void) { return NULL; }

int16_t ACMPCM16B::InternalCreateEncoder() {
  // PCM has no instance.
  return 0;
}

void ACMPCM16B::InternalDestructEncoderInst(void* /* ptr_inst */) {
  // PCM has no instance.
  return;
}

void ACMPCM16B::DestructEncoderSafe() {
  // PCM has no instance, but the bookkeeping flags must still be cleared.
  encoder_exist_ = false;
  encoder_initialized_ = false;
  return;
}
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
49
jni/webrtc/modules/audio_coding/main/acm2/acm_pcm16b.h
Normal file
49
jni/webrtc/modules/audio_coding/main/acm2/acm_pcm16b.h
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCM16B_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCM16B_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for 16-bit linear PCM, a codec that needs no real encoder
// instance (encoding is a pure function of the input samples).
class ACMPCM16B : public ACMGenericCodec {
 public:
  explicit ACMPCM16B(int16_t codec_id);
  ~ACMPCM16B();

  // For FEC.
  ACMGenericCodec* CreateInstance(void);

  // Encodes one frame; returns the byte count, also stored in
  // |*bitstream_len_byte|.
  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe() OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);

  int32_t sampling_freq_hz_;  // Clock rate of this PCM16B variant.
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCM16B_H_
|
||||
62
jni/webrtc/modules/audio_coding/main/acm2/acm_pcma.cc
Normal file
62
jni/webrtc/modules/audio_coding/main/acm2/acm_pcma.cc
Normal file
@@ -0,0 +1,62 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcma.h"
|
||||
|
||||
#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// Codec interface
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
ACMPCMA::ACMPCMA(int16_t codec_id) { codec_id_ = codec_id; }
|
||||
|
||||
ACMPCMA::~ACMPCMA() { return; }
|
||||
|
||||
int16_t ACMPCMA::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
*bitstream_len_byte = WebRtcG711_EncodeA(
|
||||
NULL, &in_audio_[in_audio_ix_read_], frame_len_smpl_ * num_channels_,
|
||||
reinterpret_cast<int16_t*>(bitstream));
|
||||
// Increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer.
|
||||
in_audio_ix_read_ += frame_len_smpl_ * num_channels_;
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMPCMA::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
|
||||
// This codec does not need initialization, PCM has no instance.
|
||||
return 0;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMPCMA::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMPCMA::InternalCreateEncoder() {
|
||||
// PCM has no instance.
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMPCMA::InternalDestructEncoderInst(void* /* ptr_inst */) {
|
||||
// PCM has no instance.
|
||||
return;
|
||||
}
|
||||
|
||||
void ACMPCMA::DestructEncoderSafe() {
|
||||
// PCM has no instance.
|
||||
return;
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
46
jni/webrtc/modules/audio_coding/main/acm2/acm_pcma.h
Normal file
46
jni/webrtc/modules/audio_coding/main/acm2/acm_pcma.h
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMA_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMA_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for G.711 A-law (PCMA), a stateless codec with no real
// encoder instance.
class ACMPCMA : public ACMGenericCodec {
 public:
  explicit ACMPCMA(int16_t codec_id);
  ~ACMPCMA();

  // For FEC.
  ACMGenericCodec* CreateInstance(void);

  // Encodes one frame; returns the byte count, also stored in
  // |*bitstream_len_byte|.
  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMA_H_
|
||||
63
jni/webrtc/modules/audio_coding/main/acm2/acm_pcmu.cc
Normal file
63
jni/webrtc/modules/audio_coding/main/acm2/acm_pcmu.cc
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_pcmu.h"
|
||||
|
||||
#include "webrtc/modules/audio_coding/codecs/g711/include/g711_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
// Codec interface.
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
ACMPCMU::ACMPCMU(int16_t codec_id) { codec_id_ = codec_id; }

ACMPCMU::~ACMPCMU() {}

// Encodes one frame with G.711 mu-law; the byte count is stored in
// |*bitstream_len_byte| and returned. NULL is passed as the codec state —
// presumably unused by the G.711 interface; confirm in g711_interface.h.
int16_t ACMPCMU::InternalEncode(uint8_t* bitstream,
                                int16_t* bitstream_len_byte) {
  *bitstream_len_byte = WebRtcG711_EncodeU(
      NULL, &in_audio_[in_audio_ix_read_], frame_len_smpl_ * num_channels_,
      reinterpret_cast<int16_t*>(bitstream));

  // Increment the read index to tell the caller how far we have gone forward
  // in reading the audio buffer.
  in_audio_ix_read_ += frame_len_smpl_ * num_channels_;
  return *bitstream_len_byte;
}

int16_t ACMPCMU::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  // This codec does not need initialization, PCM has no instance.
  return 0;
}

// No secondary (RED) encoder instance is ever created for PCMU.
ACMGenericCodec* ACMPCMU::CreateInstance(void) { return NULL; }

int16_t ACMPCMU::InternalCreateEncoder() {
  // PCM has no instance.
  return 0;
}

void ACMPCMU::InternalDestructEncoderInst(void* /* ptr_inst */) {
  // PCM has no instance.
}

void ACMPCMU::DestructEncoderSafe() {
  // PCM has no instance, but the bookkeeping flags must still be cleared.
  encoder_exist_ = false;
  encoder_initialized_ = false;
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
47
jni/webrtc/modules/audio_coding/main/acm2/acm_pcmu.h
Normal file
47
jni/webrtc/modules/audio_coding/main/acm2/acm_pcmu.h
Normal file
@@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMU_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMU_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// ACM wrapper for G.711 mu-law (PCMU), a stateless codec with no real
// encoder instance.
class ACMPCMU : public ACMGenericCodec {
 public:
  explicit ACMPCMU(int16_t codec_id);
  ~ACMPCMU();

  // For FEC.
  ACMGenericCodec* CreateInstance(void);

  // Encodes one frame; returns the byte count, also stored in
  // |*bitstream_len_byte|.
  int16_t InternalEncode(uint8_t* bitstream,
                         int16_t* bitstream_len_byte) OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe() OVERRIDE
      EXCLUSIVE_LOCKS_REQUIRED(codec_wrapper_lock_);

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_PCMU_H_
|
||||
181
jni/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
Normal file
181
jni/webrtc/modules/audio_coding/main/acm2/acm_receive_test.cc
Normal file
@@ -0,0 +1,181 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/audio_sink.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace test {
|
||||
|
||||
namespace {
|
||||
// Returns true if the codec should be registered, otherwise false. Changes
|
||||
// the number of channels for the Opus codec to always be 1.
|
||||
// Decides whether |codec_param| should be registered for the receive test.
// Opus is forced to mono; 48 kHz comfort noise, telephone-event and any
// other multi-channel codec are rejected.
bool ModifyAndUseThisCodec(CodecInst* codec_param) {
  const bool is_comfort_noise = STR_CASE_CMP(codec_param->plname, "CN") == 0;
  if (is_comfort_noise && codec_param->plfreq == 48000) {
    return false;  // Skip 48 kHz comfort noise.
  }

  if (STR_CASE_CMP(codec_param->plname, "telephone-event") == 0) {
    return false;  // Skip DTMF events.
  }

  if (STR_CASE_CMP(codec_param->plname, "opus") == 0) {
    codec_param->channels = 1;  // Always register Opus as mono.
    return true;
  }

  // All remaining codecs are used only if they are mono.
  return codec_param->channels <= 1;
}
|
||||
|
||||
// Remaps payload types from ACM's default to those used in the resource file
|
||||
// neteq_universal_new.rtp. Returns true if the codec should be registered,
|
||||
// otherwise false. The payload types are set as follows (all are mono codecs):
|
||||
// PCMu = 0;
|
||||
// PCMa = 8;
|
||||
// Comfort noise 8 kHz = 13
|
||||
// Comfort noise 16 kHz = 98
|
||||
// Comfort noise 32 kHz = 99
|
||||
// iLBC = 102
|
||||
// iSAC wideband = 103
|
||||
// iSAC super-wideband = 104
|
||||
// iSAC fullband = 124
|
||||
// AVT/DTMF = 106
|
||||
// RED = 117
|
||||
// PCM16b 8 kHz = 93
|
||||
// PCM16b 16 kHz = 94
|
||||
// PCM16b 32 kHz = 95
|
||||
// G.722 = 9
|
||||
// Remaps a codec's default RTP payload type to the one used by the
// pre-encoded NetEq test files. Returns true if the codec appears in those
// files (and |*pltype| was updated), false otherwise. Only mono codecs are
// accepted.
bool RemapPltypeAndUseThisCodec(const char* plname,
                                int plfreq,
                                int channels,
                                int* pltype) {
  if (channels != 1)
    return false;  // Don't use non-mono codecs.

  // Re-map pltypes to those used in the NetEq test files.
  if (STR_CASE_CMP(plname, "PCMU") == 0 && plfreq == 8000) {
    *pltype = 0;
  } else if (STR_CASE_CMP(plname, "PCMA") == 0 && plfreq == 8000) {
    *pltype = 8;
  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 8000) {
    *pltype = 13;
  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 16000) {
    *pltype = 98;
  } else if (STR_CASE_CMP(plname, "CN") == 0 && plfreq == 32000) {
    *pltype = 99;
  } else if (STR_CASE_CMP(plname, "ILBC") == 0) {
    *pltype = 102;
  } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 16000) {
    *pltype = 103;
  } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 32000) {
    *pltype = 104;
  } else if (STR_CASE_CMP(plname, "ISAC") == 0 && plfreq == 48000) {
    *pltype = 124;
  } else if (STR_CASE_CMP(plname, "telephone-event") == 0) {
    *pltype = 106;
  } else if (STR_CASE_CMP(plname, "red") == 0) {
    *pltype = 117;
  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 8000) {
    *pltype = 93;
  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 16000) {
    *pltype = 94;
  } else if (STR_CASE_CMP(plname, "L16") == 0 && plfreq == 32000) {
    *pltype = 95;
  } else if (STR_CASE_CMP(plname, "G722") == 0) {
    // 9 is the static RTP payload type for G.722.
    *pltype = 9;
  } else {
    // Don't use any other codecs.
    return false;
  }
  return true;
}
|
||||
} // namespace
|
||||
|
||||
// Wires up the test: a simulated clock starting at time 0 drives a fresh
// AudioCodingModule; packets are read from |packet_source| and decoded audio
// is written to |audio_sink| at |output_freq_hz|.
AcmReceiveTest::AcmReceiveTest(PacketSource* packet_source,
                               AudioSink* audio_sink,
                               int output_freq_hz)
    : clock_(0),
      acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
      packet_source_(packet_source),
      audio_sink_(audio_sink),
      output_freq_hz_(output_freq_hz) {
}
|
||||
|
||||
void AcmReceiveTest::RegisterDefaultCodecs() {
|
||||
CodecInst my_codec_param;
|
||||
for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
|
||||
ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
|
||||
if (ModifyAndUseThisCodec(&my_codec_param)) {
|
||||
ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
|
||||
<< "Couldn't register receive codec.\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Registers only the codecs present in the pre-encoded NetEq test files,
// with payload types remapped to match those files.
void AcmReceiveTest::RegisterNetEqTestCodecs() {
  CodecInst my_codec_param;
  for (int n = 0; n < acm_->NumberOfCodecs(); n++) {
    ASSERT_EQ(0, acm_->Codec(n, &my_codec_param)) << "Failed to get codec.";
    if (!ModifyAndUseThisCodec(&my_codec_param)) {
      // Skip this codec.
      continue;
    }

    // Only register codecs whose payload type exists in the test files.
    if (RemapPltypeAndUseThisCodec(my_codec_param.plname,
                                   my_codec_param.plfreq,
                                   my_codec_param.channels,
                                   &my_codec_param.pltype)) {
      ASSERT_EQ(0, acm_->RegisterReceiveCodec(my_codec_param))
          << "Couldn't register receive codec.\n";
    }
  }
}
|
||||
|
||||
// Feeds the whole packet stream through ACM in simulated time: before each
// packet is inserted, audio is pulled in 10 ms blocks (and written to the
// sink) until the simulated clock reaches the packet's arrival time.
void AcmReceiveTest::Run() {
  for (scoped_ptr<Packet> packet(packet_source_->NextPacket()); packet;
       packet.reset(packet_source_->NextPacket())) {
    // Pull audio until time to insert packet.
    while (clock_.TimeInMilliseconds() < packet->time_ms()) {
      AudioFrame output_frame;
      EXPECT_EQ(0, acm_->PlayoutData10Ms(output_freq_hz_, &output_frame));
      EXPECT_EQ(output_freq_hz_, output_frame.sample_rate_hz_);
      // Each pull must yield exactly 10 ms of mono audio at the output rate.
      const int samples_per_block = output_freq_hz_ * 10 / 1000;
      EXPECT_EQ(samples_per_block, output_frame.samples_per_channel_);
      EXPECT_EQ(1, output_frame.num_channels_);
      ASSERT_TRUE(audio_sink_->WriteAudioFrame(output_frame));
      clock_.AdvanceTimeMilliseconds(10);
    }

    // Insert packet after converting from RTPHeader to WebRtcRTPHeader.
    WebRtcRTPHeader header;
    header.header = packet->header();
    header.frameType = kAudioFrameSpeech;
    memset(&header.type.Audio, 0, sizeof(RTPAudioHeader));
    EXPECT_EQ(0,
              acm_->IncomingPacket(
                  packet->payload(),
                  static_cast<int32_t>(packet->payload_length_bytes()),
                  header))
        << "Failure when inserting packet:" << std::endl
        << " PT = " << static_cast<int>(header.header.payloadType) << std::endl
        << " TS = " << header.header.timestamp << std::endl
        << " SN = " << header.header.sequenceNumber;
  }
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
55
jni/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h
Normal file
55
jni/webrtc/modules/audio_coding/main/acm2/acm_receive_test.h
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
|
||||
|
||||
#include "webrtc/base/constructormagic.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
class AudioCodingModule;
|
||||
struct CodecInst;
|
||||
|
||||
namespace test {
|
||||
class AudioSink;
|
||||
class PacketSource;
|
||||
|
||||
// Drives a receive-side test of AudioCodingModule: packets from a
// PacketSource are inserted on a simulated clock and the decoded audio is
// written to an AudioSink.
class AcmReceiveTest {
 public:
  AcmReceiveTest(PacketSource* packet_source,
                 AudioSink* audio_sink,
                 int output_freq_hz);
  virtual ~AcmReceiveTest() {}

  // Registers the codecs with default parameters from ACM.
  void RegisterDefaultCodecs();

  // Registers codecs with payload types matching the pre-encoded NetEq test
  // files.
  void RegisterNetEqTestCodecs();

  // Runs the test; failures are reported through gtest expectations.
  void Run();

 private:
  SimulatedClock clock_;               // Simulated time source, starts at 0.
  scoped_ptr<AudioCodingModule> acm_;  // Module under test (owned).
  PacketSource* packet_source_;        // Raw pointer; not deleted here.
  AudioSink* audio_sink_;              // Raw pointer; not deleted here.
  const int output_freq_hz_;           // Requested playout sample rate.

  DISALLOW_COPY_AND_ASSIGN(AcmReceiveTest);
};
|
||||
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVE_TEST_H_
|
||||
849
jni/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
Normal file
849
jni/webrtc/modules/audio_coding/main/acm2/acm_receiver.cc
Normal file
@@ -0,0 +1,849 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
|
||||
|
||||
#include <stdlib.h> // malloc
|
||||
|
||||
#include <algorithm> // sort
|
||||
#include <vector>
|
||||
|
||||
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/nack.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/audio_decoder.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/logging.h"
|
||||
#include "webrtc/system_wrappers/interface/tick_util.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
namespace {
|
||||
|
||||
const int kNackThresholdPackets = 2;
|
||||
|
||||
// |vad_activity_| field of |audio_frame| is set to |previous_audio_activity_|
|
||||
// before the call to this function.
|
||||
// Translates NetEq's output type into the |vad_activity_| and |speech_type_|
// fields of |audio_frame|. The caller presets |vad_activity_| to
// |previous_audio_activity_|, so cases that leave it untouched inherit the
// previous decision.
void SetAudioFrameActivityAndType(bool vad_enabled,
                                  NetEqOutputType type,
                                  AudioFrame* audio_frame) {
  if (vad_enabled) {
    switch (type) {
      case kOutputNormal: {
        audio_frame->vad_activity_ = AudioFrame::kVadActive;
        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
        break;
      }
      case kOutputVADPassive: {
        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
        break;
      }
      case kOutputCNG: {
        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
        audio_frame->speech_type_ = AudioFrame::kCNG;
        break;
      }
      case kOutputPLC: {
        // Don't change |audio_frame->vad_activity_|, it should be the same as
        // |previous_audio_activity_|.
        audio_frame->speech_type_ = AudioFrame::kPLC;
        break;
      }
      case kOutputPLCtoCNG: {
        audio_frame->vad_activity_ = AudioFrame::kVadPassive;
        audio_frame->speech_type_ = AudioFrame::kPLCCNG;
        break;
      }
      default:
        assert(false);
    }
  } else {
    // Always return kVadUnknown when receive VAD is inactive.
    audio_frame->vad_activity_ = AudioFrame::kVadUnknown;
    switch (type) {
      case kOutputNormal: {
        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
        break;
      }
      case kOutputCNG: {
        audio_frame->speech_type_ = AudioFrame::kCNG;
        break;
      }
      case kOutputPLC: {
        audio_frame->speech_type_ = AudioFrame::kPLC;
        break;
      }
      case kOutputPLCtoCNG: {
        audio_frame->speech_type_ = AudioFrame::kPLCCNG;
        break;
      }
      case kOutputVADPassive: {
        // Normally, we should not get any VAD decision if post-decoding VAD is
        // not active. However, if post-decoding VAD has been active then
        // disabled, we might be here for a couple of frames.
        audio_frame->speech_type_ = AudioFrame::kNormalSpeech;
        LOG_F(LS_WARNING) << "Post-decoding VAD is disabled but output is "
                          << "labeled VAD-passive";
        break;
      }
      default:
        assert(false);
    }
  }
}
|
||||
|
||||
// Is the given codec a CNG codec?
|
||||
bool IsCng(int codec_id) {
|
||||
return (codec_id == ACMCodecDB::kCNNB || codec_id == ACMCodecDB::kCNWB ||
|
||||
codec_id == ACMCodecDB::kCNSWB || codec_id == ACMCodecDB::kCNFB);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Constructs the receiver: creates the owned NetEq instance from |config|,
// marks every decoder slot unregistered, and forces NetEq's post-decode VAD
// state to agree with |vad_enabled_|.
AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config)
    : crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
      id_(config.id),
      last_audio_decoder_(-1),  // Invalid value.
      previous_audio_activity_(AudioFrame::kVadPassive),
      current_sample_rate_hz_(config.neteq_config.sample_rate_hz),
      nack_(),
      nack_enabled_(false),
      neteq_(NetEq::Create(config.neteq_config)),
      vad_enabled_(true),
      clock_(config.clock),
      av_sync_(false),
      initial_delay_manager_(),
      missing_packets_sync_stream_(),
      late_packets_sync_stream_() {
  assert(clock_);
  // No decoder is registered yet; slots are claimed via AddCodec().
  for (int n = 0; n < ACMCodecDB::kMaxNumCodecs; ++n) {
    decoders_[n].registered = false;
  }

  // Make sure we are on the same page as NetEq. Post-decode VAD is disabled by
  // default in NetEq4, however, Audio Conference Mixer relies on VAD decision
  // and fails if VAD decision is not provided.
  if (vad_enabled_)
    neteq_->EnableVad();
  else
    neteq_->DisableVad();
}
|
||||
|
||||
// Destructor. |neteq_| is the only raw-owned resource; the scoped_ptr members
// clean themselves up.
AcmReceiver::~AcmReceiver() {
  delete neteq_;
}
|
||||
|
||||
int AcmReceiver::SetMinimumDelay(int delay_ms) {
|
||||
if (neteq_->SetMinimumDelay(delay_ms))
|
||||
return 0;
|
||||
LOG_FERR1(LS_ERROR, "AcmReceiver::SetExtraDelay", delay_ms);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Enables (delay_ms > 0) or disables (delay_ms == 0) AV-sync initial-delay
// mode. Rejects out-of-range delays and calls made after buffering has
// already started. Returns 0 on success, -1 on failure.
int AcmReceiver::SetInitialDelay(int delay_ms) {
  // Valid range is 0..10000 ms.
  if (delay_ms < 0 || delay_ms > 10000) {
    return -1;
  }
  CriticalSectionScoped lock(crit_sect_.get());

  if (delay_ms == 0) {
    // Zero delay disables AV-sync mode entirely and releases all helpers.
    av_sync_ = false;
    initial_delay_manager_.reset();
    missing_packets_sync_stream_.reset();
    late_packets_sync_stream_.reset();
    neteq_->SetMinimumDelay(0);
    return 0;
  }

  if (av_sync_ && initial_delay_manager_->PacketBuffered()) {
    // Too late for this API. Only works before a call is started.
    return -1;
  }

  // Most of places NetEq calls are not within AcmReceiver's critical section to
  // improve performance. Here, this call has to be placed before the following
  // block, therefore, we keep it inside critical section. Otherwise, we have to
  // release |neteq_crit_sect_| and acquire it again, which seems an overkill.
  if (!neteq_->SetMinimumDelay(delay_ms))
    return -1;

  const int kLatePacketThreshold = 5;
  av_sync_ = true;
  initial_delay_manager_.reset(new InitialDelayManager(delay_ms,
                                                       kLatePacketThreshold));
  missing_packets_sync_stream_.reset(new InitialDelayManager::SyncStream);
  late_packets_sync_stream_.reset(new InitialDelayManager::SyncStream);
  return 0;
}
|
||||
|
||||
int AcmReceiver::SetMaximumDelay(int delay_ms) {
|
||||
if (neteq_->SetMaximumDelay(delay_ms))
|
||||
return 0;
|
||||
LOG_FERR1(LS_ERROR, "AcmReceiver::SetExtraDelay", delay_ms);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Thin pass-through to NetEq: the smallest playout delay (ms) NetEq can
// currently honor. No locking needed; only |neteq_| is touched.
int AcmReceiver::LeastRequiredDelayMs() const {
  return neteq_->LeastRequiredDelayMs();
}
|
||||
|
||||
// Returns the sample rate (Hz) of the most recently decoded audio, under the
// member lock since GetAudio()/GetSilence() update it concurrently.
int AcmReceiver::current_sample_rate_hz() const {
  CriticalSectionScoped lock(crit_sect_.get());
  return current_sample_rate_hz_;
}
|
||||
|
||||
// TODO(turajs): use one set of enumerators, e.g. the one defined in
|
||||
// common_types.h
|
||||
// TODO(henrik.lundin): This method is not used any longer. The call hierarchy
|
||||
// stops in voe::Channel::SetNetEQPlayoutMode(). Remove it.
|
||||
void AcmReceiver::SetPlayoutMode(AudioPlayoutMode mode) {
|
||||
enum NetEqPlayoutMode playout_mode = kPlayoutOn;
|
||||
switch (mode) {
|
||||
case voice:
|
||||
playout_mode = kPlayoutOn;
|
||||
break;
|
||||
case fax: // No change to background noise mode.
|
||||
playout_mode = kPlayoutFax;
|
||||
break;
|
||||
case streaming:
|
||||
playout_mode = kPlayoutStreaming;
|
||||
break;
|
||||
case off:
|
||||
playout_mode = kPlayoutOff;
|
||||
break;
|
||||
}
|
||||
neteq_->SetPlayoutMode(playout_mode);
|
||||
}
|
||||
|
||||
// Reads NetEq's current playout mode and maps it back to the ACM enumeration.
AudioPlayoutMode AcmReceiver::PlayoutMode() const {
  switch (neteq_->PlayoutMode()) {
    case kPlayoutOn:
      return voice;
    case kPlayoutOff:
      return off;
    case kPlayoutFax:
      return fax;
    case kPlayoutStreaming:
      return streaming;
    default:
      assert(false);
      return voice;  // Same fallback value as the original initialization.
  }
}
|
||||
|
||||
// Inserts one RTP payload into NetEq. Under |crit_sect_| it resolves the
// payload type to a codec, updates NACK and AV-sync bookkeeping; the actual
// NetEq insertion happens after the lock is released. Returns 0 on success,
// -1 on unknown payload type or NetEq failure.
int AcmReceiver::InsertPacket(const WebRtcRTPHeader& rtp_header,
                              const uint8_t* incoming_payload,
                              int length_payload) {
  uint32_t receive_timestamp = 0;
  InitialDelayManager::PacketType packet_type =
      InitialDelayManager::kUndefinedPacket;
  bool new_codec = false;
  const RTPHeader* header = &rtp_header.header;  // Just a shorthand.

  {
    CriticalSectionScoped lock(crit_sect_.get());

    // Resolve payload type (un-wrapping RED if necessary) to a codec index.
    int codec_id = RtpHeaderToCodecIndex(*header, incoming_payload);
    if (codec_id < 0) {
      LOG_F(LS_ERROR) << "Payload-type " << header->payloadType
          << " is not registered.";
      return -1;
    }
    assert(codec_id < ACMCodecDB::kMaxNumCodecs);
    const int sample_rate_hz = ACMCodecDB::CodecFreq(codec_id);
    receive_timestamp = NowInTimestamp(sample_rate_hz);

    if (IsCng(codec_id)) {
      // If this is a CNG while the audio codec is not mono skip pushing in
      // packets into NetEq.
      if (last_audio_decoder_ >= 0 &&
          decoders_[last_audio_decoder_].channels > 1)
        return 0;
      packet_type = InitialDelayManager::kCngPacket;
    } else if (codec_id == ACMCodecDB::kAVT) {
      packet_type = InitialDelayManager::kAvtPacket;
    } else {
      if (codec_id != last_audio_decoder_) {
        // This is either the first audio packet or send codec is changed.
        // Therefore, either NetEq buffer is empty or will be flushed when this
        // packet inserted. Note that |last_audio_decoder_| is initialized to
        // an invalid value (-1), hence, the above condition is true for the
        // very first audio packet.
        new_codec = true;

        // Updating NACK'sampling rate is required, either first packet is
        // received or codec is changed. Furthermore, reset is required if codec
        // is changed (NetEq flushes its buffer so NACK should reset its list).
        if (nack_enabled_) {
          assert(nack_.get());
          nack_->Reset();
          nack_->UpdateSampleRate(sample_rate_hz);
        }
        last_audio_decoder_ = codec_id;
      }
      packet_type = InitialDelayManager::kAudioPacket;
    }

    // Record the packet's sequence number / timestamp for NACK tracking.
    if (nack_enabled_) {
      assert(nack_.get());
      nack_->UpdateLastReceivedPacket(header->sequenceNumber,
                                      header->timestamp);
    }

    if (av_sync_) {
      assert(initial_delay_manager_.get());
      assert(missing_packets_sync_stream_.get());
      // This updates |initial_delay_manager_| and specifies an stream of
      // sync-packets, if required to be inserted. We insert the sync-packets
      // when AcmReceiver lock is released and |decoder_lock_| is acquired.
      initial_delay_manager_->UpdateLastReceivedPacket(
          rtp_header, receive_timestamp, packet_type, new_codec, sample_rate_hz,
          missing_packets_sync_stream_.get());
    }
  }  // |crit_sect_| is released.

  // If |missing_packets_sync_stream_| is allocated then we are in AV-sync and
  // we may need to insert sync-packets. We don't check |av_sync_| as we are
  // outside AcmReceiver's critical section.
  if (missing_packets_sync_stream_.get()) {
    InsertStreamOfSyncPackets(missing_packets_sync_stream_.get());
  }

  if (neteq_->InsertPacket(rtp_header, incoming_payload, length_payload,
                           receive_timestamp) < 0) {
    LOG_FERR1(LS_ERROR, "AcmReceiver::InsertPacket", header->payloadType) <<
        " Failed to insert packet";
    return -1;
  }
  return 0;
}
|
||||
|
||||
// Pulls 10 ms of decoded audio from NetEq (or generates silence while the
// AV-sync initial delay is buffering), resampling to |desired_freq_hz| when
// that differs from the decoder rate. |desired_freq_hz| == -1 means "no
// resampling". Returns 0 on success, -1 on NetEq or resampler failure.
int AcmReceiver::GetAudio(int desired_freq_hz, AudioFrame* audio_frame) {
  enum NetEqOutputType type;
  // Default target: decode straight into the caller's frame. May be redirected
  // to |audio_buffer_| below when resampling is anticipated.
  int16_t* ptr_audio_buffer = audio_frame->data_;
  int samples_per_channel;
  int num_channels;
  bool return_silence = false;

  {
    // Accessing members, take the lock.
    CriticalSectionScoped lock(crit_sect_.get());

    if (av_sync_) {
      assert(initial_delay_manager_.get());
      assert(late_packets_sync_stream_.get());
      return_silence = GetSilence(desired_freq_hz, audio_frame);
      uint32_t timestamp_now = NowInTimestamp(current_sample_rate_hz_);
      initial_delay_manager_->LatePackets(timestamp_now,
                                          late_packets_sync_stream_.get());
    }

    if (!return_silence) {
      // This is our initial guess regarding whether a resampling will be
      // required. It is based on previous sample rate of netEq. Most often,
      // this is a correct guess, however, in case that incoming payload changes
      // the resampling might might be needed. By doing so, we avoid an
      // unnecessary memcpy().
      if (desired_freq_hz != -1 &&
          current_sample_rate_hz_ != desired_freq_hz) {
        ptr_audio_buffer = audio_buffer_;
      }
    }
  }

  // If |late_packets_sync_stream_| is allocated then we have been in AV-sync
  // mode and we might have to insert sync-packets.
  if (late_packets_sync_stream_.get()) {
    InsertStreamOfSyncPackets(late_packets_sync_stream_.get());
    if (return_silence)  // Silence generated, don't pull from NetEq.
      return 0;
  }

  if (neteq_->GetAudio(AudioFrame::kMaxDataSizeSamples,
                       ptr_audio_buffer,
                       &samples_per_channel,
                       &num_channels, &type) != NetEq::kOK) {
    LOG_FERR0(LS_ERROR, "AcmReceiver::GetAudio") << "NetEq Failed.";
    return -1;
  }

  // Accessing members, take the lock.
  CriticalSectionScoped lock(crit_sect_.get());

  // Update NACK.
  int decoded_sequence_num = 0;
  uint32_t decoded_timestamp = 0;
  bool update_nack = nack_enabled_ &&  // Update NACK only if it is enabled.
      neteq_->DecodedRtpInfo(&decoded_sequence_num, &decoded_timestamp);
  if (update_nack) {
    assert(nack_.get());
    nack_->UpdateLastDecodedPacket(decoded_sequence_num, decoded_timestamp);
  }

  // NetEq always returns 10 ms of audio.
  current_sample_rate_hz_ = samples_per_channel * 100;

  // Update if resampling is required.
  bool need_resampling = (desired_freq_hz != -1) &&
      (current_sample_rate_hz_ != desired_freq_hz);

  if (ptr_audio_buffer == audio_buffer_) {
    // Data is written to local buffer.
    if (need_resampling) {
      samples_per_channel =
          resampler_.Resample10Msec(audio_buffer_,
                                    current_sample_rate_hz_,
                                    desired_freq_hz,
                                    num_channels,
                                    AudioFrame::kMaxDataSizeSamples,
                                    audio_frame->data_);
      if (samples_per_channel < 0) {
        LOG_FERR0(LS_ERROR, "AcmReceiver::GetAudio") << "Resampler Failed.";
        return -1;
      }
    } else {
      // We might end up here ONLY if codec is changed.
      memcpy(audio_frame->data_, audio_buffer_, samples_per_channel *
             num_channels * sizeof(int16_t));
    }
  } else {
    // Data is written into |audio_frame|.
    if (need_resampling) {
      // We might end up here ONLY if codec is changed.
      samples_per_channel =
          resampler_.Resample10Msec(audio_frame->data_,
                                    current_sample_rate_hz_,
                                    desired_freq_hz,
                                    num_channels,
                                    AudioFrame::kMaxDataSizeSamples,
                                    audio_buffer_);
      if (samples_per_channel < 0) {
        LOG_FERR0(LS_ERROR, "AcmReceiver::GetAudio") << "Resampler Failed.";
        return -1;
      }
      memcpy(audio_frame->data_, audio_buffer_, samples_per_channel *
             num_channels * sizeof(int16_t));
    }
  }

  audio_frame->num_channels_ = num_channels;
  audio_frame->samples_per_channel_ = samples_per_channel;
  audio_frame->sample_rate_hz_ = samples_per_channel * 100;

  // Should set |vad_activity| before calling SetAudioFrameActivityAndType().
  audio_frame->vad_activity_ = previous_audio_activity_;
  SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame);
  previous_audio_activity_ = audio_frame->vad_activity_;
  call_stats_.DecodedByNetEq(audio_frame->speech_type_);

  // Computes the RTP timestamp of the first sample in |audio_frame| from
  // |GetPlayoutTimestamp|, which is the timestamp of the last sample of
  // |audio_frame|.
  uint32_t playout_timestamp = 0;
  if (GetPlayoutTimestamp(&playout_timestamp)) {
    audio_frame->timestamp_ =
        playout_timestamp - audio_frame->samples_per_channel_;
  } else {
    // Remain 0 until we have a valid |playout_timestamp|.
    audio_frame->timestamp_ = 0;
  }

  return 0;
}
|
||||
|
||||
// Registers (or re-registers) a decoder with NetEq.
//
// Input:
//   - acm_codec_id  : ACM codec index (must be in range; asserted).
//   - payload_type  : RTP payload type to bind to the codec.
//   - channels      : number of channels (selects the stereo Opus decoder).
//   - audio_decoder : optional external decoder; NULL lets NetEq create its
//                     own. Ownership stays with the caller.
// Return value: 0 if OK, -1 on NetEq registration/removal failure.
int32_t AcmReceiver::AddCodec(int acm_codec_id,
                              uint8_t payload_type,
                              int channels,
                              AudioDecoder* audio_decoder) {
  assert(acm_codec_id >= 0 && acm_codec_id < ACMCodecDB::kMaxNumCodecs);
  NetEqDecoder neteq_decoder = ACMCodecDB::neteq_decoders_[acm_codec_id];

  // Make sure the right decoder is registered for Opus.
  if (neteq_decoder == kDecoderOpus && channels == 2) {
    neteq_decoder = kDecoderOpus_2ch;
  }

  CriticalSectionScoped lock(crit_sect_.get());

  // The corresponding NetEq decoder ID.
  // If this coder has been registered before.
  if (decoders_[acm_codec_id].registered) {
    if (decoders_[acm_codec_id].payload_type == payload_type &&
        decoders_[acm_codec_id].channels == channels) {
      // Re-registering the same codec with the same payload-type. Do nothing
      // and return.
      return 0;
    }

    // Changing the payload-type or number of channels for this codec.
    // First unregister. Then register with new payload-type/channels.
    if (neteq_->RemovePayloadType(decoders_[acm_codec_id].payload_type) !=
        NetEq::kOK) {
      // Fix: corrected log typo "Cannot remover payload" -> "Cannot remove
      // payload" (now consistent with RemoveAllCodecs()).
      LOG_F(LS_ERROR) << "Cannot remove payload "
          << decoders_[acm_codec_id].payload_type;
      return -1;
    }
  }

  int ret_val;
  if (!audio_decoder) {
    ret_val = neteq_->RegisterPayloadType(neteq_decoder, payload_type);
  } else {
    ret_val = neteq_->RegisterExternalDecoder(
        audio_decoder, neteq_decoder, payload_type);
  }
  if (ret_val != NetEq::kOK) {
    LOG_FERR3(LS_ERROR, "AcmReceiver::AddCodec", acm_codec_id, payload_type,
              channels);
    // Registration failed, delete the allocated space and set the pointer to
    // NULL, for the record.
    decoders_[acm_codec_id].registered = false;
    return -1;
  }

  decoders_[acm_codec_id].registered = true;
  decoders_[acm_codec_id].payload_type = payload_type;
  decoders_[acm_codec_id].channels = channels;
  return 0;
}
|
||||
|
||||
// Turns on NetEq's post-decode VAD, then records the state. The NetEq call is
// made outside the lock, matching the file-wide pattern of keeping NetEq calls
// out of |crit_sect_|.
void AcmReceiver::EnableVad() {
  neteq_->EnableVad();
  CriticalSectionScoped lock(crit_sect_.get());
  vad_enabled_ = true;
}
|
||||
|
||||
// Turns off NetEq's post-decode VAD, then records the state (mirror of
// EnableVad()).
void AcmReceiver::DisableVad() {
  neteq_->DisableVad();
  CriticalSectionScoped lock(crit_sect_.get());
  vad_enabled_ = false;
}
|
||||
|
||||
// Discards all packets and audio currently buffered in NetEq.
void AcmReceiver::FlushBuffers() {
  neteq_->FlushBuffers();
}
|
||||
|
||||
// If failed in removing one of the codecs, this method continues to remove as
// many as it can. Returns 0 if every registered codec was removed, -1 if any
// removal failed (failed entries stay marked registered).
int AcmReceiver::RemoveAllCodecs() {
  int ret_val = 0;
  CriticalSectionScoped lock(crit_sect_.get());
  for (int n = 0; n < ACMCodecDB::kMaxNumCodecs; ++n) {
    if (decoders_[n].registered) {
      if (neteq_->RemovePayloadType(decoders_[n].payload_type) == 0) {
        decoders_[n].registered = false;
      } else {
        LOG_F(LS_ERROR) << "Cannot remove payload "
            << decoders_[n].payload_type;
        ret_val = -1;
      }
    }
  }
  // No codec is registered, invalidate last audio decoder.
  last_audio_decoder_ = -1;
  return ret_val;
}
|
||||
|
||||
// Unregisters the codec bound to |payload_type|. Removing an unknown payload
// type is a no-op success; only a NetEq removal failure yields -1.
int AcmReceiver::RemoveCodec(uint8_t payload_type) {
  const int index = PayloadType2CodecIndex(payload_type);
  if (index < 0)
    return 0;  // Such a payload-type is not registered.
  if (neteq_->RemovePayloadType(payload_type) != NetEq::kOK) {
    LOG_FERR1(LS_ERROR, "AcmReceiver::RemoveCodec", payload_type);
    return -1;
  }
  CriticalSectionScoped lock(crit_sect_.get());
  decoders_[index].registered = false;
  if (last_audio_decoder_ == index)
    last_audio_decoder_ = -1;  // Codec is removed, invalidate last decoder.
  return 0;
}
|
||||
|
||||
// Updates the trace/logging identifier under the member lock.
void AcmReceiver::set_id(int id) {
  CriticalSectionScoped lock(crit_sect_.get());
  id_ = id;
}
|
||||
|
||||
// Writes the current playout RTP timestamp to |timestamp|. While AV-sync is
// buffering, the InitialDelayManager supplies the value; otherwise NetEq does.
// Returns false if no valid timestamp is available yet.
bool AcmReceiver::GetPlayoutTimestamp(uint32_t* timestamp) {
  if (av_sync_) {
    assert(initial_delay_manager_.get());
    if (initial_delay_manager_->buffering()) {
      return initial_delay_manager_->GetPlayoutTimestamp(timestamp);
    }
  }
  return neteq_->GetPlayoutTimestamp(timestamp);
}
|
||||
|
||||
// Returns the codec index of the last decoded audio packet, or -1 if no audio
// packet has been received yet.
int AcmReceiver::last_audio_codec_id() const {
  CriticalSectionScoped lock(crit_sect_.get());
  return last_audio_decoder_;
}
|
||||
|
||||
// Returns the RTP payload type of the last decoded audio codec, or -1 if no
// audio packet has been received yet.
int AcmReceiver::last_audio_payload_type() const {
  CriticalSectionScoped lock(crit_sect_.get());
  if (last_audio_decoder_ >= 0) {
    assert(decoders_[last_audio_decoder_].registered);
    return decoders_[last_audio_decoder_].payload_type;
  }
  return -1;
}
|
||||
|
||||
int AcmReceiver::RedPayloadType() const {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
if (ACMCodecDB::kRED < 0 ||
|
||||
!decoders_[ACMCodecDB::kRED].registered) {
|
||||
LOG_F(LS_WARNING) << "RED is not registered.";
|
||||
return -1;
|
||||
}
|
||||
return decoders_[ACMCodecDB::kRED].payload_type;
|
||||
}
|
||||
|
||||
// Fills |codec| with the CodecInst of the last decoded audio codec, patching
// in the registered payload type and channel count. Returns -1 if no audio
// packet has been received yet.
int AcmReceiver::LastAudioCodec(CodecInst* codec) const {
  CriticalSectionScoped lock(crit_sect_.get());
  if (last_audio_decoder_ < 0) {
    return -1;
  }
  assert(decoders_[last_audio_decoder_].registered);
  // Start from the database template, then overwrite the per-registration
  // fields.
  memcpy(codec, &ACMCodecDB::database_[last_audio_decoder_], sizeof(CodecInst));
  codec->pltype = decoders_[last_audio_decoder_].payload_type;
  codec->channels = decoders_[last_audio_decoder_].channels;
  return 0;
}
|
||||
|
||||
// Copies NetEq's network statistics into the ACM statistics struct and
// computes min/max/mean/median of NetEq's per-packet waiting times (-1 for
// all four when no packets have been observed).
void AcmReceiver::NetworkStatistics(ACMNetworkStatistics* acm_stat) {
  NetEqNetworkStatistics neteq_stat;
  // NetEq function always returns zero, so we don't check the return value.
  neteq_->NetworkStatistics(&neteq_stat);

  acm_stat->currentBufferSize = neteq_stat.current_buffer_size_ms;
  acm_stat->preferredBufferSize = neteq_stat.preferred_buffer_size_ms;
  acm_stat->jitterPeaksFound = neteq_stat.jitter_peaks_found ? true : false;
  acm_stat->currentPacketLossRate = neteq_stat.packet_loss_rate;
  acm_stat->currentDiscardRate = neteq_stat.packet_discard_rate;
  acm_stat->currentExpandRate = neteq_stat.expand_rate;
  acm_stat->currentPreemptiveRate = neteq_stat.preemptive_rate;
  acm_stat->currentAccelerateRate = neteq_stat.accelerate_rate;
  acm_stat->clockDriftPPM = neteq_stat.clockdrift_ppm;
  acm_stat->addedSamples = neteq_stat.added_zero_samples;

  std::vector<int> waiting_times;
  neteq_->WaitingTimes(&waiting_times);
  size_t size = waiting_times.size();
  if (size == 0) {
    acm_stat->meanWaitingTimeMs = -1;
    acm_stat->medianWaitingTimeMs = -1;
    acm_stat->minWaitingTimeMs = -1;
    acm_stat->maxWaitingTimeMs = -1;
  } else {
    std::sort(waiting_times.begin(), waiting_times.end());
    // Median: average of the two middle elements for an even count.
    if ((size & 0x1) == 0) {
      acm_stat->medianWaitingTimeMs = (waiting_times[size / 2 - 1] +
          waiting_times[size / 2]) / 2;
    } else {
      acm_stat->medianWaitingTimeMs = waiting_times[size / 2];
    }
    acm_stat->minWaitingTimeMs = waiting_times.front();
    acm_stat->maxWaitingTimeMs = waiting_times.back();
    double sum = 0;
    for (size_t i = 0; i < size; ++i) {
      sum += waiting_times[i];
    }
    acm_stat->meanWaitingTimeMs = static_cast<int>(sum / size);
  }
}
|
||||
|
||||
// Fills |codec| with the CodecInst registered under |payload_type| (database
// template plus the registration's payload type / channel count). Returns -1
// if no decoder uses that payload type.
int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
                                      CodecInst* codec) const {
  CriticalSectionScoped lock(crit_sect_.get());
  int codec_index = PayloadType2CodecIndex(payload_type);
  if (codec_index < 0) {
    LOG_FERR1(LS_ERROR, "AcmReceiver::DecoderByPayloadType", payload_type);
    return -1;
  }
  memcpy(codec, &ACMCodecDB::database_[codec_index], sizeof(CodecInst));
  codec->pltype = decoders_[codec_index].payload_type;
  codec->channels = decoders_[codec_index].channels;
  return 0;
}
|
||||
|
||||
// Linear scan of the decoder table for a registered entry bound to
// |payload_type|. Returns the codec index, or -1 when none matches.
int AcmReceiver::PayloadType2CodecIndex(uint8_t payload_type) const {
  for (int idx = 0; idx < ACMCodecDB::kMaxNumCodecs; ++idx) {
    const Decoder& entry = decoders_[idx];
    if (entry.registered && entry.payload_type == payload_type)
      return idx;
  }
  return -1;
}
|
||||
|
||||
// Enables NACK with the given maximum list size (1..Nack::kNackListSizeLimit).
// Creates the Nack helper on the disabled->enabled transition and seeds its
// sample rate from the current receive codec, if any. Returns the result of
// SetMaxNackListSize, or -1 for an out-of-range size.
int AcmReceiver::EnableNack(size_t max_nack_list_size) {
  // Don't do anything if |max_nack_list_size| is out of range.
  if (max_nack_list_size == 0 || max_nack_list_size > Nack::kNackListSizeLimit)
    return -1;

  CriticalSectionScoped lock(crit_sect_.get());
  if (!nack_enabled_) {
    nack_.reset(Nack::Create(kNackThresholdPackets));
    nack_enabled_ = true;

    // Sampling rate might need to be updated if we change from disable to
    // enable. Do it if the receive codec is valid.
    if (last_audio_decoder_ >= 0) {
      nack_->UpdateSampleRate(
          ACMCodecDB::database_[last_audio_decoder_].plfreq);
    }
  }
  return nack_->SetMaxNackListSize(max_nack_list_size);
}
|
||||
|
||||
// Disables NACK and destroys the helper, dropping any pending NACK list.
void AcmReceiver::DisableNack() {
  CriticalSectionScoped lock(crit_sect_.get());
  nack_.reset();  // Memory is released.
  nack_enabled_ = false;
}
|
||||
|
||||
std::vector<uint16_t> AcmReceiver::GetNackList(
|
||||
int round_trip_time_ms) const {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
if (round_trip_time_ms < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
|
||||
"GetNackList: round trip time cannot be negative."
|
||||
" round_trip_time_ms=%d", round_trip_time_ms);
|
||||
}
|
||||
if (nack_enabled_ && round_trip_time_ms >= 0) {
|
||||
assert(nack_.get());
|
||||
return nack_->GetNackList(round_trip_time_ms);
|
||||
}
|
||||
std::vector<uint16_t> empty_list;
|
||||
return empty_list;
|
||||
}
|
||||
|
||||
// Leaves AV-sync mode: releases the initial-delay helpers under the lock,
// then clears NetEq's minimum delay outside it (file-wide pattern of keeping
// NetEq calls out of |crit_sect_|).
void AcmReceiver::ResetInitialDelay() {
  {
    CriticalSectionScoped lock(crit_sect_.get());
    av_sync_ = false;
    initial_delay_manager_.reset(NULL);
    missing_packets_sync_stream_.reset(NULL);
    late_packets_sync_stream_.reset(NULL);
  }
  neteq_->SetMinimumDelay(0);
  // TODO(turajs): Should NetEq Buffer be flushed?
}
|
||||
|
||||
// Produces one 10 ms frame of zeroed samples while the AV-sync initial delay
// is still buffering. Returns false (no silence generated) once buffering is
// over or NetEq's packet buffer is close to full.
// This function is called within critical section, no need to acquire a lock.
bool AcmReceiver::GetSilence(int desired_sample_rate_hz, AudioFrame* frame) {
  assert(av_sync_);
  assert(initial_delay_manager_.get());
  if (!initial_delay_manager_->buffering()) {
    return false;
  }

  // We stop accumulating packets, if the number of packets or the total size
  // exceeds a threshold.
  int num_packets;
  int max_num_packets;
  const float kBufferingThresholdScale = 0.9f;
  neteq_->PacketBufferStatistics(&num_packets, &max_num_packets);
  if (num_packets > max_num_packets * kBufferingThresholdScale) {
    initial_delay_manager_->DisableBuffering();
    return false;
  }

  // Update statistics.
  call_stats_.DecodedBySilenceGenerator();

  // Set the values if already got a packet, otherwise set to default values.
  if (last_audio_decoder_ >= 0) {
    current_sample_rate_hz_ = ACMCodecDB::database_[last_audio_decoder_].plfreq;
    frame->num_channels_ = decoders_[last_audio_decoder_].channels;
  } else {
    frame->num_channels_ = 1;
  }

  // Set the audio frame's sampling frequency.
  if (desired_sample_rate_hz > 0) {
    frame->sample_rate_hz_ = desired_sample_rate_hz;
  } else {
    frame->sample_rate_hz_ = current_sample_rate_hz_;
  }

  frame->samples_per_channel_ = frame->sample_rate_hz_ / 100;  // Always 10 ms.
  frame->speech_type_ = AudioFrame::kCNG;
  frame->vad_activity_ = AudioFrame::kVadPassive;
  int samples = frame->samples_per_channel_ * frame->num_channels_;
  memset(frame->data_, 0, samples * sizeof(int16_t));
  return true;
}
|
||||
|
||||
// Maps an RTP header to a codec index. If the packet is RED, the inner audio
// payload type is read from the first payload byte (low 7 bits) before the
// lookup. Returns -1 when the payload type is not registered.
int AcmReceiver::RtpHeaderToCodecIndex(
    const RTPHeader &rtp_header, const uint8_t* payload) const {
  uint8_t payload_type = rtp_header.payloadType;
  if (ACMCodecDB::kRED >= 0 &&  // This ensures that RED is defined in WebRTC.
      decoders_[ACMCodecDB::kRED].registered &&
      payload_type == decoders_[ACMCodecDB::kRED].payload_type) {
    // This is a RED packet, get the payload of the audio codec.
    payload_type = payload[0] & 0x7F;
  }

  // Check if the payload is registered.
  return PayloadType2CodecIndex(payload_type);
}
|
||||
|
||||
uint32_t AcmReceiver::NowInTimestamp(int decoder_sampling_rate) const {
|
||||
// Down-cast the time to (32-6)-bit since we only care about
|
||||
// the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
|
||||
// We masked 6 most significant bits of 32-bit so there is no overflow in
|
||||
// the conversion from milliseconds to timestamp.
|
||||
const uint32_t now_in_ms = static_cast<uint32_t>(
|
||||
clock_->TimeInMilliseconds() & 0x03ffffff);
|
||||
return static_cast<uint32_t>(
|
||||
(decoder_sampling_rate / 1000) * now_in_ms);
|
||||
}
|
||||
|
||||
// Pushes a run of |num_sync_packets| sync-packets into NetEq, advancing the
// sequence number and both timestamps by |timestamp_step| per packet.
// This function only interacts with |neteq_|, therefore, it does not have to
// be within critical section of AcmReceiver. It is inserting packets
// into NetEq, so we call it when |decode_lock_| is acquired. However, this is
// not essential as sync-packets do not interact with codecs (especially BWE).
void AcmReceiver::InsertStreamOfSyncPackets(
    InitialDelayManager::SyncStream* sync_stream) {
  assert(sync_stream);
  assert(av_sync_);
  for (int n = 0; n < sync_stream->num_sync_packets; ++n) {
    neteq_->InsertSyncPacket(sync_stream->rtp_info,
                             sync_stream->receive_timestamp);
    ++sync_stream->rtp_info.header.sequenceNumber;
    sync_stream->rtp_info.header.timestamp += sync_stream->timestamp_step;
    sync_stream->receive_timestamp += sync_stream->timestamp_step;
  }
}
|
||||
|
||||
// Copies the accumulated decoding statistics (NetEq decodes vs. generated
// silence) into |stats| under the member lock.
void AcmReceiver::GetDecodingCallStatistics(
    AudioDecodingCallStats* stats) const {
  CriticalSectionScoped lock(crit_sect_.get());
  *stats = call_stats_.GetDecodingStatistics();
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
364
jni/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
Normal file
364
jni/webrtc/modules/audio_coding/main/acm2/acm_receiver.h
Normal file
@@ -0,0 +1,364 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "webrtc/common_audio/vad/include/webrtc_vad.h"
|
||||
#include "webrtc/engine_configurations.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/interface/neteq.h"
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
#include "webrtc/typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
struct CodecInst;
|
||||
class CriticalSectionWrapper;
|
||||
class NetEq;
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class Nack;
|
||||
|
||||
class AcmReceiver {
|
||||
public:
|
||||
struct Decoder {
|
||||
bool registered;
|
||||
uint8_t payload_type;
|
||||
// This field is meaningful for codecs where both mono and
|
||||
// stereo versions are registered under the same ID.
|
||||
int channels;
|
||||
};
|
||||
|
||||
// Constructor of the class
|
||||
explicit AcmReceiver(const AudioCodingModule::Config& config);
|
||||
|
||||
// Destructor of the class.
|
||||
~AcmReceiver();
|
||||
|
||||
//
|
||||
// Inserts a payload with its associated RTP-header into NetEq.
|
||||
//
|
||||
// Input:
|
||||
// - rtp_header : RTP header for the incoming payload containing
|
||||
// information about payload type, sequence number,
|
||||
// timestamp, SSRC and marker bit.
|
||||
// - incoming_payload : Incoming audio payload.
|
||||
// - length_payload : Length of incoming audio payload in bytes.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// <0 if NetEq returned an error.
|
||||
//
|
||||
int InsertPacket(const WebRtcRTPHeader& rtp_header,
|
||||
const uint8_t* incoming_payload,
|
||||
int length_payload);
|
||||
|
||||
//
|
||||
// Asks NetEq for 10 milliseconds of decoded audio.
|
||||
//
|
||||
// Input:
|
||||
// -desired_freq_hz : specifies the sampling rate [Hz] of the output
|
||||
// audio. If set -1 indicates to resampling is
|
||||
// is required and the audio returned at the
|
||||
// sampling rate of the decoder.
|
||||
//
|
||||
// Output:
|
||||
// -audio_frame : an audio frame were output data and
|
||||
// associated parameters are written to.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// -1 if NetEq returned an error.
|
||||
//
|
||||
int GetAudio(int desired_freq_hz, AudioFrame* audio_frame);
|
||||
|
||||
//
|
||||
// Adds a new codec to the NetEq codec database.
|
||||
//
|
||||
// Input:
|
||||
// - acm_codec_id : ACM codec ID.
|
||||
// - payload_type : payload type.
|
||||
// - audio_decoder : pointer to a decoder object. If it is NULL
|
||||
// then NetEq will internally create the decoder
|
||||
// object. Otherwise, NetEq will store this pointer
|
||||
// as the decoder corresponding with the given
|
||||
// payload type. NetEq won't acquire the ownership
|
||||
// of this pointer. It is up to the client of this
|
||||
// class (ACM) to delete it. By providing
|
||||
// |audio_decoder| ACM will have control over the
|
||||
// decoder instance of the codec. This is essential
|
||||
// for a codec like iSAC which encoder/decoder
|
||||
// encoder has to know about decoder (bandwidth
|
||||
// estimator that is updated at decoding time).
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// <0 if NetEq returned an error.
|
||||
//
|
||||
int AddCodec(int acm_codec_id,
|
||||
uint8_t payload_type,
|
||||
int channels,
|
||||
AudioDecoder* audio_decoder);
|
||||
|
||||
//
|
||||
// Sets a minimum delay for packet buffer. The given delay is maintained,
|
||||
// unless channel condition dictates a higher delay.
|
||||
//
|
||||
// Input:
|
||||
// - delay_ms : minimum delay in milliseconds.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// <0 if NetEq returned an error.
|
||||
//
|
||||
int SetMinimumDelay(int delay_ms);
|
||||
|
||||
//
|
||||
// Sets a maximum delay [ms] for the packet buffer. The target delay does not
|
||||
// exceed the given value, even if channel condition requires so.
|
||||
//
|
||||
// Input:
|
||||
// - delay_ms : maximum delay in milliseconds.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// <0 if NetEq returned an error.
|
||||
//
|
||||
int SetMaximumDelay(int delay_ms);
|
||||
|
||||
//
|
||||
// Get least required delay computed based on channel conditions. Note that
|
||||
// this is before applying any user-defined limits (specified by calling
|
||||
// (SetMinimumDelay() and/or SetMaximumDelay()).
|
||||
//
|
||||
int LeastRequiredDelayMs() const;
|
||||
|
||||
//
|
||||
// Sets an initial delay of |delay_ms| milliseconds. This introduces a playout
|
||||
// delay. Silence (zero signal) is played out until equivalent of |delay_ms|
|
||||
// millisecond of audio is buffered. Then, NetEq maintains the delay.
|
||||
//
|
||||
// Input:
|
||||
// - delay_ms : initial delay in milliseconds.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// <0 if NetEq returned an error.
|
||||
//
|
||||
int SetInitialDelay(int delay_ms);
|
||||
|
||||
//
|
||||
// Resets the initial delay to zero.
|
||||
//
|
||||
void ResetInitialDelay();
|
||||
|
||||
//
|
||||
// Get the current sampling frequency in Hz.
|
||||
//
|
||||
// Return value : Sampling frequency in Hz.
|
||||
//
|
||||
int current_sample_rate_hz() const;
|
||||
|
||||
//
|
||||
// Sets the playout mode.
|
||||
//
|
||||
// Input:
|
||||
// - mode : an enumerator specifying the playout mode.
|
||||
//
|
||||
void SetPlayoutMode(AudioPlayoutMode mode);
|
||||
|
||||
//
|
||||
// Get the current playout mode.
|
||||
//
|
||||
// Return value : The current playout mode.
|
||||
//
|
||||
AudioPlayoutMode PlayoutMode() const;
|
||||
|
||||
//
|
||||
// Get the current network statistics from NetEq.
|
||||
//
|
||||
// Output:
|
||||
// - statistics : The current network statistics.
|
||||
//
|
||||
void NetworkStatistics(ACMNetworkStatistics* statistics);
|
||||
|
||||
//
|
||||
// Enable post-decoding VAD.
|
||||
//
|
||||
void EnableVad();
|
||||
|
||||
//
|
||||
// Disable post-decoding VAD.
|
||||
//
|
||||
void DisableVad();
|
||||
|
||||
//
|
||||
// Returns whether post-decoding VAD is enabled (true) or disabled (false).
|
||||
//
|
||||
bool vad_enabled() const { return vad_enabled_; }
|
||||
|
||||
//
|
||||
// Flushes the NetEq packet and speech buffers.
|
||||
//
|
||||
void FlushBuffers();
|
||||
|
||||
//
|
||||
// Removes a payload-type from the NetEq codec database.
|
||||
//
|
||||
// Input:
|
||||
// - payload_type : the payload-type to be removed.
|
||||
//
|
||||
// Return value : 0 if OK.
|
||||
// -1 if an error occurred.
|
||||
//
|
||||
int RemoveCodec(uint8_t payload_type);
|
||||
|
||||
//
|
||||
// Remove all registered codecs.
|
||||
//
|
||||
int RemoveAllCodecs();
|
||||
|
||||
//
|
||||
// Set ID.
|
||||
//
|
||||
void set_id(int id); // TODO(turajs): can be inline.
|
||||
|
||||
//
|
||||
// Gets the RTP timestamp of the last sample delivered by GetAudio().
|
||||
// Returns true if the RTP timestamp is valid, otherwise false.
|
||||
//
|
||||
bool GetPlayoutTimestamp(uint32_t* timestamp);
|
||||
|
||||
//
|
||||
// Return the index of the codec associated with the last non-CNG/non-DTMF
|
||||
// received payload. If no non-CNG/non-DTMF payload is received -1 is
|
||||
// returned.
|
||||
//
|
||||
int last_audio_codec_id() const; // TODO(turajs): can be inline.
|
||||
|
||||
//
|
||||
// Return the payload-type of the last non-CNG/non-DTMF RTP packet. If no
|
||||
// non-CNG/non-DTMF packet is received -1 is returned.
|
||||
//
|
||||
int last_audio_payload_type() const; // TODO(turajs): can be inline.
|
||||
|
||||
//
|
||||
// Get the audio codec associated with the last non-CNG/non-DTMF received
|
||||
// payload. If no non-CNG/non-DTMF packet is received -1 is returned,
|
||||
// otherwise return 0.
|
||||
//
|
||||
int LastAudioCodec(CodecInst* codec) const;
|
||||
|
||||
//
|
||||
// Return payload type of RED if it is registered, otherwise return -1;
|
||||
//
|
||||
int RedPayloadType() const;
|
||||
|
||||
//
|
||||
// Get a decoder given its registered payload-type.
|
||||
//
|
||||
// Input:
|
||||
// -payload_type : the payload-type of the codec to be retrieved.
|
||||
//
|
||||
// Output:
|
||||
// -codec : codec associated with the given payload-type.
|
||||
//
|
||||
// Return value : 0 if succeeded.
|
||||
// -1 if failed, e.g. given payload-type is not
|
||||
// registered.
|
||||
//
|
||||
int DecoderByPayloadType(uint8_t payload_type,
|
||||
CodecInst* codec) const;
|
||||
|
||||
//
|
||||
// Enable NACK and set the maximum size of the NACK list. If NACK is already
|
||||
// enabled then the maximum NACK list size is modified accordingly.
|
||||
//
|
||||
// Input:
|
||||
// -max_nack_list_size : maximum NACK list size
|
||||
// should be positive (none zero) and less than or
|
||||
// equal to |Nack::kNackListSizeLimit|
|
||||
// Return value
|
||||
// : 0 if succeeded.
|
||||
// -1 if failed
|
||||
//
|
||||
int EnableNack(size_t max_nack_list_size);
|
||||
|
||||
// Disable NACK.
|
||||
void DisableNack();
|
||||
|
||||
//
|
||||
// Get a list of packets to be retransmitted.
|
||||
//
|
||||
// Input:
|
||||
// -round_trip_time_ms : estimate of the round-trip-time (in milliseconds).
|
||||
// Return value : list of packets to be retransmitted.
|
||||
//
|
||||
std::vector<uint16_t> GetNackList(int round_trip_time_ms) const;
|
||||
|
||||
//
|
||||
// Get statistics of calls to GetAudio().
|
||||
void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
|
||||
|
||||
private:
|
||||
int PayloadType2CodecIndex(uint8_t payload_type) const;
|
||||
|
||||
bool GetSilence(int desired_sample_rate_hz, AudioFrame* frame)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(crit_sect_);
|
||||
|
||||
int GetNumSyncPacketToInsert(uint16_t received_squence_number);
|
||||
|
||||
int RtpHeaderToCodecIndex(
|
||||
const RTPHeader& rtp_header, const uint8_t* payload) const;
|
||||
|
||||
uint32_t NowInTimestamp(int decoder_sampling_rate) const;
|
||||
|
||||
void InsertStreamOfSyncPackets(InitialDelayManager::SyncStream* sync_stream);
|
||||
|
||||
scoped_ptr<CriticalSectionWrapper> crit_sect_;
|
||||
int id_; // TODO(henrik.lundin) Make const.
|
||||
int last_audio_decoder_ GUARDED_BY(crit_sect_);
|
||||
AudioFrame::VADActivity previous_audio_activity_ GUARDED_BY(crit_sect_);
|
||||
int current_sample_rate_hz_ GUARDED_BY(crit_sect_);
|
||||
ACMResampler resampler_ GUARDED_BY(crit_sect_);
|
||||
// Used in GetAudio, declared as member to avoid allocating every 10ms.
|
||||
// TODO(henrik.lundin) Stack-allocate in GetAudio instead?
|
||||
int16_t audio_buffer_[AudioFrame::kMaxDataSizeSamples] GUARDED_BY(crit_sect_);
|
||||
scoped_ptr<Nack> nack_ GUARDED_BY(crit_sect_);
|
||||
bool nack_enabled_ GUARDED_BY(crit_sect_);
|
||||
CallStatistics call_stats_ GUARDED_BY(crit_sect_);
|
||||
NetEq* neteq_;
|
||||
Decoder decoders_[ACMCodecDB::kMaxNumCodecs];
|
||||
bool vad_enabled_;
|
||||
Clock* clock_; // TODO(henrik.lundin) Make const if possible.
|
||||
|
||||
// Indicates if a non-zero initial delay is set, and the receiver is in
|
||||
// AV-sync mode.
|
||||
bool av_sync_;
|
||||
scoped_ptr<InitialDelayManager> initial_delay_manager_;
|
||||
|
||||
// The following are defined as members to avoid creating them in every
|
||||
// iteration. |missing_packets_sync_stream_| is *ONLY* used in InsertPacket().
|
||||
// |late_packets_sync_stream_| is only used in GetAudio(). Both of these
|
||||
// member variables are allocated only when we AV-sync is enabled, i.e.
|
||||
// initial delay is set.
|
||||
scoped_ptr<InitialDelayManager::SyncStream> missing_packets_sync_stream_;
|
||||
scoped_ptr<InitialDelayManager::SyncStream> late_packets_sync_stream_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RECEIVER_H_
|
||||
@@ -0,0 +1,364 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
|
||||
|
||||
#include <algorithm> // std::min
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/rtp_generator.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/test/test_suite.h"
|
||||
#include "webrtc/test/testsupport/fileutils.h"
|
||||
#include "webrtc/test/testsupport/gtest_disable.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
namespace {
|
||||
|
||||
bool CodecsEqual(const CodecInst& codec_a, const CodecInst& codec_b) {
|
||||
if (strcmp(codec_a.plname, codec_b.plname) != 0 ||
|
||||
codec_a.plfreq != codec_b.plfreq ||
|
||||
codec_a.pltype != codec_b.pltype ||
|
||||
codec_b.channels != codec_a.channels)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// Test fixture for AcmReceiver. It wires an ACM sender directly to the
// receiver under test: it registers itself as the sender's transport
// callback, so every encoded packet produced by |acm_| is delivered to
// |receiver_| via SendData() below.
class AcmReceiverTest : public AudioPacketizationCallback,
                        public ::testing::Test {
 protected:
  AcmReceiverTest()
      : timestamp_(0),
        packet_sent_(false),
        last_packet_send_timestamp_(timestamp_),
        last_frame_type_(kFrameEmpty) {
    AudioCodingModule::Config config;
    acm_.reset(new AudioCodingModuleImpl(config));
    receiver_.reset(new AcmReceiver(config));
  }

  ~AcmReceiverTest() {}

  void SetUp() {
    ASSERT_TRUE(receiver_.get() != NULL);
    ASSERT_TRUE(acm_.get() != NULL);
    // Cache the full codec database for use by the tests.
    for (int n = 0; n < ACMCodecDB::kNumCodecs; n++) {
      ASSERT_EQ(0, ACMCodecDB::Codec(n, &codecs_[n]));
    }

    acm_->InitializeReceiver();
    acm_->InitializeSender();
    acm_->RegisterTransportCallback(this);

    rtp_header_.header.sequenceNumber = 0;
    rtp_header_.header.timestamp = 0;
    rtp_header_.header.markerBit = false;
    rtp_header_.header.ssrc = 0x12345678;  // Arbitrary.
    rtp_header_.header.numCSRCs = 0;
    rtp_header_.header.payloadType = 0;
    rtp_header_.frameType = kAudioFrameSpeech;
    rtp_header_.type.Audio.isCNG = false;
  }

  void TearDown() {
  }

  // Feeds zero-valued 10 ms frames into the sender until it emits (at least)
  // one packet, switching the send codec to |codec_id| first if needed. The
  // packet reaches |receiver_| through SendData().
  void InsertOnePacketOfSilence(int codec_id) {
    CodecInst codec;
    ACMCodecDB::Codec(codec_id, &codec);
    if (timestamp_ == 0) {  // This is the first time inserting audio.
      ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
    } else {
      CodecInst current_codec;
      ASSERT_EQ(0, acm_->SendCodec(&current_codec));
      if (!CodecsEqual(codec, current_codec))
        ASSERT_EQ(0, acm_->RegisterSendCodec(codec));
    }
    AudioFrame frame;
    // Frame setup according to the codec.
    frame.sample_rate_hz_ = codec.plfreq;
    frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
    frame.num_channels_ = codec.channels;
    memset(frame.data_, 0, frame.samples_per_channel_ * frame.num_channels_ *
           sizeof(int16_t));
    int num_bytes = 0;
    packet_sent_ = false;
    last_packet_send_timestamp_ = timestamp_;
    // Keep feeding 10 ms frames until the encoder produces output.
    while (num_bytes == 0) {
      frame.timestamp_ = timestamp_;
      timestamp_ += frame.samples_per_channel_;
      ASSERT_EQ(0, acm_->Add10MsData(frame));
      num_bytes = acm_->Process();
      ASSERT_GE(num_bytes, 0);
    }
    ASSERT_TRUE(packet_sent_);  // Sanity check.
  }

  // Registers each listed codec ID with the receiver.
  // Last element of id should be negative.
  void AddSetOfCodecs(const int* id) {
    int n = 0;
    while (id[n] >= 0) {
      ASSERT_EQ(0, receiver_->AddCodec(id[n], codecs_[id[n]].pltype,
                                       codecs_[id[n]].channels, NULL));
      ++n;
    }
  }

  // AudioPacketizationCallback: invoked by the ACM sender for each encoded
  // packet; forwards the payload to the receiver under test as an RTP packet.
  virtual int SendData(
      FrameType frame_type,
      uint8_t payload_type,
      uint32_t timestamp,
      const uint8_t* payload_data,
      uint16_t payload_len_bytes,
      const RTPFragmentationHeader* fragmentation) {
    if (frame_type == kFrameEmpty)
      return 0;

    rtp_header_.header.payloadType = payload_type;
    rtp_header_.frameType = frame_type;
    if (frame_type == kAudioFrameSpeech)
      rtp_header_.type.Audio.isCNG = false;
    else
      rtp_header_.type.Audio.isCNG = true;
    rtp_header_.header.timestamp = timestamp;

    int ret_val = receiver_->InsertPacket(rtp_header_, payload_data,
                                          payload_len_bytes);
    if (ret_val < 0) {
      assert(false);
      return -1;
    }
    rtp_header_.header.sequenceNumber++;
    packet_sent_ = true;
    last_frame_type_ = frame_type;
    return 0;
  }

  scoped_ptr<AcmReceiver> receiver_;
  CodecInst codecs_[ACMCodecDB::kMaxNumCodecs];
  scoped_ptr<AudioCodingModule> acm_;
  WebRtcRTPHeader rtp_header_;
  uint32_t timestamp_;
  bool packet_sent_;  // Set when SendData is called reset when inserting audio.
  uint32_t last_packet_send_timestamp_;
  FrameType last_frame_type_;
};
|
||||
|
||||
// Registers every other codec and verifies that exactly the registered
// payload types can be looked up again.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(AddCodecGetCodec)) {
  // Add codec.
  for (int n = 0; n < ACMCodecDB::kNumCodecs; ++n) {
    if (n & 0x1)  // Just add codecs with odd index.
      EXPECT_EQ(0, receiver_->AddCodec(n, codecs_[n].pltype,
                                       codecs_[n].channels, NULL));
  }
  // Get codec and compare.
  for (int n = 0; n < ACMCodecDB::kNumCodecs; ++n) {
    CodecInst my_codec;
    if (n & 0x1) {
      // Codecs with odd index should match the reference.
      EXPECT_EQ(0, receiver_->DecoderByPayloadType(codecs_[n].pltype,
                                                   &my_codec));
      EXPECT_TRUE(CodecsEqual(codecs_[n], my_codec));
    } else {
      // Codecs with even index are not registered.
      EXPECT_EQ(-1, receiver_->DecoderByPayloadType(codecs_[n].pltype,
                                                    &my_codec));
    }
  }
}
|
||||
|
||||
// Re-registering the same codec under a new payload type must move it: the
// old payload type becomes unknown and the new one resolves to the codec.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(AddCodecChangePayloadType)) {
  CodecInst ref_codec;
  const int codec_id = ACMCodecDB::kPCMA;
  EXPECT_EQ(0, ACMCodecDB::Codec(codec_id, &ref_codec));
  const int payload_type = ref_codec.pltype;
  EXPECT_EQ(0, receiver_->AddCodec(codec_id, ref_codec.pltype,
                                   ref_codec.channels, NULL));
  CodecInst test_codec;
  EXPECT_EQ(0, receiver_->DecoderByPayloadType(payload_type, &test_codec));
  EXPECT_EQ(true, CodecsEqual(ref_codec, test_codec));

  // Re-register the same codec with different payload.
  ref_codec.pltype = payload_type + 1;
  EXPECT_EQ(0, receiver_->AddCodec(codec_id, ref_codec.pltype,
                                   ref_codec.channels, NULL));

  // Payload type |payload_type| should not exist.
  EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &test_codec));

  // Payload type |payload_type + 1| should exist.
  EXPECT_EQ(0, receiver_->DecoderByPayloadType(payload_type + 1, &test_codec));
  EXPECT_TRUE(CodecsEqual(test_codec, ref_codec));
}
|
||||
|
||||
// RemoveCodec: removing an unregistered payload type succeeds (ACM1 legacy
// behavior), removing a registered one succeeds, and the removed payload
// type can no longer be resolved.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(AddCodecRemoveCodec)) {
  CodecInst codec;
  const int codec_id = ACMCodecDB::kPCMA;
  EXPECT_EQ(0, ACMCodecDB::Codec(codec_id, &codec));
  const int payload_type = codec.pltype;
  EXPECT_EQ(0, receiver_->AddCodec(codec_id, codec.pltype,
                                   codec.channels, NULL));

  // Remove non-existing codec should not fail. ACM1 legacy.
  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type + 1));

  // Remove an existing codec.
  EXPECT_EQ(0, receiver_->RemoveCodec(payload_type));

  // Ask for the removed codec, must fail.
  EXPECT_EQ(-1, receiver_->DecoderByPayloadType(payload_type, &codec));
}
|
||||
|
||||
// After decoding a packet of each iSAC variant, the receiver's current
// sample rate should track the codec's rate, capped at 32 kHz — regardless
// of the (8 kHz) rate requested from GetAudio().
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(SampleRate)) {
  const int kCodecId[] = {
      ACMCodecDB::kISAC, ACMCodecDB::kISACSWB, ACMCodecDB::kISACFB,
      -1  // Terminator.
  };
  AddSetOfCodecs(kCodecId);

  AudioFrame frame;
  const int kOutSampleRateHz = 8000;  // Different than codec sample rate.
  int n = 0;
  while (kCodecId[n] >= 0) {
    // One packet spans this many 10 ms GetAudio() calls.
    const int num_10ms_frames = codecs_[kCodecId[n]].pacsize /
        (codecs_[kCodecId[n]].plfreq / 100);
    InsertOnePacketOfSilence(kCodecId[n]);
    for (int k = 0; k < num_10ms_frames; ++k) {
      EXPECT_EQ(0, receiver_->GetAudio(kOutSampleRateHz, &frame));
    }
    EXPECT_EQ(std::min(32000, codecs_[kCodecId[n]].plfreq),
              receiver_->current_sample_rate_hz());
    ++n;
  }
}
|
||||
|
||||
// Verify that the playout mode is set correctly.
|
||||
// SetPlayoutMode()/PlayoutMode() round-trip for every supported mode.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(PlayoutMode)) {
  receiver_->SetPlayoutMode(voice);
  EXPECT_EQ(voice, receiver_->PlayoutMode());

  receiver_->SetPlayoutMode(streaming);
  EXPECT_EQ(streaming, receiver_->PlayoutMode());

  receiver_->SetPlayoutMode(fax);
  EXPECT_EQ(fax, receiver_->PlayoutMode());

  receiver_->SetPlayoutMode(off);
  EXPECT_EQ(off, receiver_->PlayoutMode());
}
|
||||
|
||||
// With post-decoding VAD enabled, decoded silence must be classified as
// kVadPassive; with VAD disabled, activity must be kVadUnknown.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(PostdecodingVad)) {
  receiver_->EnableVad();
  EXPECT_TRUE(receiver_->vad_enabled());

  const int id = ACMCodecDB::kPCM16Bwb;
  ASSERT_EQ(0, receiver_->AddCodec(id, codecs_[id].pltype, codecs_[id].channels,
                                   NULL));
  const int kNumPackets = 5;
  // 10 ms GetAudio() calls needed to drain one packet.
  const int num_10ms_frames = codecs_[id].pacsize / (codecs_[id].plfreq / 100);
  AudioFrame frame;
  for (int n = 0; n < kNumPackets; ++n) {
    InsertOnePacketOfSilence(id);
    for (int k = 0; k < num_10ms_frames; ++k)
      ASSERT_EQ(0, receiver_->GetAudio(codecs_[id].plfreq, &frame));
  }
  EXPECT_EQ(AudioFrame::kVadPassive, frame.vad_activity_);

  receiver_->DisableVad();
  EXPECT_FALSE(receiver_->vad_enabled());

  for (int n = 0; n < kNumPackets; ++n) {
    InsertOnePacketOfSilence(id);
    for (int k = 0; k < num_10ms_frames; ++k)
      ASSERT_EQ(0, receiver_->GetAudio(codecs_[id].plfreq, &frame));
  }
  EXPECT_EQ(AudioFrame::kVadUnknown, frame.vad_activity_);
}
|
||||
|
||||
// LastAudioCodec() and friends must report the last *speech* codec received,
// ignoring CNG/DTX packets: undefined (-1) until speech arrives, and stable
// across subsequent DTX packets once it has been set.
TEST_F(AcmReceiverTest, DISABLED_ON_ANDROID(LastAudioCodec)) {
  const int kCodecId[] = {
      ACMCodecDB::kISAC, ACMCodecDB::kPCMA, ACMCodecDB::kISACSWB,
      ACMCodecDB::kPCM16Bswb32kHz, ACMCodecDB::kG722_1C_48,
      -1  // Terminator.
  };
  AddSetOfCodecs(kCodecId);

  const int kCngId[] = {  // Not including full-band.
      ACMCodecDB::kCNNB, ACMCodecDB::kCNWB, ACMCodecDB::kCNSWB,
      -1  // Terminator.
  };
  AddSetOfCodecs(kCngId);

  // Register CNG at sender side.
  int n = 0;
  while (kCngId[n] > 0) {
    ASSERT_EQ(0, acm_->RegisterSendCodec(codecs_[kCngId[n]]));
    ++n;
  }

  CodecInst codec;
  // No audio payload is received.
  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));

  // Start with sending DTX.
  ASSERT_EQ(0, acm_->SetVAD(true, true, VADVeryAggr));
  packet_sent_ = false;
  InsertOnePacketOfSilence(kCodecId[0]);  // Enough to test with one codec.
  ASSERT_TRUE(packet_sent_);
  EXPECT_EQ(kAudioFrameCN, last_frame_type_);

  // Has received, only, DTX. Last Audio codec is undefined.
  EXPECT_EQ(-1, receiver_->LastAudioCodec(&codec));
  EXPECT_EQ(-1, receiver_->last_audio_codec_id());
  EXPECT_EQ(-1, receiver_->last_audio_payload_type());

  n = 0;
  while (kCodecId[n] >= 0) {  // Loop over codecs.
    // Set DTX off to send audio payload.
    acm_->SetVAD(false, false, VADAggr);
    packet_sent_ = false;
    InsertOnePacketOfSilence(kCodecId[n]);

    // Sanity check: an audio payload must have been received, and it should
    // be of type "speech."
    ASSERT_TRUE(packet_sent_);
    ASSERT_EQ(kAudioFrameSpeech, last_frame_type_);
    EXPECT_EQ(kCodecId[n], receiver_->last_audio_codec_id());

    // Set VAD on to send DTX. Then check if the "Last Audio codec" returns
    // the expected codec.
    acm_->SetVAD(true, true, VADAggr);

    // Keep encoding until a DTX packet is sent.
    while (last_frame_type_ != kAudioFrameCN) {
      packet_sent_ = false;
      InsertOnePacketOfSilence(kCodecId[n]);
      ASSERT_TRUE(packet_sent_);
    }
    // The DTX packet must not have overwritten the last audio codec.
    EXPECT_EQ(kCodecId[n], receiver_->last_audio_codec_id());
    EXPECT_EQ(codecs_[kCodecId[n]].pltype,
              receiver_->last_audio_payload_type());
    EXPECT_EQ(0, receiver_->LastAudioCodec(&codec));
    EXPECT_TRUE(CodecsEqual(codecs_[kCodecId[n]], codec));
    ++n;
  }
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
54
jni/webrtc/modules/audio_coding/main/acm2/acm_red.cc
Normal file
54
jni/webrtc/modules/audio_coding/main/acm2/acm_red.cc
Normal file
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_red.h"
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// Stores the codec ID (inherited member); RED keeps no encoder state.
ACMRED::ACMRED(int16_t codec_id) { codec_id_ = codec_id; }
|
||||
|
||||
// Nothing to release: RED owns no encoder instance.
ACMRED::~ACMRED() {}
|
||||
|
||||
// No-op: always returns 0 bytes encoded.
int16_t ACMRED::InternalEncode(uint8_t* /* bitstream */,
                               int16_t* /* bitstream_len_byte */) {
  // RED is never used as an encoder
  // RED has no instance
  return 0;
}
|
||||
|
||||
// No-op initializer; always succeeds.
int16_t ACMRED::InternalInitEncoder(WebRtcACMCodecParams* /* codec_params */) {
  // This codec does not need initialization,
  // RED has no instance
  return 0;
}
|
||||
|
||||
// RED cannot be cloned; always returns NULL.
ACMGenericCodec* ACMRED::CreateInstance(void) { return NULL; }
|
||||
|
||||
// No encoder object to create; always succeeds.
int16_t ACMRED::InternalCreateEncoder() {
  // RED has no instance
  return 0;
}
|
||||
|
||||
// No encoder instance to destroy.
void ACMRED::InternalDestructEncoderInst(void* /* ptr_inst */) {
  // RED has no instance
}
|
||||
|
||||
// No encoder state to tear down.
void ACMRED::DestructEncoderSafe() {
  // RED has no instance
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
44
jni/webrtc/modules/audio_coding/main/acm2/acm_red.h
Normal file
44
jni/webrtc/modules/audio_coding/main/acm2/acm_red.h
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RED_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RED_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
// Placeholder codec for RED (redundant coding, RFC 2198). It satisfies the
// ACMGenericCodec interface but performs no encoding itself: every override
// below is a stub, since RED payloads are assembled elsewhere.
class ACMRED : public ACMGenericCodec {
 public:
  explicit ACMRED(int16_t codec_id);
  ~ACMRED();

  // For FEC. Always returns NULL — RED cannot be instantiated as a codec.
  ACMGenericCodec* CreateInstance(void);

  // Stub; produces no bitstream.
  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);

  // Stub; RED needs no initialization.
  int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);

 protected:
  void DestructEncoderSafe();

  int16_t InternalCreateEncoder();

  void InternalDestructEncoderInst(void* ptr_inst);
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RED_H_
|
||||
68
jni/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
Normal file
68
jni/webrtc/modules/audio_coding/main/acm2/acm_resampler.cc
Normal file
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "webrtc/common_audio/resampler/include/resampler.h"
|
||||
#include "webrtc/system_wrappers/interface/logging.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace acm2 {
|
||||
|
||||
// Default construction; the underlying resampler is initialized lazily in
// Resample10Msec().
ACMResampler::ACMResampler() {
}
|
||||
|
||||
ACMResampler::~ACMResampler() {
}
|
||||
|
||||
int ACMResampler::Resample10Msec(const int16_t* in_audio,
|
||||
int in_freq_hz,
|
||||
int out_freq_hz,
|
||||
int num_audio_channels,
|
||||
int out_capacity_samples,
|
||||
int16_t* out_audio) {
|
||||
int in_length = in_freq_hz * num_audio_channels / 100;
|
||||
int out_length = out_freq_hz * num_audio_channels / 100;
|
||||
if (in_freq_hz == out_freq_hz) {
|
||||
if (out_capacity_samples < in_length) {
|
||||
assert(false);
|
||||
return -1;
|
||||
}
|
||||
memcpy(out_audio, in_audio, in_length * sizeof(int16_t));
|
||||
return in_length / num_audio_channels;
|
||||
}
|
||||
|
||||
if (resampler_.InitializeIfNeeded(in_freq_hz, out_freq_hz,
|
||||
num_audio_channels) != 0) {
|
||||
LOG_FERR3(LS_ERROR, InitializeIfNeeded, in_freq_hz, out_freq_hz,
|
||||
num_audio_channels);
|
||||
return -1;
|
||||
}
|
||||
|
||||
out_length =
|
||||
resampler_.Resample(in_audio, in_length, out_audio, out_capacity_samples);
|
||||
if (out_length == -1) {
|
||||
LOG_FERR4(LS_ERROR,
|
||||
Resample,
|
||||
in_audio,
|
||||
in_length,
|
||||
out_audio,
|
||||
out_capacity_samples);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return out_length / num_audio_channels;
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
} // namespace webrtc
|
||||
39
jni/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
Normal file
39
jni/webrtc/modules/audio_coding/main/acm2/acm_resampler.h
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
|
||||
|
||||
#include "webrtc/common_audio/resampler/include/push_resampler.h"
|
||||
#include "webrtc/typedefs.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace acm2 {
|
||||
|
||||
// Thin wrapper around PushResampler used by the ACM receive path to convert
// decoded 10 ms audio blocks to the sample rate requested by the caller.
class ACMResampler {
 public:
  ACMResampler();
  ~ACMResampler();

  // Resamples one 10 ms block of |num_audio_channels|-interleaved samples
  // from |in_freq_hz| to |out_freq_hz| into |out_audio| (capacity
  // |out_capacity_samples| total samples). Returns samples per channel
  // produced, or -1 on error.
  int Resample10Msec(const int16_t* in_audio,
                     int in_freq_hz,
                     int out_freq_hz,
                     int num_audio_channels,
                     int out_capacity_samples,
                     int16_t* out_audio);

 private:
  PushResampler<int16_t> resampler_;
};
|
||||
|
||||
} // namespace acm2
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_RESAMPLER_H_
|
||||
139
jni/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
Normal file
139
jni/webrtc/modules/audio_coding/main/acm2/acm_send_test.cc
Normal file
@@ -0,0 +1,139 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_send_test.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "webrtc/base/checks.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
|
||||
|
||||
namespace webrtc {
|
||||
namespace test {
|
||||
|
||||
AcmSendTest::AcmSendTest(InputAudioFile* audio_source,
|
||||
int source_rate_hz,
|
||||
int test_duration_ms)
|
||||
: clock_(0),
|
||||
acm_(webrtc::AudioCodingModule::Create(0, &clock_)),
|
||||
audio_source_(audio_source),
|
||||
source_rate_hz_(source_rate_hz),
|
||||
input_block_size_samples_(source_rate_hz_ * kBlockSizeMs / 1000),
|
||||
codec_registered_(false),
|
||||
test_duration_ms_(test_duration_ms),
|
||||
frame_type_(kAudioFrameSpeech),
|
||||
payload_type_(0),
|
||||
timestamp_(0),
|
||||
sequence_number_(0) {
|
||||
input_frame_.sample_rate_hz_ = source_rate_hz_;
|
||||
input_frame_.num_channels_ = 1;
|
||||
input_frame_.samples_per_channel_ = input_block_size_samples_;
|
||||
assert(input_block_size_samples_ * input_frame_.num_channels_ <=
|
||||
AudioFrame::kMaxDataSizeSamples);
|
||||
acm_->RegisterTransportCallback(this);
|
||||
}
|
||||
|
||||
bool AcmSendTest::RegisterCodec(const char* payload_name,
|
||||
int sampling_freq_hz,
|
||||
int channels,
|
||||
int payload_type,
|
||||
int frame_size_samples) {
|
||||
FATAL_ERROR_IF(AudioCodingModule::Codec(
|
||||
payload_name, &codec_, sampling_freq_hz, channels) != 0);
|
||||
codec_.pltype = payload_type;
|
||||
codec_.pacsize = frame_size_samples;
|
||||
codec_registered_ = (acm_->RegisterSendCodec(codec_) == 0);
|
||||
assert(channels == 1); // TODO(henrik.lundin) Add multi-channel support.
|
||||
input_frame_.num_channels_ = channels;
|
||||
assert(input_block_size_samples_ * input_frame_.num_channels_ <=
|
||||
AudioFrame::kMaxDataSizeSamples);
|
||||
return codec_registered_;
|
||||
}
|
||||
|
||||
Packet* AcmSendTest::NextPacket() {
|
||||
assert(codec_registered_);
|
||||
if (filter_.test(payload_type_)) {
|
||||
// This payload type should be filtered out. Since the payload type is the
|
||||
// same throughout the whole test run, no packet at all will be delivered.
|
||||
// We can just as well signal that the test is over by returning NULL.
|
||||
return NULL;
|
||||
}
|
||||
// Insert audio and process until one packet is produced.
|
||||
while (clock_.TimeInMilliseconds() < test_duration_ms_) {
|
||||
clock_.AdvanceTimeMilliseconds(kBlockSizeMs);
|
||||
FATAL_ERROR_IF(
|
||||
!audio_source_->Read(input_block_size_samples_, input_frame_.data_));
|
||||
FATAL_ERROR_IF(acm_->Add10MsData(input_frame_) != 0);
|
||||
input_frame_.timestamp_ += input_block_size_samples_;
|
||||
int32_t encoded_bytes = acm_->Process();
|
||||
if (encoded_bytes > 0) {
|
||||
// Encoded packet received.
|
||||
return CreatePacket();
|
||||
}
|
||||
}
|
||||
// Test ended.
|
||||
return NULL;
|
||||
}
|
||||
|
||||
// This method receives the callback from ACM when a new packet is produced.
|
||||
int32_t AcmSendTest::SendData(FrameType frame_type,
|
||||
uint8_t payload_type,
|
||||
uint32_t timestamp,
|
||||
const uint8_t* payload_data,
|
||||
uint16_t payload_len_bytes,
|
||||
const RTPFragmentationHeader* fragmentation) {
|
||||
// Store the packet locally.
|
||||
frame_type_ = frame_type;
|
||||
payload_type_ = payload_type;
|
||||
timestamp_ = timestamp;
|
||||
last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
|
||||
assert(last_payload_vec_.size() == payload_len_bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
Packet* AcmSendTest::CreatePacket() {
|
||||
const size_t kRtpHeaderSize = 12;
|
||||
size_t allocated_bytes = last_payload_vec_.size() + kRtpHeaderSize;
|
||||
uint8_t* packet_memory = new uint8_t[allocated_bytes];
|
||||
// Populate the header bytes.
|
||||
packet_memory[0] = 0x80;
|
||||
packet_memory[1] = payload_type_;
|
||||
packet_memory[2] = (sequence_number_ >> 8) & 0xFF;
|
||||
packet_memory[3] = (sequence_number_) & 0xFF;
|
||||
packet_memory[4] = (timestamp_ >> 24) & 0xFF;
|
||||
packet_memory[5] = (timestamp_ >> 16) & 0xFF;
|
||||
packet_memory[6] = (timestamp_ >> 8) & 0xFF;
|
||||
packet_memory[7] = timestamp_ & 0xFF;
|
||||
// Set SSRC to 0x12345678.
|
||||
packet_memory[8] = 0x12;
|
||||
packet_memory[9] = 0x34;
|
||||
packet_memory[10] = 0x56;
|
||||
packet_memory[11] = 0x78;
|
||||
|
||||
++sequence_number_;
|
||||
|
||||
// Copy the payload data.
|
||||
memcpy(packet_memory + kRtpHeaderSize,
|
||||
&last_payload_vec_[0],
|
||||
last_payload_vec_.size());
|
||||
Packet* packet =
|
||||
new Packet(packet_memory, allocated_bytes, clock_.TimeInMilliseconds());
|
||||
assert(packet);
|
||||
assert(packet->valid_header());
|
||||
return packet;
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
85
jni/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
Normal file
85
jni/webrtc/modules/audio_coding/main/acm2/acm_send_test.h
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "webrtc/base/constructormagic.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/packet_source.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace test {
|
||||
class InputAudioFile;
|
||||
class Packet;
|
||||
|
||||
class AcmSendTest : public AudioPacketizationCallback, public PacketSource {
|
||||
public:
|
||||
AcmSendTest(InputAudioFile* audio_source,
|
||||
int source_rate_hz,
|
||||
int test_duration_ms);
|
||||
virtual ~AcmSendTest() {}
|
||||
|
||||
// Registers the send codec. Returns true on success, false otherwise.
|
||||
bool RegisterCodec(const char* payload_name,
|
||||
int sampling_freq_hz,
|
||||
int channels,
|
||||
int payload_type,
|
||||
int frame_size_samples);
|
||||
|
||||
// Returns the next encoded packet. Returns NULL if the test duration was
|
||||
// exceeded. Ownership of the packet is handed over to the caller.
|
||||
// Inherited from PacketSource.
|
||||
Packet* NextPacket();
|
||||
|
||||
// Inherited from AudioPacketizationCallback.
|
||||
virtual int32_t SendData(
|
||||
FrameType frame_type,
|
||||
uint8_t payload_type,
|
||||
uint32_t timestamp,
|
||||
const uint8_t* payload_data,
|
||||
uint16_t payload_len_bytes,
|
||||
const RTPFragmentationHeader* fragmentation) OVERRIDE;
|
||||
|
||||
private:
|
||||
static const int kBlockSizeMs = 10;
|
||||
|
||||
// Creates a Packet object from the last packet produced by ACM (and received
|
||||
// through the SendData method as a callback). Ownership of the new Packet
|
||||
// object is transferred to the caller.
|
||||
Packet* CreatePacket();
|
||||
|
||||
SimulatedClock clock_;
|
||||
scoped_ptr<AudioCodingModule> acm_;
|
||||
InputAudioFile* audio_source_;
|
||||
int source_rate_hz_;
|
||||
const int input_block_size_samples_;
|
||||
AudioFrame input_frame_;
|
||||
CodecInst codec_;
|
||||
bool codec_registered_;
|
||||
int test_duration_ms_;
|
||||
// The following member variables are set whenever SendData() is called.
|
||||
FrameType frame_type_;
|
||||
int payload_type_;
|
||||
uint32_t timestamp_;
|
||||
uint16_t sequence_number_;
|
||||
std::vector<uint8_t> last_payload_vec_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(AcmSendTest);
|
||||
};
|
||||
|
||||
} // namespace test
|
||||
} // namespace webrtc
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SEND_TEST_H_
|
||||
333
jni/webrtc/modules/audio_coding/main/acm2/acm_speex.cc
Normal file
333
jni/webrtc/modules/audio_coding/main/acm2/acm_speex.cc
Normal file
@@ -0,0 +1,333 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_speex.h"
|
||||
|
||||
#ifdef WEBRTC_CODEC_SPEEX
|
||||
// NOTE! Speex is not included in the open-source package. Modify this file or
|
||||
// your codec API to match the function calls and names of used Speex API file.
|
||||
#include "webrtc/modules/audio_coding/main/codecs/speex/interface/speex_interface.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
#endif
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
#ifndef WEBRTC_CODEC_SPEEX
|
||||
ACMSPEEX::ACMSPEEX(int16_t /* codec_id */)
|
||||
: encoder_inst_ptr_(NULL),
|
||||
compl_mode_(0),
|
||||
vbr_enabled_(false),
|
||||
encoding_rate_(-1),
|
||||
sampling_frequency_(-1),
|
||||
samples_in_20ms_audio_(0xFFFF) {
|
||||
return;
|
||||
}
|
||||
|
||||
ACMSPEEX::~ACMSPEEX() { return; }
|
||||
|
||||
int16_t ACMSPEEX::InternalEncode(uint8_t* /* bitstream */,
|
||||
int16_t* /* bitstream_len_byte */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::EnableDTX() { return -1; }
|
||||
|
||||
int16_t ACMSPEEX::DisableDTX() { return -1; }
|
||||
|
||||
int16_t ACMSPEEX::InternalInitEncoder(
|
||||
WebRtcACMCodecParams* /* codec_params */) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMSPEEX::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMSPEEX::InternalCreateEncoder() { return -1; }
|
||||
|
||||
void ACMSPEEX::DestructEncoderSafe() { return; }
|
||||
|
||||
int16_t ACMSPEEX::SetBitRateSafe(const int32_t /* rate */) { return -1; }
|
||||
|
||||
void ACMSPEEX::InternalDestructEncoderInst(void* /* ptr_inst */) { return; }
|
||||
|
||||
#ifdef UNUSEDSPEEX
|
||||
int16_t ACMSPEEX::EnableVBR() { return -1; }
|
||||
|
||||
int16_t ACMSPEEX::DisableVBR() { return -1; }
|
||||
|
||||
int16_t ACMSPEEX::SetComplMode(int16_t mode) { return -1; }
|
||||
#endif
|
||||
|
||||
#else //===================== Actual Implementation =======================
|
||||
|
||||
ACMSPEEX::ACMSPEEX(int16_t codec_id) : encoder_inst_ptr_(NULL) {
|
||||
codec_id_ = codec_id;
|
||||
|
||||
// Set sampling frequency, frame size and rate Speex
|
||||
if (codec_id_ == ACMCodecDB::kSPEEX8) {
|
||||
sampling_frequency_ = 8000;
|
||||
samples_in_20ms_audio_ = 160;
|
||||
encoding_rate_ = 11000;
|
||||
} else if (codec_id_ == ACMCodecDB::kSPEEX16) {
|
||||
sampling_frequency_ = 16000;
|
||||
samples_in_20ms_audio_ = 320;
|
||||
encoding_rate_ = 22000;
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Wrong codec id for Speex.");
|
||||
|
||||
sampling_frequency_ = -1;
|
||||
samples_in_20ms_audio_ = -1;
|
||||
encoding_rate_ = -1;
|
||||
}
|
||||
|
||||
has_internal_dtx_ = true;
|
||||
dtx_enabled_ = false;
|
||||
vbr_enabled_ = false;
|
||||
compl_mode_ = 3; // default complexity value
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
ACMSPEEX::~ACMSPEEX() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcSpeex_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::InternalEncode(uint8_t* bitstream,
|
||||
int16_t* bitstream_len_byte) {
|
||||
int16_t status;
|
||||
int16_t num_encoded_samples = 0;
|
||||
int16_t n = 0;
|
||||
|
||||
while (num_encoded_samples < frame_len_smpl_) {
|
||||
status = WebRtcSpeex_Encode(
|
||||
encoder_inst_ptr_, &in_audio_[in_audio_ix_read_], encoding_rate_);
|
||||
|
||||
// increment the read index this tell the caller that how far
|
||||
// we have gone forward in reading the audio buffer
|
||||
in_audio_ix_read_ += samples_in_20ms_audio_;
|
||||
num_encoded_samples += samples_in_20ms_audio_;
|
||||
|
||||
if (status < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Error in Speex encoder");
|
||||
return status;
|
||||
}
|
||||
|
||||
// Update VAD, if internal DTX is used
|
||||
if (has_internal_dtx_ && dtx_enabled_) {
|
||||
vad_label_[n++] = status;
|
||||
vad_label_[n++] = status;
|
||||
}
|
||||
|
||||
if (status == 0) {
|
||||
// This frame is detected as inactive. We need send whatever
|
||||
// encoded so far.
|
||||
*bitstream_len_byte = WebRtcSpeex_GetBitstream(
|
||||
encoder_inst_ptr_, reinterpret_cast<int16_t*>(bitstream));
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
}
|
||||
|
||||
*bitstream_len_byte = WebRtcSpeex_GetBitstream(
|
||||
encoder_inst_ptr_, reinterpret_cast<int16_t*>(bitstream));
|
||||
return *bitstream_len_byte;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::EnableDTX() {
|
||||
if (dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// enable DTX
|
||||
if (WebRtcSpeex_EncoderInit(encoder_inst_ptr_, vbr_enabled_ ? 1 : 0,
|
||||
compl_mode_, 1) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Cannot enable DTX for Speex");
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = true;
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::DisableDTX() {
|
||||
if (!dtx_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// disable DTX
|
||||
if (WebRtcSpeex_EncoderInit(encoder_inst_ptr_, (vbr_enabled_ ? 1 : 0),
|
||||
compl_mode_, 0) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Cannot disable DTX for Speex");
|
||||
return -1;
|
||||
}
|
||||
dtx_enabled_ = false;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::InternalInitEncoder(WebRtcACMCodecParams* codec_params) {
|
||||
// sanity check
|
||||
if (encoder_inst_ptr_ == NULL) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError,
|
||||
webrtc::kTraceAudioCoding,
|
||||
unique_id_,
|
||||
"Cannot initialize Speex encoder, instance does not exist");
|
||||
return -1;
|
||||
}
|
||||
|
||||
int16_t status = SetBitRateSafe((codec_params->codecInstant).rate);
|
||||
status += (WebRtcSpeex_EncoderInit(encoder_inst_ptr_,
|
||||
vbr_enabled_,
|
||||
compl_mode_,
|
||||
((codec_params->enable_dtx) ? 1 : 0)) < 0)
|
||||
? -1
|
||||
: 0;
|
||||
|
||||
if (status >= 0) {
|
||||
return 0;
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Error in initialization of Speex encoder");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
ACMGenericCodec* ACMSPEEX::CreateInstance(void) { return NULL; }
|
||||
|
||||
int16_t ACMSPEEX::InternalCreateEncoder() {
|
||||
return WebRtcSpeex_CreateEnc(&encoder_inst_ptr_, sampling_frequency_);
|
||||
}
|
||||
|
||||
void ACMSPEEX::DestructEncoderSafe() {
|
||||
if (encoder_inst_ptr_ != NULL) {
|
||||
WebRtcSpeex_FreeEnc(encoder_inst_ptr_);
|
||||
encoder_inst_ptr_ = NULL;
|
||||
}
|
||||
// there is no encoder set the following
|
||||
encoder_exist_ = false;
|
||||
encoder_initialized_ = false;
|
||||
encoding_rate_ = 0;
|
||||
}
|
||||
|
||||
int16_t ACMSPEEX::SetBitRateSafe(const int32_t rate) {
|
||||
// Check if changed rate
|
||||
if (rate == encoding_rate_) {
|
||||
return 0;
|
||||
} else if (rate > 2000) {
|
||||
encoding_rate_ = rate;
|
||||
encoder_params_.codecInstant.rate = rate;
|
||||
} else {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Unsupported encoding rate for Speex");
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ACMSPEEX::InternalDestructEncoderInst(void* ptr_inst) {
|
||||
if (ptr_inst != NULL) {
|
||||
WebRtcSpeex_FreeEnc(static_cast<SPEEX_encinst_t_*>(ptr_inst));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef UNUSEDSPEEX
|
||||
|
||||
// This API is currently not in use. If requested to be able to enable/disable
|
||||
// VBR an ACM API need to be added.
|
||||
int16_t ACMSPEEX::EnableVBR() {
|
||||
if (vbr_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// enable Variable Bit Rate (VBR)
|
||||
if (WebRtcSpeex_EncoderInit(encoder_inst_ptr_, 1, compl_mode_,
|
||||
(dtx_enabled_ ? 1 : 0)) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Cannot enable VBR mode for Speex");
|
||||
|
||||
return -1;
|
||||
}
|
||||
vbr_enabled_ = true;
|
||||
return 0;
|
||||
} else {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
// This API is currently not in use. If requested to be able to enable/disable
|
||||
// VBR an ACM API need to be added.
|
||||
int16_t ACMSPEEX::DisableVBR() {
|
||||
if (!vbr_enabled_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// disable DTX
|
||||
if (WebRtcSpeex_EncoderInit(encoder_inst_ptr_, 0, compl_mode_,
|
||||
(dtx_enabled_ ? 1 : 0)) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Cannot disable DTX for Speex");
|
||||
|
||||
return -1;
|
||||
}
|
||||
vbr_enabled_ = false;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// This API is currently not in use. If requested to be able to set complexity
|
||||
// an ACM API need to be added.
|
||||
int16_t ACMSPEEX::SetComplMode(int16_t mode) {
|
||||
// Check if new mode
|
||||
if (mode == compl_mode_) {
|
||||
return 0;
|
||||
} else if (encoder_exist_) { // check if encoder exist
|
||||
// Set new mode
|
||||
if (WebRtcSpeex_EncoderInit(encoder_inst_ptr_, 0, mode,
|
||||
(dtx_enabled_ ? 1 : 0)) < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, unique_id_,
|
||||
"Error in complexity mode for Speex");
|
||||
return -1;
|
||||
}
|
||||
compl_mode_ = mode;
|
||||
return 0;
|
||||
} else {
|
||||
// encoder doesn't exists, therefore disabling is harmless
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
69
jni/webrtc/modules/audio_coding/main/acm2/acm_speex.h
Normal file
69
jni/webrtc/modules/audio_coding/main/acm2/acm_speex.h
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SPEEX_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SPEEX_H_
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_generic_codec.h"
|
||||
|
||||
// forward declaration
|
||||
struct SPEEX_encinst_t_;
|
||||
struct SPEEX_decinst_t_;
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMSPEEX : public ACMGenericCodec {
|
||||
public:
|
||||
explicit ACMSPEEX(int16_t codec_id);
|
||||
~ACMSPEEX();
|
||||
|
||||
// For FEC.
|
||||
ACMGenericCodec* CreateInstance(void);
|
||||
|
||||
int16_t InternalEncode(uint8_t* bitstream, int16_t* bitstream_len_byte);
|
||||
|
||||
int16_t InternalInitEncoder(WebRtcACMCodecParams* codec_params);
|
||||
|
||||
protected:
|
||||
void DestructEncoderSafe();
|
||||
|
||||
int16_t InternalCreateEncoder();
|
||||
|
||||
void InternalDestructEncoderInst(void* ptr_inst);
|
||||
|
||||
int16_t SetBitRateSafe(const int32_t rate);
|
||||
|
||||
int16_t EnableDTX();
|
||||
|
||||
int16_t DisableDTX();
|
||||
|
||||
#ifdef UNUSEDSPEEX
|
||||
int16_t EnableVBR();
|
||||
|
||||
int16_t DisableVBR();
|
||||
|
||||
int16_t SetComplMode(int16_t mode);
|
||||
#endif
|
||||
|
||||
SPEEX_encinst_t_* encoder_inst_ptr_;
|
||||
int16_t compl_mode_;
|
||||
bool vbr_enabled_;
|
||||
int32_t encoding_rate_;
|
||||
int16_t sampling_frequency_;
|
||||
uint16_t samples_in_20ms_audio_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_ACM_SPEEX_H_
|
||||
@@ -0,0 +1,97 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/trace.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
// Create module
|
||||
AudioCodingModule* AudioCodingModule::Create(int id) {
|
||||
return Create(id, Clock::GetRealTimeClock());
|
||||
}
|
||||
|
||||
AudioCodingModule* AudioCodingModule::Create(int id, Clock* clock) {
|
||||
AudioCodingModule::Config config;
|
||||
config.id = id;
|
||||
config.clock = clock;
|
||||
return new acm2::AudioCodingModuleImpl(config);
|
||||
}
|
||||
|
||||
// Get number of supported codecs
|
||||
int AudioCodingModule::NumberOfCodecs() {
|
||||
return acm2::ACMCodecDB::kNumCodecs;
|
||||
}
|
||||
|
||||
// Get supported codec parameters with id
|
||||
int AudioCodingModule::Codec(int list_id, CodecInst* codec) {
|
||||
// Get the codec settings for the codec with the given list ID
|
||||
return acm2::ACMCodecDB::Codec(list_id, codec);
|
||||
}
|
||||
|
||||
// Get supported codec parameters with name, frequency and number of channels.
|
||||
int AudioCodingModule::Codec(const char* payload_name,
|
||||
CodecInst* codec,
|
||||
int sampling_freq_hz,
|
||||
int channels) {
|
||||
int codec_id;
|
||||
|
||||
// Get the id of the codec from the database.
|
||||
codec_id = acm2::ACMCodecDB::CodecId(
|
||||
payload_name, sampling_freq_hz, channels);
|
||||
if (codec_id < 0) {
|
||||
// We couldn't find a matching codec, set the parameters to unacceptable
|
||||
// values and return.
|
||||
codec->plname[0] = '\0';
|
||||
codec->pltype = -1;
|
||||
codec->pacsize = 0;
|
||||
codec->rate = 0;
|
||||
codec->plfreq = 0;
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Get default codec settings.
|
||||
acm2::ACMCodecDB::Codec(codec_id, codec);
|
||||
|
||||
// Keep the number of channels from the function call. For most codecs it
|
||||
// will be the same value as in default codec settings, but not for all.
|
||||
codec->channels = channels;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Get supported codec Index with name, frequency and number of channels.
|
||||
int AudioCodingModule::Codec(const char* payload_name,
|
||||
int sampling_freq_hz,
|
||||
int channels) {
|
||||
return acm2::ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels);
|
||||
}
|
||||
|
||||
// Checks the validity of the parameters of the given codec
|
||||
bool AudioCodingModule::IsCodecValid(const CodecInst& codec) {
|
||||
int mirror_id;
|
||||
|
||||
int codec_number = acm2::ACMCodecDB::CodecNumber(codec, &mirror_id);
|
||||
|
||||
if (codec_number < 0) {
|
||||
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1,
|
||||
"Invalid codec setting");
|
||||
return false;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace webrtc
|
||||
@@ -0,0 +1,183 @@
|
||||
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
#
|
||||
# Use of this source code is governed by a BSD-style license
|
||||
# that can be found in the LICENSE file in the root of the source
|
||||
# tree. An additional intellectual property rights grant can be found
|
||||
# in the file PATENTS. All contributing project authors may
|
||||
# be found in the AUTHORS file in the root of the source tree.
|
||||
|
||||
{
|
||||
'variables': {
|
||||
'audio_coding_dependencies': [
|
||||
'CNG',
|
||||
'G711',
|
||||
'G722',
|
||||
'iLBC',
|
||||
'iSAC',
|
||||
'iSACFix',
|
||||
'PCM16B',
|
||||
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
],
|
||||
'audio_coding_defines': [],
|
||||
'conditions': [
|
||||
['include_opus==1', {
|
||||
'audio_coding_dependencies': ['webrtc_opus',],
|
||||
'audio_coding_defines': ['WEBRTC_CODEC_OPUS',],
|
||||
}],
|
||||
],
|
||||
},
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'audio_coding_module',
|
||||
'type': 'static_library',
|
||||
'defines': [
|
||||
'<@(audio_coding_defines)',
|
||||
],
|
||||
'dependencies': [
|
||||
'<@(audio_coding_dependencies)',
|
||||
'neteq',
|
||||
],
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
'<(webrtc_root)',
|
||||
],
|
||||
'direct_dependent_settings': {
|
||||
'include_dirs': [
|
||||
'../interface',
|
||||
'../../../interface',
|
||||
'<(webrtc_root)',
|
||||
],
|
||||
},
|
||||
'sources': [
|
||||
'../interface/audio_coding_module.h',
|
||||
'../interface/audio_coding_module_typedefs.h',
|
||||
'acm_amr.cc',
|
||||
'acm_amr.h',
|
||||
'acm_amrwb.cc',
|
||||
'acm_amrwb.h',
|
||||
'acm_celt.cc',
|
||||
'acm_celt.h',
|
||||
'acm_cng.cc',
|
||||
'acm_cng.h',
|
||||
'acm_codec_database.cc',
|
||||
'acm_codec_database.h',
|
||||
'acm_common_defs.h',
|
||||
'acm_dtmf_playout.cc',
|
||||
'acm_dtmf_playout.h',
|
||||
'acm_g722.cc',
|
||||
'acm_g722.h',
|
||||
'acm_g7221.cc',
|
||||
'acm_g7221.h',
|
||||
'acm_g7221c.cc',
|
||||
'acm_g7221c.h',
|
||||
'acm_g729.cc',
|
||||
'acm_g729.h',
|
||||
'acm_g7291.cc',
|
||||
'acm_g7291.h',
|
||||
'acm_generic_codec.cc',
|
||||
'acm_generic_codec.h',
|
||||
'acm_gsmfr.cc',
|
||||
'acm_gsmfr.h',
|
||||
'acm_ilbc.cc',
|
||||
'acm_ilbc.h',
|
||||
'acm_isac.cc',
|
||||
'acm_isac.h',
|
||||
'acm_isac_macros.h',
|
||||
'acm_opus.cc',
|
||||
'acm_opus.h',
|
||||
'acm_speex.cc',
|
||||
'acm_speex.h',
|
||||
'acm_pcm16b.cc',
|
||||
'acm_pcm16b.h',
|
||||
'acm_pcma.cc',
|
||||
'acm_pcma.h',
|
||||
'acm_pcmu.cc',
|
||||
'acm_pcmu.h',
|
||||
'acm_red.cc',
|
||||
'acm_red.h',
|
||||
'acm_receiver.cc',
|
||||
'acm_receiver.h',
|
||||
'acm_resampler.cc',
|
||||
'acm_resampler.h',
|
||||
'audio_coding_module.cc',
|
||||
'audio_coding_module_impl.cc',
|
||||
'audio_coding_module_impl.h',
|
||||
'call_statistics.cc',
|
||||
'call_statistics.h',
|
||||
'initial_delay_manager.cc',
|
||||
'initial_delay_manager.h',
|
||||
'nack.cc',
|
||||
'nack.h',
|
||||
],
|
||||
},
|
||||
],
|
||||
'conditions': [
|
||||
['include_tests==1', {
|
||||
'targets': [
|
||||
{
|
||||
'target_name': 'acm_receive_test',
|
||||
'type': 'static_library',
|
||||
'dependencies': [
|
||||
'audio_coding_module',
|
||||
'neteq_unittest_tools',
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
],
|
||||
'sources': [
|
||||
'acm_receive_test.cc',
|
||||
'acm_receive_test.h',
|
||||
],
|
||||
}, # acm_receive_test
|
||||
{
|
||||
'target_name': 'acm_send_test',
|
||||
'type': 'static_library',
|
||||
'dependencies': [
|
||||
'audio_coding_module',
|
||||
'neteq_unittest_tools',
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
],
|
||||
'sources': [
|
||||
'acm_send_test.cc',
|
||||
'acm_send_test.h',
|
||||
],
|
||||
}, # acm_send_test
|
||||
{
|
||||
'target_name': 'delay_test',
|
||||
'type': 'executable',
|
||||
'dependencies': [
|
||||
'audio_coding_module',
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
'<(webrtc_root)/test/test.gyp:test_support',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:field_trial_default',
|
||||
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
|
||||
],
|
||||
'sources': [
|
||||
'../test/delay_test.cc',
|
||||
'../test/Channel.cc',
|
||||
'../test/PCMFile.cc',
|
||||
'../test/utility.cc',
|
||||
],
|
||||
}, # delay_test
|
||||
{
|
||||
'target_name': 'insert_packet_with_timing',
|
||||
'type': 'executable',
|
||||
'dependencies': [
|
||||
'audio_coding_module',
|
||||
'<(DEPTH)/testing/gtest.gyp:gtest',
|
||||
'<(webrtc_root)/test/test.gyp:test_support',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
|
||||
'<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:field_trial_default',
|
||||
'<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
|
||||
],
|
||||
'sources': [
|
||||
'../test/insert_packet_with_timing.cc',
|
||||
'../test/Channel.cc',
|
||||
'../test/PCMFile.cc',
|
||||
],
|
||||
}, # delay_test
|
||||
],
|
||||
}],
|
||||
],
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,387 @@
|
||||
/*
|
||||
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/engine_configurations.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receiver.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class CriticalSectionWrapper;
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class ACMDTMFDetection;
|
||||
class ACMGenericCodec;
|
||||
|
||||
class AudioCodingModuleImpl : public AudioCodingModule {
|
||||
public:
|
||||
explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
|
||||
~AudioCodingModuleImpl();
|
||||
|
||||
// Change the unique identifier of this object.
|
||||
virtual int32_t ChangeUniqueId(const int32_t id);
|
||||
|
||||
// Returns the number of milliseconds until the module want a worker thread
|
||||
// to call Process.
|
||||
int32_t TimeUntilNextProcess();
|
||||
|
||||
// Process any pending tasks such as timeouts.
|
||||
int32_t Process();
|
||||
|
||||
/////////////////////////////////////////
|
||||
// Sender
|
||||
//
|
||||
|
||||
// Initialize send codec.
|
||||
int InitializeSender();
|
||||
|
||||
// Reset send codec.
|
||||
int ResetEncoder();
|
||||
|
||||
// Can be called multiple times for Codec, CNG, RED.
|
||||
int RegisterSendCodec(const CodecInst& send_codec);
|
||||
|
||||
// Register Secondary codec for dual-streaming. Dual-streaming is activated
|
||||
// right after the secondary codec is registered.
|
||||
int RegisterSecondarySendCodec(const CodecInst& send_codec);
|
||||
|
||||
// Unregister the secondary codec. Dual-streaming is deactivated right after
|
||||
// deregistering secondary codec.
|
||||
void UnregisterSecondarySendCodec();
|
||||
|
||||
// Get the secondary codec.
|
||||
int SecondarySendCodec(CodecInst* secondary_codec) const;
|
||||
|
||||
// Get current send codec.
|
||||
int SendCodec(CodecInst* current_codec) const;
|
||||
|
||||
// Get current send frequency.
|
||||
int SendFrequency() const;
|
||||
|
||||
// Get encode bit-rate.
|
||||
// Adaptive rate codecs return their current encode target rate, while other
|
||||
// codecs return there long-term average or their fixed rate.
|
||||
int SendBitrate() const;
|
||||
|
||||
// Set available bandwidth, inform the encoder about the
|
||||
// estimated bandwidth received from the remote party.
|
||||
virtual int SetReceivedEstimatedBandwidth(int bw);
|
||||
|
||||
// Register a transport callback which will be
|
||||
// called to deliver the encoded buffers.
|
||||
int RegisterTransportCallback(AudioPacketizationCallback* transport);
|
||||
|
||||
// Add 10 ms of raw (PCM) audio data to the encoder.
|
||||
int Add10MsData(const AudioFrame& audio_frame);
|
||||
|
||||
/////////////////////////////////////////
|
||||
// (RED) Redundant Coding
|
||||
//
|
||||
|
||||
// Configure RED status i.e. on/off.
|
||||
int SetREDStatus(bool enable_red);
|
||||
|
||||
// Get RED status.
|
||||
bool REDStatus() const;
|
||||
|
||||
/////////////////////////////////////////
|
||||
// (FEC) Forward Error Correction (codec internal)
|
||||
//
|
||||
|
||||
// Configure FEC status i.e. on/off.
|
||||
int SetCodecFEC(bool enabled_codec_fec);
|
||||
|
||||
// Get FEC status.
|
||||
bool CodecFEC() const;
|
||||
|
||||
// Set target packet loss rate
|
||||
int SetPacketLossRate(int loss_rate);
|
||||
|
||||
/////////////////////////////////////////
|
||||
// (VAD) Voice Activity Detection
|
||||
// and
|
||||
// (CNG) Comfort Noise Generation
|
||||
//
|
||||
|
||||
int SetVAD(bool enable_dtx = true,
|
||||
bool enable_vad = false,
|
||||
ACMVADMode mode = VADNormal);
|
||||
|
||||
int VAD(bool* dtx_enabled, bool* vad_enabled, ACMVADMode* mode) const;
|
||||
|
||||
int RegisterVADCallback(ACMVADCallback* vad_callback);
|
||||
|
||||
/////////////////////////////////////////
|
||||
// Receiver
|
||||
//
|
||||
|
||||
// Initialize receiver, resets codec database etc.
|
||||
int InitializeReceiver();
|
||||
|
||||
// Reset the decoder state.
|
||||
int ResetDecoder();
|
||||
|
||||
// Get current receive frequency.
|
||||
int ReceiveFrequency() const;
|
||||
|
||||
// Get current playout frequency.
|
||||
int PlayoutFrequency() const;
|
||||
|
||||
// Register possible receive codecs, can be called multiple times,
|
||||
// for codecs, CNG, DTMF, RED.
|
||||
int RegisterReceiveCodec(const CodecInst& receive_codec);
|
||||
|
||||
// Get current received codec.
|
||||
int ReceiveCodec(CodecInst* current_codec) const;
|
||||
|
||||
// Incoming packet from network parsed and ready for decode.
|
||||
int IncomingPacket(const uint8_t* incoming_payload,
|
||||
int payload_length,
|
||||
const WebRtcRTPHeader& rtp_info);
|
||||
|
||||
// Incoming payloads, without rtp-info, the rtp-info will be created in ACM.
|
||||
// One usage for this API is when pre-encoded files are pushed in ACM.
|
||||
int IncomingPayload(const uint8_t* incoming_payload,
|
||||
int payload_length,
|
||||
uint8_t payload_type,
|
||||
uint32_t timestamp);
|
||||
|
||||
// Minimum playout delay.
|
||||
int SetMinimumPlayoutDelay(int time_ms);
|
||||
|
||||
// Maximum playout delay.
|
||||
int SetMaximumPlayoutDelay(int time_ms);
|
||||
|
||||
// Smallest latency NetEq will maintain.
|
||||
int LeastRequiredDelayMs() const;
|
||||
|
||||
// Impose an initial delay on playout. ACM plays silence until |delay_ms|
|
||||
// audio is accumulated in NetEq buffer, then starts decoding payloads.
|
||||
int SetInitialPlayoutDelay(int delay_ms);
|
||||
|
||||
// TODO(turajs): DTMF playout is always activated in NetEq these APIs should
|
||||
// be removed, as well as all VoE related APIs and methods.
|
||||
//
|
||||
// Configure Dtmf playout status i.e on/off playout the incoming outband Dtmf
|
||||
// tone.
|
||||
int SetDtmfPlayoutStatus(bool enable) { return 0; }
|
||||
|
||||
// Get Dtmf playout status.
|
||||
bool DtmfPlayoutStatus() const { return true; }
|
||||
|
||||
// Estimate the Bandwidth based on the incoming stream, needed
|
||||
// for one way audio where the RTCP send the BW estimate.
|
||||
// This is also done in the RTP module .
|
||||
int DecoderEstimatedBandwidth() const;
|
||||
|
||||
// Set playout mode voice, fax.
|
||||
int SetPlayoutMode(AudioPlayoutMode mode);
|
||||
|
||||
// Get playout mode voice, fax.
|
||||
AudioPlayoutMode PlayoutMode() const;
|
||||
|
||||
// Get playout timestamp.
|
||||
int PlayoutTimestamp(uint32_t* timestamp);
|
||||
|
||||
// Get 10 milliseconds of raw audio data to play out, and
|
||||
// automatic resample to the requested frequency if > 0.
|
||||
int PlayoutData10Ms(int desired_freq_hz, AudioFrame* audio_frame);
|
||||
|
||||
/////////////////////////////////////////
|
||||
// Statistics
|
||||
//
|
||||
|
||||
int NetworkStatistics(ACMNetworkStatistics* statistics);
|
||||
|
||||
void DestructEncoderInst(void* inst);
|
||||
|
||||
// GET RED payload for iSAC. The method id called when 'this' ACM is
|
||||
// the default ACM.
|
||||
// TODO(henrik.lundin) Not used. Remove?
|
||||
int REDPayloadISAC(int isac_rate,
|
||||
int isac_bw_estimate,
|
||||
uint8_t* payload,
|
||||
int16_t* length_bytes);
|
||||
|
||||
int ReplaceInternalDTXWithWebRtc(bool use_webrtc_dtx);
|
||||
|
||||
int IsInternalDTXReplacedWithWebRtc(bool* uses_webrtc_dtx);
|
||||
|
||||
int SetISACMaxRate(int max_bit_per_sec);
|
||||
|
||||
int SetISACMaxPayloadSize(int max_size_bytes);
|
||||
|
||||
int ConfigISACBandwidthEstimator(int frame_size_ms,
|
||||
int rate_bit_per_sec,
|
||||
bool enforce_frame_size = false);
|
||||
|
||||
// If current send codec is Opus, informs it about the maximum audio
|
||||
// bandwidth needs to be encoded.
|
||||
int SetOpusMaxBandwidth(int bandwidth_hz);
|
||||
|
||||
int UnregisterReceiveCodec(uint8_t payload_type);
|
||||
|
||||
int EnableNack(size_t max_nack_list_size);
|
||||
|
||||
void DisableNack();
|
||||
|
||||
std::vector<uint16_t> GetNackList(int round_trip_time_ms) const;
|
||||
|
||||
void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
|
||||
|
||||
private:
|
||||
int UnregisterReceiveCodecSafe(int payload_type);
|
||||
|
||||
ACMGenericCodec* CreateCodec(const CodecInst& codec);
|
||||
|
||||
int InitializeReceiverSafe() EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
bool HaveValidEncoder(const char* caller_name) const
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
// Set VAD/DTX status. This function does not acquire a lock, and it is
|
||||
// created to be called only from inside a critical section.
|
||||
int SetVADSafe(bool enable_dtx, bool enable_vad, ACMVADMode mode)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
// Process buffered audio when dual-streaming is not enabled (When RED is
|
||||
// enabled still this function is used.)
|
||||
int ProcessSingleStream();
|
||||
|
||||
// Process buffered audio when dual-streaming is enabled, i.e. secondary send
|
||||
// codec is registered.
|
||||
int ProcessDualStream();
|
||||
|
||||
// Preprocessing of input audio, including resampling and down-mixing if
|
||||
// required, before pushing audio into encoder's buffer.
|
||||
//
|
||||
// in_frame: input audio-frame
|
||||
// ptr_out: pointer to output audio_frame. If no preprocessing is required
|
||||
// |ptr_out| will be pointing to |in_frame|, otherwise pointing to
|
||||
// |preprocess_frame_|.
|
||||
//
|
||||
// Return value:
|
||||
// -1: if encountering an error.
|
||||
// 0: otherwise.
|
||||
int PreprocessToAddData(const AudioFrame& in_frame,
|
||||
const AudioFrame** ptr_out)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
// Change required states after starting to receive the codec corresponding
|
||||
// to |index|.
|
||||
int UpdateUponReceivingCodec(int index);
|
||||
|
||||
int EncodeFragmentation(int fragmentation_index,
|
||||
int payload_type,
|
||||
uint32_t current_timestamp,
|
||||
ACMGenericCodec* encoder,
|
||||
uint8_t* stream)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
void ResetFragmentation(int vector_size)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
// Get a pointer to AudioDecoder of the given codec. For some codecs, e.g.
|
||||
// iSAC, encoding and decoding have to be performed on a shared
|
||||
// codec-instance. By calling this method, we get the codec-instance that ACM
|
||||
// owns, then pass that to NetEq. This way, we perform both encoding and
|
||||
// decoding on the same codec-instance. Furthermore, ACM would have control
|
||||
// over decoder functionality if required. If |codec| does not share an
|
||||
// instance between encoder and decoder, the |*decoder| is set NULL.
|
||||
// The field ACMCodecDB::CodecSettings.owns_decoder indicates that if a
|
||||
// codec owns the decoder-instance. For such codecs |*decoder| should be a
|
||||
// valid pointer, otherwise it will be NULL.
|
||||
int GetAudioDecoder(const CodecInst& codec, int codec_id,
|
||||
int mirror_id, AudioDecoder** decoder)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
|
||||
|
||||
CriticalSectionWrapper* acm_crit_sect_;
|
||||
int id_; // TODO(henrik.lundin) Make const.
|
||||
uint32_t expected_codec_ts_ GUARDED_BY(acm_crit_sect_);
|
||||
uint32_t expected_in_ts_ GUARDED_BY(acm_crit_sect_);
|
||||
CodecInst send_codec_inst_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
uint8_t cng_nb_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
uint8_t cng_wb_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
uint8_t cng_swb_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
uint8_t cng_fb_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
uint8_t red_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
bool vad_enabled_ GUARDED_BY(acm_crit_sect_);
|
||||
bool dtx_enabled_ GUARDED_BY(acm_crit_sect_);
|
||||
ACMVADMode vad_mode_ GUARDED_BY(acm_crit_sect_);
|
||||
ACMGenericCodec* codecs_[ACMCodecDB::kMaxNumCodecs]
|
||||
GUARDED_BY(acm_crit_sect_);
|
||||
int mirror_codec_idx_[ACMCodecDB::kMaxNumCodecs] GUARDED_BY(acm_crit_sect_);
|
||||
bool stereo_send_ GUARDED_BY(acm_crit_sect_);
|
||||
int current_send_codec_idx_ GUARDED_BY(acm_crit_sect_);
|
||||
bool send_codec_registered_ GUARDED_BY(acm_crit_sect_);
|
||||
ACMResampler resampler_ GUARDED_BY(acm_crit_sect_);
|
||||
AcmReceiver receiver_; // AcmReceiver has it's own internal lock.
|
||||
|
||||
// RED.
|
||||
bool is_first_red_ GUARDED_BY(acm_crit_sect_);
|
||||
bool red_enabled_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
// TODO(turajs): |red_buffer_| is allocated in constructor, why having them
|
||||
// as pointers and not an array. If concerned about the memory, then make a
|
||||
// set-up function to allocate them only when they are going to be used, i.e.
|
||||
// RED or Dual-streaming is enabled.
|
||||
uint8_t* red_buffer_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
// TODO(turajs): we actually don't need |fragmentation_| as a member variable.
|
||||
// It is sufficient to keep the length & payload type of previous payload in
|
||||
// member variables.
|
||||
RTPFragmentationHeader fragmentation_ GUARDED_BY(acm_crit_sect_);
|
||||
uint32_t last_red_timestamp_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
// Codec internal FEC
|
||||
bool codec_fec_enabled_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
// This is to keep track of CN instances where we can send DTMFs.
|
||||
uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
// Used when payloads are pushed into ACM without any RTP info
|
||||
// One example is when pre-encoded bit-stream is pushed from
|
||||
// a file.
|
||||
// IMPORTANT: this variable is only used in IncomingPayload(), therefore,
|
||||
// no lock acquired when interacting with this variable. If it is going to
|
||||
// be used in other methods, locks need to be taken.
|
||||
WebRtcRTPHeader* aux_rtp_header_;
|
||||
|
||||
bool receiver_initialized_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
AudioFrame preprocess_frame_ GUARDED_BY(acm_crit_sect_);
|
||||
CodecInst secondary_send_codec_inst_ GUARDED_BY(acm_crit_sect_);
|
||||
scoped_ptr<ACMGenericCodec> secondary_encoder_ GUARDED_BY(acm_crit_sect_);
|
||||
uint32_t codec_timestamp_ GUARDED_BY(acm_crit_sect_);
|
||||
bool first_10ms_data_ GUARDED_BY(acm_crit_sect_);
|
||||
|
||||
CriticalSectionWrapper* callback_crit_sect_;
|
||||
AudioPacketizationCallback* packetization_callback_
|
||||
GUARDED_BY(callback_crit_sect_);
|
||||
ACMVADCallback* vad_callback_ GUARDED_BY(callback_crit_sect_);
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_AUDIO_CODING_MODULE_IMPL_H_
|
||||
@@ -0,0 +1,779 @@
|
||||
/*
|
||||
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <vector>
|
||||
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
#include "webrtc/base/md5digest.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_receive_test.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/acm_send_test.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/audio_checksum.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/audio_loop.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/input_audio_file.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/output_audio_file.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/packet.h"
|
||||
#include "webrtc/modules/audio_coding/neteq/tools/rtp_file_source.h"
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
#include "webrtc/system_wrappers/interface/clock.h"
|
||||
#include "webrtc/system_wrappers/interface/compile_assert.h"
|
||||
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/event_wrapper.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/system_wrappers/interface/sleep.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_annotations.h"
|
||||
#include "webrtc/system_wrappers/interface/thread_wrapper.h"
|
||||
#include "webrtc/test/testsupport/fileutils.h"
|
||||
#include "webrtc/test/testsupport/gtest_disable.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
const int kSampleRateHz = 16000;
|
||||
const int kNumSamples10ms = kSampleRateHz / 100;
|
||||
const int kFrameSizeMs = 10; // Multiple of 10.
|
||||
const int kFrameSizeSamples = kFrameSizeMs / 10 * kNumSamples10ms;
|
||||
const int kPayloadSizeBytes = kFrameSizeSamples * sizeof(int16_t);
|
||||
const uint8_t kPayloadType = 111;
|
||||
|
||||
class RtpUtility {
|
||||
public:
|
||||
RtpUtility(int samples_per_packet, uint8_t payload_type)
|
||||
: samples_per_packet_(samples_per_packet), payload_type_(payload_type) {}
|
||||
|
||||
virtual ~RtpUtility() {}
|
||||
|
||||
void Populate(WebRtcRTPHeader* rtp_header) {
|
||||
rtp_header->header.sequenceNumber = 0xABCD;
|
||||
rtp_header->header.timestamp = 0xABCDEF01;
|
||||
rtp_header->header.payloadType = payload_type_;
|
||||
rtp_header->header.markerBit = false;
|
||||
rtp_header->header.ssrc = 0x1234;
|
||||
rtp_header->header.numCSRCs = 0;
|
||||
rtp_header->frameType = kAudioFrameSpeech;
|
||||
|
||||
rtp_header->header.payload_type_frequency = kSampleRateHz;
|
||||
rtp_header->type.Audio.channel = 1;
|
||||
rtp_header->type.Audio.isCNG = false;
|
||||
}
|
||||
|
||||
void Forward(WebRtcRTPHeader* rtp_header) {
|
||||
++rtp_header->header.sequenceNumber;
|
||||
rtp_header->header.timestamp += samples_per_packet_;
|
||||
}
|
||||
|
||||
private:
|
||||
int samples_per_packet_;
|
||||
uint8_t payload_type_;
|
||||
};
|
||||
|
||||
class PacketizationCallbackStub : public AudioPacketizationCallback {
|
||||
public:
|
||||
PacketizationCallbackStub()
|
||||
: num_calls_(0),
|
||||
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
|
||||
|
||||
virtual int32_t SendData(
|
||||
FrameType frame_type,
|
||||
uint8_t payload_type,
|
||||
uint32_t timestamp,
|
||||
const uint8_t* payload_data,
|
||||
uint16_t payload_len_bytes,
|
||||
const RTPFragmentationHeader* fragmentation) OVERRIDE {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
++num_calls_;
|
||||
last_payload_vec_.assign(payload_data, payload_data + payload_len_bytes);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int num_calls() const {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
return num_calls_;
|
||||
}
|
||||
|
||||
int last_payload_len_bytes() const {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
return last_payload_vec_.size();
|
||||
}
|
||||
|
||||
void SwapBuffers(std::vector<uint8_t>* payload) {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
last_payload_vec_.swap(*payload);
|
||||
}
|
||||
|
||||
private:
|
||||
int num_calls_ GUARDED_BY(crit_sect_);
|
||||
std::vector<uint8_t> last_payload_vec_ GUARDED_BY(crit_sect_);
|
||||
const scoped_ptr<CriticalSectionWrapper> crit_sect_;
|
||||
};
|
||||
|
||||
class AudioCodingModuleTest : public ::testing::Test {
|
||||
protected:
|
||||
AudioCodingModuleTest()
|
||||
: id_(1),
|
||||
rtp_utility_(new RtpUtility(kFrameSizeSamples, kPayloadType)),
|
||||
clock_(Clock::GetRealTimeClock()) {}
|
||||
|
||||
~AudioCodingModuleTest() {}
|
||||
|
||||
void TearDown() {}
|
||||
|
||||
void SetUp() {
|
||||
acm_.reset(AudioCodingModule::Create(id_, clock_));
|
||||
|
||||
RegisterCodec();
|
||||
|
||||
rtp_utility_->Populate(&rtp_header_);
|
||||
|
||||
input_frame_.sample_rate_hz_ = kSampleRateHz;
|
||||
input_frame_.num_channels_ = 1;
|
||||
input_frame_.samples_per_channel_ = kSampleRateHz * 10 / 1000; // 10 ms.
|
||||
COMPILE_ASSERT(kSampleRateHz * 10 / 1000 <= AudioFrame::kMaxDataSizeSamples,
|
||||
audio_frame_too_small);
|
||||
memset(input_frame_.data_,
|
||||
0,
|
||||
input_frame_.samples_per_channel_ * sizeof(input_frame_.data_[0]));
|
||||
|
||||
ASSERT_EQ(0, acm_->RegisterTransportCallback(&packet_cb_));
|
||||
}
|
||||
|
||||
virtual void RegisterCodec() {
|
||||
AudioCodingModule::Codec("L16", &codec_, kSampleRateHz, 1);
|
||||
codec_.pltype = kPayloadType;
|
||||
|
||||
// Register L16 codec in ACM.
|
||||
ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
|
||||
ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
|
||||
}
|
||||
|
||||
virtual void InsertPacketAndPullAudio() {
|
||||
InsertPacket();
|
||||
PullAudio();
|
||||
}
|
||||
|
||||
virtual void InsertPacket() {
|
||||
const uint8_t kPayload[kPayloadSizeBytes] = {0};
|
||||
ASSERT_EQ(0,
|
||||
acm_->IncomingPacket(kPayload, kPayloadSizeBytes, rtp_header_));
|
||||
rtp_utility_->Forward(&rtp_header_);
|
||||
}
|
||||
|
||||
virtual void PullAudio() {
|
||||
AudioFrame audio_frame;
|
||||
ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &audio_frame));
|
||||
}
|
||||
|
||||
virtual void InsertAudio() {
|
||||
ASSERT_EQ(0, acm_->Add10MsData(input_frame_));
|
||||
input_frame_.timestamp_ += kNumSamples10ms;
|
||||
}
|
||||
|
||||
virtual void Encode() {
|
||||
int32_t encoded_bytes = acm_->Process();
|
||||
// Expect to get one packet with two bytes per sample, or no packet at all,
|
||||
// depending on how many 10 ms blocks go into |codec_.pacsize|.
|
||||
EXPECT_TRUE(encoded_bytes == 2 * codec_.pacsize || encoded_bytes == 0);
|
||||
}
|
||||
|
||||
const int id_;
|
||||
scoped_ptr<RtpUtility> rtp_utility_;
|
||||
scoped_ptr<AudioCodingModule> acm_;
|
||||
PacketizationCallbackStub packet_cb_;
|
||||
WebRtcRTPHeader rtp_header_;
|
||||
AudioFrame input_frame_;
|
||||
CodecInst codec_;
|
||||
Clock* clock_;
|
||||
};
|
||||
|
||||
// Check if the statistics are initialized correctly. Before any call to ACM
|
||||
// all fields have to be zero.
|
||||
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(InitializedToZero)) {
|
||||
AudioDecodingCallStats stats;
|
||||
acm_->GetDecodingCallStatistics(&stats);
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
// Apply an initial playout delay. Calls to AudioCodingModule::PlayoutData10ms()
|
||||
// should result in generating silence, check the associated field.
|
||||
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(SilenceGeneratorCalled)) {
|
||||
AudioDecodingCallStats stats;
|
||||
const int kInitialDelay = 100;
|
||||
|
||||
acm_->SetInitialPlayoutDelay(kInitialDelay);
|
||||
|
||||
int num_calls = 0;
|
||||
for (int time_ms = 0; time_ms < kInitialDelay;
|
||||
time_ms += kFrameSizeMs, ++num_calls) {
|
||||
InsertPacketAndPullAudio();
|
||||
}
|
||||
acm_->GetDecodingCallStatistics(&stats);
|
||||
EXPECT_EQ(0, stats.calls_to_neteq);
|
||||
EXPECT_EQ(num_calls, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(0, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
// Insert some packets and pull audio. Check statistics are valid. Then,
|
||||
// simulate packet loss and check if PLC and PLC-to-CNG statistics are
|
||||
// correctly updated.
|
||||
TEST_F(AudioCodingModuleTest, DISABLED_ON_ANDROID(NetEqCalls)) {
|
||||
AudioDecodingCallStats stats;
|
||||
const int kNumNormalCalls = 10;
|
||||
|
||||
for (int num_calls = 0; num_calls < kNumNormalCalls; ++num_calls) {
|
||||
InsertPacketAndPullAudio();
|
||||
}
|
||||
acm_->GetDecodingCallStatistics(&stats);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(0, stats.decoded_plc);
|
||||
EXPECT_EQ(0, stats.decoded_plc_cng);
|
||||
|
||||
const int kNumPlc = 3;
|
||||
const int kNumPlcCng = 5;
|
||||
|
||||
// Simulate packet-loss. NetEq first performs PLC then PLC fades to CNG.
|
||||
for (int n = 0; n < kNumPlc + kNumPlcCng; ++n) {
|
||||
PullAudio();
|
||||
}
|
||||
acm_->GetDecodingCallStatistics(&stats);
|
||||
EXPECT_EQ(kNumNormalCalls + kNumPlc + kNumPlcCng, stats.calls_to_neteq);
|
||||
EXPECT_EQ(0, stats.calls_to_silence_generator);
|
||||
EXPECT_EQ(kNumNormalCalls, stats.decoded_normal);
|
||||
EXPECT_EQ(0, stats.decoded_cng);
|
||||
EXPECT_EQ(kNumPlc, stats.decoded_plc);
|
||||
EXPECT_EQ(kNumPlcCng, stats.decoded_plc_cng);
|
||||
}
|
||||
|
||||
TEST_F(AudioCodingModuleTest, VerifyOutputFrame) {
|
||||
AudioFrame audio_frame;
|
||||
const int kSampleRateHz = 32000;
|
||||
EXPECT_EQ(0, acm_->PlayoutData10Ms(kSampleRateHz, &audio_frame));
|
||||
EXPECT_EQ(id_, audio_frame.id_);
|
||||
EXPECT_EQ(0u, audio_frame.timestamp_);
|
||||
EXPECT_GT(audio_frame.num_channels_, 0);
|
||||
EXPECT_EQ(kSampleRateHz / 100, audio_frame.samples_per_channel_);
|
||||
EXPECT_EQ(kSampleRateHz, audio_frame.sample_rate_hz_);
|
||||
}
|
||||
|
||||
TEST_F(AudioCodingModuleTest, FailOnZeroDesiredFrequency) {
|
||||
AudioFrame audio_frame;
|
||||
EXPECT_EQ(-1, acm_->PlayoutData10Ms(0, &audio_frame));
|
||||
}
|
||||
|
||||
// A multi-threaded test for ACM. This base class is using the PCM16b 16 kHz
|
||||
// codec, while the derive class AcmIsacMtTest is using iSAC.
|
||||
class AudioCodingModuleMtTest : public AudioCodingModuleTest {
|
||||
protected:
|
||||
static const int kNumPackets = 500;
|
||||
static const int kNumPullCalls = 500;
|
||||
|
||||
AudioCodingModuleMtTest()
|
||||
: AudioCodingModuleTest(),
|
||||
send_thread_(ThreadWrapper::CreateThread(CbSendThread,
|
||||
this,
|
||||
kRealtimePriority,
|
||||
"send")),
|
||||
insert_packet_thread_(ThreadWrapper::CreateThread(CbInsertPacketThread,
|
||||
this,
|
||||
kRealtimePriority,
|
||||
"insert_packet")),
|
||||
pull_audio_thread_(ThreadWrapper::CreateThread(CbPullAudioThread,
|
||||
this,
|
||||
kRealtimePriority,
|
||||
"pull_audio")),
|
||||
test_complete_(EventWrapper::Create()),
|
||||
send_count_(0),
|
||||
insert_packet_count_(0),
|
||||
pull_audio_count_(0),
|
||||
crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
|
||||
next_insert_packet_time_ms_(0),
|
||||
fake_clock_(new SimulatedClock(0)) {
|
||||
clock_ = fake_clock_.get();
|
||||
}
|
||||
|
||||
void SetUp() {
|
||||
AudioCodingModuleTest::SetUp();
|
||||
StartThreads();
|
||||
}
|
||||
|
||||
void StartThreads() {
|
||||
unsigned int thread_id = 0;
|
||||
ASSERT_TRUE(send_thread_->Start(thread_id));
|
||||
ASSERT_TRUE(insert_packet_thread_->Start(thread_id));
|
||||
ASSERT_TRUE(pull_audio_thread_->Start(thread_id));
|
||||
}
|
||||
|
||||
void TearDown() {
|
||||
AudioCodingModuleTest::TearDown();
|
||||
pull_audio_thread_->Stop();
|
||||
send_thread_->Stop();
|
||||
insert_packet_thread_->Stop();
|
||||
}
|
||||
|
||||
EventTypeWrapper RunTest() {
|
||||
return test_complete_->Wait(10 * 60 * 1000); // 10 minutes' timeout.
|
||||
}
|
||||
|
||||
virtual bool TestDone() {
|
||||
if (packet_cb_.num_calls() > kNumPackets) {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
if (pull_audio_count_ > kNumPullCalls) {
|
||||
// Both conditions for completion are met. End the test.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool CbSendThread(void* context) {
|
||||
return reinterpret_cast<AudioCodingModuleMtTest*>(context)->CbSendImpl();
|
||||
}
|
||||
|
||||
// The send thread doesn't have to care about the current simulated time,
|
||||
// since only the AcmReceiver is using the clock.
|
||||
bool CbSendImpl() {
|
||||
SleepMs(1);
|
||||
if (HasFatalFailure()) {
|
||||
// End the test early if a fatal failure (ASSERT_*) has occurred.
|
||||
test_complete_->Set();
|
||||
}
|
||||
++send_count_;
|
||||
InsertAudio();
|
||||
Encode();
|
||||
if (TestDone()) {
|
||||
test_complete_->Set();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool CbInsertPacketThread(void* context) {
|
||||
return reinterpret_cast<AudioCodingModuleMtTest*>(context)
|
||||
->CbInsertPacketImpl();
|
||||
}
|
||||
|
||||
bool CbInsertPacketImpl() {
|
||||
SleepMs(1);
|
||||
{
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
if (clock_->TimeInMilliseconds() < next_insert_packet_time_ms_) {
|
||||
return true;
|
||||
}
|
||||
next_insert_packet_time_ms_ += 10;
|
||||
}
|
||||
// Now we're not holding the crit sect when calling ACM.
|
||||
++insert_packet_count_;
|
||||
InsertPacket();
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool CbPullAudioThread(void* context) {
|
||||
return reinterpret_cast<AudioCodingModuleMtTest*>(context)
|
||||
->CbPullAudioImpl();
|
||||
}
|
||||
|
||||
bool CbPullAudioImpl() {
|
||||
SleepMs(1);
|
||||
{
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
// Don't let the insert thread fall behind.
|
||||
if (next_insert_packet_time_ms_ < clock_->TimeInMilliseconds()) {
|
||||
return true;
|
||||
}
|
||||
++pull_audio_count_;
|
||||
}
|
||||
// Now we're not holding the crit sect when calling ACM.
|
||||
PullAudio();
|
||||
fake_clock_->AdvanceTimeMilliseconds(10);
|
||||
return true;
|
||||
}
|
||||
|
||||
scoped_ptr<ThreadWrapper> send_thread_;
|
||||
scoped_ptr<ThreadWrapper> insert_packet_thread_;
|
||||
scoped_ptr<ThreadWrapper> pull_audio_thread_;
|
||||
const scoped_ptr<EventWrapper> test_complete_;
|
||||
int send_count_;
|
||||
int insert_packet_count_;
|
||||
int pull_audio_count_ GUARDED_BY(crit_sect_);
|
||||
const scoped_ptr<CriticalSectionWrapper> crit_sect_;
|
||||
int64_t next_insert_packet_time_ms_ GUARDED_BY(crit_sect_);
|
||||
scoped_ptr<SimulatedClock> fake_clock_;
|
||||
};
|
||||
|
||||
TEST_F(AudioCodingModuleMtTest, DoTest) {
|
||||
EXPECT_EQ(kEventSignaled, RunTest());
|
||||
}
|
||||
|
||||
// This is a multi-threaded ACM test using iSAC. The test encodes audio
|
||||
// from a PCM file. The most recent encoded frame is used as input to the
|
||||
// receiving part. Depending on timing, it may happen that the same RTP packet
|
||||
// is inserted into the receiver multiple times, but this is a valid use-case,
|
||||
// and simplifies the test code a lot.
|
||||
class AcmIsacMtTest : public AudioCodingModuleMtTest {
|
||||
protected:
|
||||
static const int kNumPackets = 500;
|
||||
static const int kNumPullCalls = 500;
|
||||
|
||||
AcmIsacMtTest()
|
||||
: AudioCodingModuleMtTest(),
|
||||
last_packet_number_(0) {}
|
||||
|
||||
~AcmIsacMtTest() {}
|
||||
|
||||
void SetUp() {
|
||||
AudioCodingModuleTest::SetUp();
|
||||
|
||||
// Set up input audio source to read from specified file, loop after 5
|
||||
// seconds, and deliver blocks of 10 ms.
|
||||
const std::string input_file_name =
|
||||
webrtc::test::ResourcePath("audio_coding/speech_mono_16kHz", "pcm");
|
||||
audio_loop_.Init(input_file_name, 5 * kSampleRateHz, kNumSamples10ms);
|
||||
|
||||
// Generate one packet to have something to insert.
|
||||
int loop_counter = 0;
|
||||
while (packet_cb_.last_payload_len_bytes() == 0) {
|
||||
InsertAudio();
|
||||
Encode();
|
||||
ASSERT_LT(loop_counter++, 10);
|
||||
}
|
||||
// Set |last_packet_number_| to one less that |num_calls| so that the packet
|
||||
// will be fetched in the next InsertPacket() call.
|
||||
last_packet_number_ = packet_cb_.num_calls() - 1;
|
||||
|
||||
StartThreads();
|
||||
}
|
||||
|
||||
virtual void RegisterCodec() {
|
||||
COMPILE_ASSERT(kSampleRateHz == 16000, test_designed_for_isac_16khz);
|
||||
AudioCodingModule::Codec("ISAC", &codec_, kSampleRateHz, 1);
|
||||
codec_.pltype = kPayloadType;
|
||||
|
||||
// Register iSAC codec in ACM, effectively unregistering the PCM16B codec
|
||||
// registered in AudioCodingModuleTest::SetUp();
|
||||
ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec_));
|
||||
ASSERT_EQ(0, acm_->RegisterSendCodec(codec_));
|
||||
}
|
||||
|
||||
void InsertPacket() {
|
||||
int num_calls = packet_cb_.num_calls(); // Store locally for thread safety.
|
||||
if (num_calls > last_packet_number_) {
|
||||
// Get the new payload out from the callback handler.
|
||||
// Note that since we swap buffers here instead of directly inserting
|
||||
// a pointer to the data in |packet_cb_|, we avoid locking the callback
|
||||
// for the duration of the IncomingPacket() call.
|
||||
packet_cb_.SwapBuffers(&last_payload_vec_);
|
||||
ASSERT_GT(last_payload_vec_.size(), 0u);
|
||||
rtp_utility_->Forward(&rtp_header_);
|
||||
last_packet_number_ = num_calls;
|
||||
}
|
||||
ASSERT_GT(last_payload_vec_.size(), 0u);
|
||||
ASSERT_EQ(
|
||||
0,
|
||||
acm_->IncomingPacket(
|
||||
&last_payload_vec_[0], last_payload_vec_.size(), rtp_header_));
|
||||
}
|
||||
|
||||
void InsertAudio() {
|
||||
memcpy(input_frame_.data_, audio_loop_.GetNextBlock(), kNumSamples10ms);
|
||||
AudioCodingModuleTest::InsertAudio();
|
||||
}
|
||||
|
||||
void Encode() { ASSERT_GE(acm_->Process(), 0); }
|
||||
|
||||
// This method is the same as AudioCodingModuleMtTest::TestDone(), but here
|
||||
// it is using the constants defined in this class (i.e., shorter test run).
|
||||
virtual bool TestDone() {
|
||||
if (packet_cb_.num_calls() > kNumPackets) {
|
||||
CriticalSectionScoped lock(crit_sect_.get());
|
||||
if (pull_audio_count_ > kNumPullCalls) {
|
||||
// Both conditions for completion are met. End the test.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int last_packet_number_;
|
||||
std::vector<uint8_t> last_payload_vec_;
|
||||
test::AudioLoop audio_loop_;
|
||||
};
|
||||
|
||||
TEST_F(AcmIsacMtTest, DoTest) {
  // The multi-threaded run must finish by signaling its completion event.
  EXPECT_EQ(kEventSignaled, RunTest());
}
|
||||
|
||||
class AcmReceiverBitExactness : public ::testing::Test {
|
||||
public:
|
||||
static std::string PlatformChecksum(std::string win64,
|
||||
std::string android,
|
||||
std::string others) {
|
||||
#if defined(_WIN32) && defined(WEBRTC_ARCH_64_BITS)
|
||||
return win64;
|
||||
#elif defined(WEBRTC_ANDROID)
|
||||
return android;
|
||||
#else
|
||||
return others;
|
||||
#endif
|
||||
}
|
||||
|
||||
protected:
|
||||
void Run(int output_freq_hz, const std::string& checksum_ref) {
|
||||
const std::string input_file_name =
|
||||
webrtc::test::ResourcePath("audio_coding/neteq_universal_new", "rtp");
|
||||
scoped_ptr<test::RtpFileSource> packet_source(
|
||||
test::RtpFileSource::Create(input_file_name));
|
||||
#ifdef WEBRTC_ANDROID
|
||||
// Filter out iLBC and iSAC-swb since they are not supported on Android.
|
||||
packet_source->FilterOutPayloadType(102); // iLBC.
|
||||
packet_source->FilterOutPayloadType(104); // iSAC-swb.
|
||||
#endif
|
||||
|
||||
test::AudioChecksum checksum;
|
||||
const std::string output_file_name =
|
||||
webrtc::test::OutputPath() +
|
||||
::testing::UnitTest::GetInstance()
|
||||
->current_test_info()
|
||||
->test_case_name() +
|
||||
"_" + ::testing::UnitTest::GetInstance()->current_test_info()->name() +
|
||||
"_output.pcm";
|
||||
test::OutputAudioFile output_file(output_file_name);
|
||||
test::AudioSinkFork output(&checksum, &output_file);
|
||||
|
||||
test::AcmReceiveTest test(packet_source.get(), &output, output_freq_hz);
|
||||
ASSERT_NO_FATAL_FAILURE(test.RegisterNetEqTestCodecs());
|
||||
test.Run();
|
||||
|
||||
std::string checksum_string = checksum.Finish();
|
||||
EXPECT_EQ(checksum_ref, checksum_string);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(AcmReceiverBitExactness, 8kHzOutput) {
  // Reference checksums: win64 / Android / all other platforms.
  const std::string ref = PlatformChecksum(
      "bd6f8d9602cd82444ea2539e674df747",
      "6ac89c7145072c26bfeba602cd661afb",
      "8a8440f5511eb729221b9aac25cda3a0");
  Run(8000, ref);
}
|
||||
|
||||
TEST_F(AcmReceiverBitExactness, 16kHzOutput) {
  // Reference checksums: win64 / Android / all other platforms.
  const std::string ref = PlatformChecksum(
      "a39bc6ee0c4eb15f3ad2f43cebcc571d",
      "3e888eb04f57db2c6ef952fe64f17fe6",
      "7be583092c5adbcb0f6cd66eca20ea63");
  Run(16000, ref);
}
|
||||
|
||||
TEST_F(AcmReceiverBitExactness, 32kHzOutput) {
  // Reference checksums: win64 / Android / all other platforms.
  const std::string ref = PlatformChecksum(
      "80964572aaa2dc92f9e34896dd3802b3",
      "aeca37e963310f5b6552b7edea23c2f1",
      "3a84188abe9fca25fedd6034760f3e22");
  Run(32000, ref);
}
|
||||
|
||||
TEST_F(AcmReceiverBitExactness, 48kHzOutput) {
  // Reference checksums: win64 / Android / all other platforms.
  const std::string ref = PlatformChecksum(
      "8aacde91f390e0d5a9c2ed571a25fd37",
      "76b9e99e0a3998aa28355e7a2bd836f7",
      "89b4b19bdb4de40f1d88302ef8cb9f9b");
  Run(48000, ref);
}
|
||||
|
||||
// This test verifies bit exactness for the send-side of ACM. The test setup is
|
||||
// a chain of three different test classes:
|
||||
//
|
||||
// test::AcmSendTest -> AcmSenderBitExactness -> test::AcmReceiveTest
|
||||
//
|
||||
// The receiver side is driving the test by requesting new packets from
|
||||
// AcmSenderBitExactness::NextPacket(). This method, in turn, asks for the
|
||||
// packet from test::AcmSendTest::NextPacket, which inserts audio from the
|
||||
// input file until one packet is produced. (The input file loops indefinitely.)
|
||||
// Before passing the packet to the receiver, this test class verifies the
|
||||
// packet header and updates a payload checksum with the new payload. The
|
||||
// decoded output from the receiver is also verified with a (separate) checksum.
|
||||
class AcmSenderBitExactness : public ::testing::Test,
|
||||
public test::PacketSource {
|
||||
protected:
|
||||
static const int kTestDurationMs = 1000;
|
||||
|
||||
AcmSenderBitExactness()
|
||||
: frame_size_rtp_timestamps_(0),
|
||||
packet_count_(0),
|
||||
payload_type_(0),
|
||||
last_sequence_number_(0),
|
||||
last_timestamp_(0) {}
|
||||
|
||||
// Sets up the test::AcmSendTest object. Returns true on success, otherwise
|
||||
// false.
|
||||
bool SetUpSender() {
|
||||
const std::string input_file_name =
|
||||
webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
|
||||
// Note that |audio_source_| will loop forever. The test duration is set
|
||||
// explicitly by |kTestDurationMs|.
|
||||
audio_source_.reset(new test::InputAudioFile(input_file_name));
|
||||
static const int kSourceRateHz = 32000;
|
||||
send_test_.reset(new test::AcmSendTest(
|
||||
audio_source_.get(), kSourceRateHz, kTestDurationMs));
|
||||
return send_test_.get() != NULL;
|
||||
}
|
||||
|
||||
// Registers a send codec in the test::AcmSendTest object. Returns true on
|
||||
// success, false on failure.
|
||||
bool RegisterSendCodec(const char* payload_name,
|
||||
int sampling_freq_hz,
|
||||
int channels,
|
||||
int payload_type,
|
||||
int frame_size_samples,
|
||||
int frame_size_rtp_timestamps) {
|
||||
payload_type_ = payload_type;
|
||||
frame_size_rtp_timestamps_ = frame_size_rtp_timestamps;
|
||||
return send_test_->RegisterCodec(payload_name,
|
||||
sampling_freq_hz,
|
||||
channels,
|
||||
payload_type,
|
||||
frame_size_samples);
|
||||
}
|
||||
|
||||
// Runs the test. SetUpSender() and RegisterSendCodec() must have been called
|
||||
// before calling this method.
|
||||
void Run(const std::string& audio_checksum_ref,
|
||||
const std::string& payload_checksum_ref,
|
||||
int expected_packets) {
|
||||
// Set up the receiver used to decode the packets and verify the decoded
|
||||
// output.
|
||||
test::AudioChecksum audio_checksum;
|
||||
const std::string output_file_name =
|
||||
webrtc::test::OutputPath() +
|
||||
::testing::UnitTest::GetInstance()
|
||||
->current_test_info()
|
||||
->test_case_name() +
|
||||
"_" +
|
||||
::testing::UnitTest::GetInstance()->current_test_info()->name() +
|
||||
"_output.pcm";
|
||||
test::OutputAudioFile output_file(output_file_name);
|
||||
// Have the output audio sent both to file and to the checksum calculator.
|
||||
test::AudioSinkFork output(&audio_checksum, &output_file);
|
||||
const int kOutputFreqHz = 8000;
|
||||
test::AcmReceiveTest receive_test(this, &output, kOutputFreqHz);
|
||||
ASSERT_NO_FATAL_FAILURE(receive_test.RegisterDefaultCodecs());
|
||||
|
||||
// This is where the actual test is executed.
|
||||
receive_test.Run();
|
||||
|
||||
// Extract and verify the audio checksum.
|
||||
std::string checksum_string = audio_checksum.Finish();
|
||||
EXPECT_EQ(audio_checksum_ref, checksum_string);
|
||||
|
||||
// Extract and verify the payload checksum.
|
||||
char checksum_result[rtc::Md5Digest::kSize];
|
||||
payload_checksum_.Finish(checksum_result, rtc::Md5Digest::kSize);
|
||||
checksum_string = rtc::hex_encode(checksum_result, rtc::Md5Digest::kSize);
|
||||
EXPECT_EQ(payload_checksum_ref, checksum_string);
|
||||
|
||||
// Verify number of packets produced.
|
||||
EXPECT_EQ(expected_packets, packet_count_);
|
||||
}
|
||||
|
||||
// Returns a pointer to the next packet. Returns NULL if the source is
|
||||
// depleted (i.e., the test duration is exceeded), or if an error occurred.
|
||||
// Inherited from test::PacketSource.
|
||||
test::Packet* NextPacket() OVERRIDE {
|
||||
// Get the next packet from AcmSendTest. Ownership of |packet| is
|
||||
// transferred to this method.
|
||||
test::Packet* packet = send_test_->NextPacket();
|
||||
if (!packet)
|
||||
return NULL;
|
||||
|
||||
VerifyPacket(packet);
|
||||
// TODO(henrik.lundin) Save the packet to file as well.
|
||||
|
||||
// Pass it on to the caller. The caller becomes the owner of |packet|.
|
||||
return packet;
|
||||
}
|
||||
|
||||
// Verifies the packet.
|
||||
void VerifyPacket(const test::Packet* packet) {
|
||||
EXPECT_TRUE(packet->valid_header());
|
||||
// (We can check the header fields even if valid_header() is false.)
|
||||
EXPECT_EQ(payload_type_, packet->header().payloadType);
|
||||
if (packet_count_ > 0) {
|
||||
// This is not the first packet.
|
||||
uint16_t sequence_number_diff =
|
||||
packet->header().sequenceNumber - last_sequence_number_;
|
||||
EXPECT_EQ(1, sequence_number_diff);
|
||||
uint32_t timestamp_diff = packet->header().timestamp - last_timestamp_;
|
||||
EXPECT_EQ(frame_size_rtp_timestamps_, timestamp_diff);
|
||||
}
|
||||
++packet_count_;
|
||||
last_sequence_number_ = packet->header().sequenceNumber;
|
||||
last_timestamp_ = packet->header().timestamp;
|
||||
// Update the checksum.
|
||||
payload_checksum_.Update(packet->payload(), packet->payload_length_bytes());
|
||||
}
|
||||
|
||||
void SetUpTest(const char* codec_name,
|
||||
int codec_sample_rate_hz,
|
||||
int channels,
|
||||
int payload_type,
|
||||
int codec_frame_size_samples,
|
||||
int codec_frame_size_rtp_timestamps) {
|
||||
ASSERT_TRUE(SetUpSender());
|
||||
ASSERT_TRUE(RegisterSendCodec(codec_name,
|
||||
codec_sample_rate_hz,
|
||||
channels,
|
||||
payload_type,
|
||||
codec_frame_size_samples,
|
||||
codec_frame_size_rtp_timestamps));
|
||||
}
|
||||
|
||||
scoped_ptr<test::AcmSendTest> send_test_;
|
||||
scoped_ptr<test::InputAudioFile> audio_source_;
|
||||
uint32_t frame_size_rtp_timestamps_;
|
||||
int packet_count_;
|
||||
uint8_t payload_type_;
|
||||
uint16_t last_sequence_number_;
|
||||
uint32_t last_timestamp_;
|
||||
rtc::Md5Digest payload_checksum_;
|
||||
};
|
||||
|
||||
TEST_F(AcmSenderBitExactness, IsacWb30ms) {
  // iSAC wideband, 30 ms frames (480 samples / 480 RTP timestamps).
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 480, 480));
  const std::string audio_ref = AcmReceiverBitExactness::PlatformChecksum(
      "c7e5bdadfa2871df95639fcc297cf23d",
      "0499ca260390769b3172136faad925b9",
      "0b58f9eeee43d5891f5f6c75e77984a3");
  const std::string payload_ref = AcmReceiverBitExactness::PlatformChecksum(
      "d42cb5195463da26c8129bbfe73a22e6",
      "83de248aea9c3c2bd680b6952401b4ca",
      "3c79f16f34218271f3dca4e2b1dfe1bb");
  Run(audio_ref, payload_ref, 33);
}
|
||||
|
||||
TEST_F(AcmSenderBitExactness, IsacWb60ms) {
  // iSAC wideband, 60 ms frames (960 samples / 960 RTP timestamps).
  ASSERT_NO_FATAL_FAILURE(SetUpTest("ISAC", 16000, 1, 103, 960, 960));
  const std::string audio_ref = AcmReceiverBitExactness::PlatformChecksum(
      "14d63c5f08127d280e722e3191b73bdd",
      "8da003e16c5371af2dc2be79a50f9076",
      "1ad29139a04782a33daad8c2b9b35875");
  const std::string payload_ref = AcmReceiverBitExactness::PlatformChecksum(
      "ebe04a819d3a9d83a83a17f271e1139a",
      "97aeef98553b5a4b5a68f8b716e8eaf0",
      "9e0a0ab743ad987b55b8e14802769c56");
  Run(audio_ref, payload_ref, 16);
}
|
||||
|
||||
} // namespace webrtc
|
||||
55
jni/webrtc/modules/audio_coding/main/acm2/call_statistics.cc
Normal file
55
jni/webrtc/modules/audio_coding/main/acm2/call_statistics.cc
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
void CallStatistics::DecodedByNetEq(AudioFrame::SpeechType speech_type) {
  // One more decode request was served by NetEq; bucket it by speech type.
  ++decoding_stat_.calls_to_neteq;
  switch (speech_type) {
    case AudioFrame::kNormalSpeech:
      ++decoding_stat_.decoded_normal;
      break;
    case AudioFrame::kPLC:
      ++decoding_stat_.decoded_plc;
      break;
    case AudioFrame::kCNG:
      ++decoding_stat_.decoded_cng;
      break;
    case AudioFrame::kPLCCNG:
      ++decoding_stat_.decoded_plc_cng;
      break;
    case AudioFrame::kUndefined:
      // NetEq-decoded audio never reports |kUndefined|; reaching this is a
      // programming error.
      assert(false);
  }
}
|
||||
|
||||
void CallStatistics::DecodedBySilenceGenerator() {
  // A decode request was satisfied by the silence generator, bypassing NetEq.
  ++decoding_stat_.calls_to_silence_generator;
}
|
||||
|
||||
const AudioDecodingCallStats& CallStatistics::GetDecodingStatistics() const {
  // Expose the accumulated counters; read-only view for the caller.
  return decoding_stat_;
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
63
jni/webrtc/modules/audio_coding/main/acm2/call_statistics.h
Normal file
63
jni/webrtc/modules/audio_coding/main/acm2/call_statistics.h
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
|
||||
|
||||
#include "webrtc/common_types.h"
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
|
||||
//
|
||||
// This class is for book keeping of calls to ACM. It is not useful to log API
|
||||
// calls which are supposed to be called every 10ms, e.g. PlayoutData10Ms(),
|
||||
// however, it is useful to know the number of such calls in a given time
|
||||
// interval. The current implementation covers calls to PlayoutData10Ms() with
|
||||
// detailed accounting of the decoded speech type.
|
||||
//
|
||||
// Thread Safety
|
||||
// =============
|
||||
// Please note that this class is not thread safe. The class must be protected
|
||||
// if different APIs are called from different threads.
|
||||
//
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class CallStatistics {
|
||||
public:
|
||||
CallStatistics() {}
|
||||
~CallStatistics() {}
|
||||
|
||||
// Call this method to indicate that NetEq engaged in decoding. |speech_type|
|
||||
// is the audio-type according to NetEq.
|
||||
void DecodedByNetEq(AudioFrame::SpeechType speech_type);
|
||||
|
||||
// Call this method to indicate that a decoding call resulted in generating
|
||||
// silence, i.e. call to NetEq is bypassed and the output audio is zero.
|
||||
void DecodedBySilenceGenerator();
|
||||
|
||||
// Get statistics for decoding. The statistics include the number of calls to
|
||||
// NetEq and silence generator, as well as the type of speech pulled of off
|
||||
// NetEq, c.f. declaration of AudioDecodingCallStats for detailed description.
|
||||
const AudioDecodingCallStats& GetDecodingStatistics() const;
|
||||
|
||||
private:
|
||||
// Reset the decoding statistics.
|
||||
void ResetDecodingStatistics();
|
||||
|
||||
AudioDecodingCallStats decoding_stat_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_CALL_STATISTICS_H_
|
||||
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/call_statistics.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
TEST(CallStatisticsTest, InitializedZero) {
  // A freshly constructed CallStatistics must report all-zero counters.
  CallStatistics call_stats;
  AudioDecodingCallStats stats = call_stats.GetDecodingStatistics();
  EXPECT_EQ(0, stats.calls_to_neteq);
  EXPECT_EQ(0, stats.calls_to_silence_generator);
  EXPECT_EQ(0, stats.decoded_normal);
  EXPECT_EQ(0, stats.decoded_cng);
  EXPECT_EQ(0, stats.decoded_plc);
  EXPECT_EQ(0, stats.decoded_plc_cng);
}
|
||||
|
||||
TEST(CallStatisticsTest, AllCalls) {
  CallStatistics call_stats;

  // One silence-generator decode plus one NetEq decode of each speech type.
  call_stats.DecodedBySilenceGenerator();
  call_stats.DecodedByNetEq(AudioFrame::kNormalSpeech);
  call_stats.DecodedByNetEq(AudioFrame::kPLC);
  call_stats.DecodedByNetEq(AudioFrame::kPLCCNG);
  call_stats.DecodedByNetEq(AudioFrame::kCNG);

  // Every counter must reflect exactly the calls made above.
  AudioDecodingCallStats stats = call_stats.GetDecodingStatistics();
  EXPECT_EQ(4, stats.calls_to_neteq);
  EXPECT_EQ(1, stats.calls_to_silence_generator);
  EXPECT_EQ(1, stats.decoded_normal);
  EXPECT_EQ(1, stats.decoded_cng);
  EXPECT_EQ(1, stats.decoded_plc);
  EXPECT_EQ(1, stats.decoded_plc_cng);
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,242 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
InitialDelayManager::InitialDelayManager(int initial_delay_ms,
                                         int late_packet_threshold)
    : last_packet_type_(kUndefinedPacket),
      last_receive_timestamp_(0),
      timestamp_step_(0),
      audio_payload_type_(kInvalidPayloadType),
      initial_delay_ms_(initial_delay_ms),
      buffered_audio_ms_(0),
      buffering_(true),
      playout_timestamp_(0),
      late_packet_threshold_(late_packet_threshold) {
  // Mark the last-packet record as empty; the invalid payload type signals
  // that no packet has been received yet.
  last_packet_rtp_info_.header.payloadType = kInvalidPayloadType;
  last_packet_rtp_info_.header.ssrc = 0;
  last_packet_rtp_info_.header.sequenceNumber = 0;
  last_packet_rtp_info_.header.timestamp = 0;
}
|
||||
|
||||
void InitialDelayManager::UpdateLastReceivedPacket(
|
||||
const WebRtcRTPHeader& rtp_info,
|
||||
uint32_t receive_timestamp,
|
||||
PacketType type,
|
||||
bool new_codec,
|
||||
int sample_rate_hz,
|
||||
SyncStream* sync_stream) {
|
||||
assert(sync_stream);
|
||||
|
||||
// If payload of audio packets is changing |new_codec| has to be true.
|
||||
assert(!(!new_codec && type == kAudioPacket &&
|
||||
rtp_info.header.payloadType != audio_payload_type_));
|
||||
|
||||
// Just shorthands.
|
||||
const RTPHeader* current_header = &rtp_info.header;
|
||||
RTPHeader* last_header = &last_packet_rtp_info_.header;
|
||||
|
||||
// Don't do anything if getting DTMF. The chance of DTMF in applications where
|
||||
// initial delay is required is very low (we don't know of any). This avoids a
|
||||
// lot of corner cases. The effect of ignoring DTMF packet is minimal. Note
|
||||
// that DTMFs are inserted into NetEq just not accounted here.
|
||||
if (type == kAvtPacket ||
|
||||
(last_packet_type_ != kUndefinedPacket &&
|
||||
!IsNewerSequenceNumber(current_header->sequenceNumber,
|
||||
last_header->sequenceNumber))) {
|
||||
sync_stream->num_sync_packets = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
// Either if it is a new packet or the first packet record and set variables.
|
||||
if (new_codec ||
|
||||
last_packet_rtp_info_.header.payloadType == kInvalidPayloadType) {
|
||||
timestamp_step_ = 0;
|
||||
if (type == kAudioPacket)
|
||||
audio_payload_type_ = rtp_info.header.payloadType;
|
||||
else
|
||||
audio_payload_type_ = kInvalidPayloadType; // Invalid.
|
||||
|
||||
RecordLastPacket(rtp_info, receive_timestamp, type);
|
||||
sync_stream->num_sync_packets = 0;
|
||||
buffered_audio_ms_ = 0;
|
||||
buffering_ = true;
|
||||
|
||||
// If |buffering_| is set then |playout_timestamp_| should have correct
|
||||
// value.
|
||||
UpdatePlayoutTimestamp(*current_header, sample_rate_hz);
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t timestamp_increase = current_header->timestamp -
|
||||
last_header->timestamp;
|
||||
|
||||
// |timestamp_increase| is invalid if this is the first packet. The effect is
|
||||
// that |buffered_audio_ms_| is not increased.
|
||||
if (last_packet_type_ == kUndefinedPacket) {
|
||||
timestamp_increase = 0;
|
||||
}
|
||||
|
||||
if (buffering_) {
|
||||
buffered_audio_ms_ += timestamp_increase * 1000 / sample_rate_hz;
|
||||
|
||||
// A timestamp that reflects the initial delay, while buffering.
|
||||
UpdatePlayoutTimestamp(*current_header, sample_rate_hz);
|
||||
|
||||
if (buffered_audio_ms_ >= initial_delay_ms_)
|
||||
buffering_ = false;
|
||||
}
|
||||
|
||||
if (current_header->sequenceNumber == last_header->sequenceNumber + 1) {
|
||||
// Two consecutive audio packets, the previous packet-type is audio, so we
|
||||
// can update |timestamp_step_|.
|
||||
if (last_packet_type_ == kAudioPacket)
|
||||
timestamp_step_ = timestamp_increase;
|
||||
RecordLastPacket(rtp_info, receive_timestamp, type);
|
||||
sync_stream->num_sync_packets = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
uint16_t packet_gap = current_header->sequenceNumber -
|
||||
last_header->sequenceNumber - 1;
|
||||
|
||||
// For smooth transitions leave a gap between audio and sync packets.
|
||||
sync_stream->num_sync_packets = last_packet_type_ == kSyncPacket ?
|
||||
packet_gap - 1 : packet_gap - 2;
|
||||
|
||||
// Do nothing if we haven't received any audio packet.
|
||||
if (sync_stream->num_sync_packets > 0 &&
|
||||
audio_payload_type_ != kInvalidPayloadType) {
|
||||
if (timestamp_step_ == 0) {
|
||||
// Make an estimate for |timestamp_step_| if it is not updated, yet.
|
||||
assert(packet_gap > 0);
|
||||
timestamp_step_ = timestamp_increase / (packet_gap + 1);
|
||||
}
|
||||
sync_stream->timestamp_step = timestamp_step_;
|
||||
|
||||
// Build the first sync-packet based on the current received packet.
|
||||
memcpy(&sync_stream->rtp_info, &rtp_info, sizeof(rtp_info));
|
||||
sync_stream->rtp_info.header.payloadType = audio_payload_type_;
|
||||
|
||||
uint16_t sequence_number_update = sync_stream->num_sync_packets + 1;
|
||||
uint32_t timestamp_update = timestamp_step_ * sequence_number_update;
|
||||
|
||||
// Rewind sequence number and timestamps. This will give a more accurate
|
||||
// description of the missing packets.
|
||||
//
|
||||
// Note that we leave a gap between the last packet in sync-stream and the
|
||||
// current received packet, so it should be compensated for in the following
|
||||
// computation of timestamps and sequence number.
|
||||
sync_stream->rtp_info.header.sequenceNumber -= sequence_number_update;
|
||||
sync_stream->receive_timestamp = receive_timestamp - timestamp_update;
|
||||
sync_stream->rtp_info.header.timestamp -= timestamp_update;
|
||||
sync_stream->rtp_info.header.payloadType = audio_payload_type_;
|
||||
} else {
|
||||
sync_stream->num_sync_packets = 0;
|
||||
}
|
||||
|
||||
RecordLastPacket(rtp_info, receive_timestamp, type);
|
||||
return;
|
||||
}
|
||||
|
||||
void InitialDelayManager::RecordLastPacket(const WebRtcRTPHeader& rtp_info,
|
||||
uint32_t receive_timestamp,
|
||||
PacketType type) {
|
||||
last_packet_type_ = type;
|
||||
last_receive_timestamp_ = receive_timestamp;
|
||||
memcpy(&last_packet_rtp_info_, &rtp_info, sizeof(rtp_info));
|
||||
}
|
||||
|
||||
void InitialDelayManager::LatePackets(
|
||||
uint32_t timestamp_now, SyncStream* sync_stream) {
|
||||
assert(sync_stream);
|
||||
sync_stream->num_sync_packets = 0;
|
||||
|
||||
// If there is no estimate of timestamp increment, |timestamp_step_|, then
|
||||
// we cannot estimate the number of late packets.
|
||||
// If the last packet has been CNG, estimating late packets is not meaningful,
|
||||
// as a CNG packet is on unknown length.
|
||||
// We can set a higher threshold if the last packet is CNG and continue
|
||||
// execution, but this is how ACM1 code was written.
|
||||
if (timestamp_step_ <= 0 ||
|
||||
last_packet_type_ == kCngPacket ||
|
||||
last_packet_type_ == kUndefinedPacket ||
|
||||
audio_payload_type_ == kInvalidPayloadType) // No audio packet received.
|
||||
return;
|
||||
|
||||
int num_late_packets = (timestamp_now - last_receive_timestamp_) /
|
||||
timestamp_step_;
|
||||
|
||||
if (num_late_packets < late_packet_threshold_)
|
||||
return;
|
||||
|
||||
int sync_offset = 1; // One gap at the end of the sync-stream.
|
||||
if (last_packet_type_ != kSyncPacket) {
|
||||
++sync_offset; // One more gap at the beginning of the sync-stream.
|
||||
--num_late_packets;
|
||||
}
|
||||
uint32_t timestamp_update = sync_offset * timestamp_step_;
|
||||
|
||||
sync_stream->num_sync_packets = num_late_packets;
|
||||
if (num_late_packets == 0)
|
||||
return;
|
||||
|
||||
// Build the first sync-packet in the sync-stream.
|
||||
memcpy(&sync_stream->rtp_info, &last_packet_rtp_info_,
|
||||
sizeof(last_packet_rtp_info_));
|
||||
|
||||
// Increase sequence number and timestamps.
|
||||
sync_stream->rtp_info.header.sequenceNumber += sync_offset;
|
||||
sync_stream->rtp_info.header.timestamp += timestamp_update;
|
||||
sync_stream->receive_timestamp = last_receive_timestamp_ + timestamp_update;
|
||||
sync_stream->timestamp_step = timestamp_step_;
|
||||
|
||||
// Sync-packets have audio payload-type.
|
||||
sync_stream->rtp_info.header.payloadType = audio_payload_type_;
|
||||
|
||||
uint16_t sequence_number_update = num_late_packets + sync_offset - 1;
|
||||
timestamp_update = sequence_number_update * timestamp_step_;
|
||||
|
||||
// Fake the last RTP, assuming the caller will inject the whole sync-stream.
|
||||
last_packet_rtp_info_.header.timestamp += timestamp_update;
|
||||
last_packet_rtp_info_.header.sequenceNumber += sequence_number_update;
|
||||
last_packet_rtp_info_.header.payloadType = audio_payload_type_;
|
||||
last_receive_timestamp_ += timestamp_update;
|
||||
|
||||
last_packet_type_ = kSyncPacket;
|
||||
return;
|
||||
}
|
||||
|
||||
bool InitialDelayManager::GetPlayoutTimestamp(uint32_t* playout_timestamp) {
|
||||
if (!buffering_) {
|
||||
return false;
|
||||
}
|
||||
*playout_timestamp = playout_timestamp_;
|
||||
return true;
|
||||
}
|
||||
|
||||
void InitialDelayManager::DisableBuffering() {
  // Stop accumulating initial-delay audio immediately.
  buffering_ = false;
}
|
||||
|
||||
void InitialDelayManager::UpdatePlayoutTimestamp(
|
||||
const RTPHeader& current_header, int sample_rate_hz) {
|
||||
playout_timestamp_ = current_header.timestamp - static_cast<uint32_t>(
|
||||
initial_delay_ms_ * sample_rate_hz / 1000);
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
|
||||
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class InitialDelayManager {
|
||||
public:
|
||||
enum PacketType {
|
||||
kUndefinedPacket, kCngPacket, kAvtPacket, kAudioPacket, kSyncPacket };
|
||||
|
||||
// Specifies a stream of sync-packets.
|
||||
struct SyncStream {
|
||||
SyncStream()
|
||||
: num_sync_packets(0),
|
||||
receive_timestamp(0),
|
||||
timestamp_step(0) {
|
||||
memset(&rtp_info, 0, sizeof(rtp_info));
|
||||
}
|
||||
|
||||
int num_sync_packets;
|
||||
|
||||
// RTP header of the first sync-packet in the sequence.
|
||||
WebRtcRTPHeader rtp_info;
|
||||
|
||||
// Received timestamp of the first sync-packet in the sequence.
|
||||
uint32_t receive_timestamp;
|
||||
|
||||
// Samples per packet.
|
||||
uint32_t timestamp_step;
|
||||
};
|
||||
|
||||
InitialDelayManager(int initial_delay_ms, int late_packet_threshold);
|
||||
|
||||
// Update with the last received RTP header, |header|, and received timestamp,
|
||||
// |received_timestamp|. |type| indicates the packet type. If codec is changed
|
||||
// since the last time |new_codec| should be true. |sample_rate_hz| is the
|
||||
// decoder's sampling rate in Hz. |header| has a field to store sampling rate
|
||||
// but we are not sure if that is properly set at the send side, and |header|
|
||||
// is declared constant in the caller of this function
|
||||
// (AcmReceiver::InsertPacket()). |sync_stream| contains information required
|
||||
// to generate a stream of sync packets.
|
||||
void UpdateLastReceivedPacket(const WebRtcRTPHeader& header,
|
||||
uint32_t receive_timestamp,
|
||||
PacketType type,
|
||||
bool new_codec,
|
||||
int sample_rate_hz,
|
||||
SyncStream* sync_stream);
|
||||
|
||||
// Based on the last received timestamp and given the current timestamp,
|
||||
// sequence of late (or perhaps missing) packets is computed.
|
||||
void LatePackets(uint32_t timestamp_now, SyncStream* sync_stream);
|
||||
|
||||
// Get playout timestamp.
|
||||
// Returns true if the timestamp is valid (when buffering), otherwise false.
|
||||
bool GetPlayoutTimestamp(uint32_t* playout_timestamp);
|
||||
|
||||
// True if buffered audio is less than the given initial delay (specified at
|
||||
// the constructor). Buffering might be disabled by the client of this class.
|
||||
bool buffering() { return buffering_; }
|
||||
|
||||
// Disable buffering in the class.
|
||||
void DisableBuffering();
|
||||
|
||||
// True if any packet received for buffering.
|
||||
bool PacketBuffered() { return last_packet_type_ != kUndefinedPacket; }
|
||||
|
||||
private:
|
||||
static const uint8_t kInvalidPayloadType = 0xFF;
|
||||
|
||||
// Update playout timestamps. While buffering, this is about
|
||||
// |initial_delay_ms| millisecond behind the latest received timestamp.
|
||||
void UpdatePlayoutTimestamp(const RTPHeader& current_header,
|
||||
int sample_rate_hz);
|
||||
|
||||
// Record an RTP headr and related parameter
|
||||
void RecordLastPacket(const WebRtcRTPHeader& rtp_info,
|
||||
uint32_t receive_timestamp,
|
||||
PacketType type);
|
||||
|
||||
PacketType last_packet_type_;
|
||||
WebRtcRTPHeader last_packet_rtp_info_;
|
||||
uint32_t last_receive_timestamp_;
|
||||
uint32_t timestamp_step_;
|
||||
uint8_t audio_payload_type_;
|
||||
const int initial_delay_ms_;
|
||||
int buffered_audio_ms_;
|
||||
bool buffering_;
|
||||
|
||||
// During the initial phase where packets are being accumulated and silence
|
||||
// is played out, |playout_ts| is a timestamp which is equal to
|
||||
// |initial_delay_ms_| milliseconds earlier than the most recently received
|
||||
// RTP timestamp.
|
||||
uint32_t playout_timestamp_;
|
||||
|
||||
// If the number of late packets exceed this value (computed based on current
|
||||
// timestamp and last received timestamp), sequence of sync-packets is
|
||||
// specified.
|
||||
const int late_packet_threshold_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_INITIAL_DELAY_MANAGER_H_
|
||||
@@ -0,0 +1,377 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "webrtc/modules/audio_coding/main/acm2/initial_delay_manager.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
namespace {
|
||||
|
||||
const uint8_t kAudioPayloadType = 0;
|
||||
const uint8_t kCngPayloadType = 1;
|
||||
const uint8_t kAvtPayloadType = 2;
|
||||
|
||||
const int kSamplingRateHz = 16000;
|
||||
const int kInitDelayMs = 200;
|
||||
const int kFrameSizeMs = 20;
|
||||
const uint32_t kTimestampStep = kFrameSizeMs * kSamplingRateHz / 1000;
|
||||
const int kLatePacketThreshold = 5;
|
||||
|
||||
void InitRtpInfo(WebRtcRTPHeader* rtp_info) {
|
||||
memset(rtp_info, 0, sizeof(*rtp_info));
|
||||
rtp_info->header.markerBit = false;
|
||||
rtp_info->header.payloadType = kAudioPayloadType;
|
||||
rtp_info->header.sequenceNumber = 1234;
|
||||
rtp_info->header.timestamp = 0xFFFFFFFD; // Close to wrap around.
|
||||
rtp_info->header.ssrc = 0x87654321; // Arbitrary.
|
||||
rtp_info->header.numCSRCs = 0; // Arbitrary.
|
||||
rtp_info->header.paddingLength = 0;
|
||||
rtp_info->header.headerLength = sizeof(RTPHeader);
|
||||
rtp_info->header.payload_type_frequency = kSamplingRateHz;
|
||||
rtp_info->header.extension.absoluteSendTime = 0;
|
||||
rtp_info->header.extension.transmissionTimeOffset = 0;
|
||||
rtp_info->frameType = kAudioFrameSpeech;
|
||||
}
|
||||
|
||||
void ForwardRtpHeader(int n,
|
||||
WebRtcRTPHeader* rtp_info,
|
||||
uint32_t* rtp_receive_timestamp) {
|
||||
rtp_info->header.sequenceNumber += n;
|
||||
rtp_info->header.timestamp += n * kTimestampStep;
|
||||
*rtp_receive_timestamp += n * kTimestampStep;
|
||||
}
|
||||
|
||||
void NextRtpHeader(WebRtcRTPHeader* rtp_info,
|
||||
uint32_t* rtp_receive_timestamp) {
|
||||
ForwardRtpHeader(1, rtp_info, rtp_receive_timestamp);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
class InitialDelayManagerTest : public ::testing::Test {
|
||||
protected:
|
||||
InitialDelayManagerTest()
|
||||
: manager_(new InitialDelayManager(kInitDelayMs, kLatePacketThreshold)),
|
||||
rtp_receive_timestamp_(1111) { } // Arbitrary starting point.
|
||||
|
||||
virtual void SetUp() {
|
||||
ASSERT_TRUE(manager_.get() != NULL);
|
||||
InitRtpInfo(&rtp_info_);
|
||||
}
|
||||
|
||||
void GetNextRtpHeader(WebRtcRTPHeader* rtp_info,
|
||||
uint32_t* rtp_receive_timestamp) const {
|
||||
memcpy(rtp_info, &rtp_info_, sizeof(*rtp_info));
|
||||
*rtp_receive_timestamp = rtp_receive_timestamp_;
|
||||
NextRtpHeader(rtp_info, rtp_receive_timestamp);
|
||||
}
|
||||
|
||||
scoped_ptr<InitialDelayManager> manager_;
|
||||
WebRtcRTPHeader rtp_info_;
|
||||
uint32_t rtp_receive_timestamp_;
|
||||
};
|
||||
|
||||
TEST_F(InitialDelayManagerTest, Init) {
|
||||
EXPECT_TRUE(manager_->buffering());
|
||||
EXPECT_FALSE(manager_->PacketBuffered());
|
||||
manager_->DisableBuffering();
|
||||
EXPECT_FALSE(manager_->buffering());
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
|
||||
// Call before any packet inserted.
|
||||
manager_->LatePackets(0x6789ABCD, &sync_stream); // Arbitrary but large
|
||||
// receive timestamp.
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Insert non-audio packets, a CNG and DTMF.
|
||||
rtp_info_.header.payloadType = kCngPayloadType;
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kCngPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
ForwardRtpHeader(5, &rtp_info_, &rtp_receive_timestamp_);
|
||||
rtp_info_.header.payloadType = kAvtPayloadType;
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAvtPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
// Gap in sequence numbers but no audio received, sync-stream should be empty.
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
manager_->LatePackets(0x45678987, &sync_stream); // Large arbitrary receive
|
||||
// timestamp.
|
||||
// |manager_| has no estimate of timestamp-step and has not received any
|
||||
// audio packet.
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
rtp_info_.header.payloadType = kAudioPayloadType;
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Call LatePAcket() after only one packet inserted.
|
||||
manager_->LatePackets(0x6789ABCD, &sync_stream); // Arbitrary but large
|
||||
// receive timestamp.
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Gap in timestamp, but this packet is also flagged as "new," therefore,
|
||||
// expecting empty sync-stream.
|
||||
ForwardRtpHeader(5, &rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
}
|
||||
|
||||
TEST_F(InitialDelayManagerTest, MissingPacket) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Second packet.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Third packet, missing packets start from here.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
|
||||
// First sync-packet in sync-stream is one after the above packet.
|
||||
WebRtcRTPHeader expected_rtp_info;
|
||||
uint32_t expected_receive_timestamp;
|
||||
GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
|
||||
|
||||
const int kNumMissingPackets = 10;
|
||||
ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
|
||||
EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
|
||||
sizeof(expected_rtp_info)));
|
||||
EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
|
||||
EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
|
||||
}
|
||||
|
||||
// There hasn't been any consecutive packets to estimate timestamp-step.
|
||||
TEST_F(InitialDelayManagerTest, MissingPacketEstimateTimestamp) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Second packet, missing packets start here.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
|
||||
// First sync-packet in sync-stream is one after the above.
|
||||
WebRtcRTPHeader expected_rtp_info;
|
||||
uint32_t expected_receive_timestamp;
|
||||
GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
|
||||
|
||||
const int kNumMissingPackets = 10;
|
||||
ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
|
||||
EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
|
||||
sizeof(expected_rtp_info)));
|
||||
}
|
||||
|
||||
TEST_F(InitialDelayManagerTest, MissingPacketWithCng) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Second packet as CNG.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
rtp_info_.header.payloadType = kCngPayloadType;
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kCngPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Audio packet after CNG. Missing packets start from this packet.
|
||||
rtp_info_.header.payloadType = kAudioPayloadType;
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
|
||||
// Timestamps are increased higher than regular packet.
|
||||
const uint32_t kCngTimestampStep = 5 * kTimestampStep;
|
||||
rtp_info_.header.timestamp += kCngTimestampStep;
|
||||
rtp_receive_timestamp_ += kCngTimestampStep;
|
||||
|
||||
// First sync-packet in sync-stream is the one after the above packet.
|
||||
WebRtcRTPHeader expected_rtp_info;
|
||||
uint32_t expected_receive_timestamp;
|
||||
GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
|
||||
|
||||
const int kNumMissingPackets = 10;
|
||||
ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(kNumMissingPackets - 2, sync_stream.num_sync_packets);
|
||||
EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
|
||||
sizeof(expected_rtp_info)));
|
||||
EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
|
||||
EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
|
||||
}
|
||||
|
||||
TEST_F(InitialDelayManagerTest, LatePacket) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Second packet.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Timestamp increment for 10ms;
|
||||
const uint32_t kTimestampStep10Ms = kSamplingRateHz / 100;
|
||||
|
||||
// 10 ms after the second packet is inserted.
|
||||
uint32_t timestamp_now = rtp_receive_timestamp_ + kTimestampStep10Ms;
|
||||
|
||||
// Third packet, late packets start from this packet.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
|
||||
// First sync-packet in sync-stream, which is one after the above packet.
|
||||
WebRtcRTPHeader expected_rtp_info;
|
||||
uint32_t expected_receive_timestamp;
|
||||
GetNextRtpHeader(&expected_rtp_info, &expected_receive_timestamp);
|
||||
|
||||
const int kLatePacketThreshold = 5;
|
||||
|
||||
int expected_num_late_packets = kLatePacketThreshold - 1;
|
||||
for (int k = 0; k < 2; ++k) {
|
||||
for (int n = 1; n < kLatePacketThreshold * kFrameSizeMs / 10; ++n) {
|
||||
manager_->LatePackets(timestamp_now, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets) <<
|
||||
"try " << k << " loop number " << n;
|
||||
timestamp_now += kTimestampStep10Ms;
|
||||
}
|
||||
manager_->LatePackets(timestamp_now, &sync_stream);
|
||||
|
||||
EXPECT_EQ(expected_num_late_packets, sync_stream.num_sync_packets) <<
|
||||
"try " << k;
|
||||
EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step) <<
|
||||
"try " << k;
|
||||
EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp) <<
|
||||
"try " << k;
|
||||
EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
|
||||
sizeof(expected_rtp_info)));
|
||||
|
||||
timestamp_now += kTimestampStep10Ms;
|
||||
|
||||
// |manger_| assumes the |sync_stream| obtained by LatePacket() is fully
|
||||
// injected. The last injected packet is sync-packet, therefore, there will
|
||||
// not be any gap between sync stream of this and the next iteration.
|
||||
ForwardRtpHeader(sync_stream.num_sync_packets, &expected_rtp_info,
|
||||
&expected_receive_timestamp);
|
||||
expected_num_late_packets = kLatePacketThreshold;
|
||||
}
|
||||
|
||||
// Test "no-gap" for missing packet after late packet.
|
||||
// |expected_rtp_info| is the expected sync-packet if any packet is missing.
|
||||
memcpy(&rtp_info_, &expected_rtp_info, sizeof(rtp_info_));
|
||||
rtp_receive_timestamp_ = expected_receive_timestamp;
|
||||
|
||||
int kNumMissingPackets = 3; // Arbitrary.
|
||||
ForwardRtpHeader(kNumMissingPackets, &rtp_info_, &rtp_receive_timestamp_);
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
|
||||
// Note that there is one packet gap between the last sync-packet and the
|
||||
// latest inserted packet.
|
||||
EXPECT_EQ(kNumMissingPackets - 1, sync_stream.num_sync_packets);
|
||||
EXPECT_EQ(kTimestampStep, sync_stream.timestamp_step);
|
||||
EXPECT_EQ(expected_receive_timestamp, sync_stream.receive_timestamp);
|
||||
EXPECT_EQ(0, memcmp(&expected_rtp_info, &sync_stream.rtp_info,
|
||||
sizeof(expected_rtp_info)));
|
||||
}
|
||||
|
||||
TEST_F(InitialDelayManagerTest, NoLatePacketAfterCng) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
|
||||
// First packet.
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket, true,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Second packet as CNG.
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
const uint8_t kCngPayloadType = 1; // Arbitrary.
|
||||
rtp_info_.header.payloadType = kCngPayloadType;
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kCngPacket, false,
|
||||
kSamplingRateHz, &sync_stream);
|
||||
ASSERT_EQ(0, sync_stream.num_sync_packets);
|
||||
|
||||
// Forward the time more then |kLatePacketThreshold| packets.
|
||||
uint32_t timestamp_now = rtp_receive_timestamp_ + kTimestampStep * (3 +
|
||||
kLatePacketThreshold);
|
||||
|
||||
manager_->LatePackets(timestamp_now, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
}
|
||||
|
||||
TEST_F(InitialDelayManagerTest, BufferingAudio) {
|
||||
InitialDelayManager::SyncStream sync_stream;
|
||||
|
||||
// Very first packet is not counted in calculation of buffered audio.
|
||||
for (int n = 0; n < kInitDelayMs / kFrameSizeMs; ++n) {
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket,
|
||||
n == 0, kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
EXPECT_TRUE(manager_->buffering());
|
||||
const uint32_t expected_playout_timestamp = rtp_info_.header.timestamp -
|
||||
kInitDelayMs * kSamplingRateHz / 1000;
|
||||
uint32_t actual_playout_timestamp = 0;
|
||||
EXPECT_TRUE(manager_->GetPlayoutTimestamp(&actual_playout_timestamp));
|
||||
EXPECT_EQ(expected_playout_timestamp, actual_playout_timestamp);
|
||||
NextRtpHeader(&rtp_info_, &rtp_receive_timestamp_);
|
||||
}
|
||||
|
||||
manager_->UpdateLastReceivedPacket(rtp_info_, rtp_receive_timestamp_,
|
||||
InitialDelayManager::kAudioPacket,
|
||||
false, kSamplingRateHz, &sync_stream);
|
||||
EXPECT_EQ(0, sync_stream.num_sync_packets);
|
||||
EXPECT_FALSE(manager_->buffering());
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
229
jni/webrtc/modules/audio_coding/main/acm2/nack.cc
Normal file
229
jni/webrtc/modules/audio_coding/main/acm2/nack.cc
Normal file
@@ -0,0 +1,229 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/nack.h"
|
||||
|
||||
#include <assert.h> // For assert.
|
||||
|
||||
#include <algorithm> // For std::max.
|
||||
|
||||
#include "webrtc/modules/interface/module_common_types.h"
|
||||
#include "webrtc/system_wrappers/interface/logging.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
namespace {
|
||||
|
||||
const int kDefaultSampleRateKhz = 48;
|
||||
const int kDefaultPacketSizeMs = 20;
|
||||
|
||||
} // namespace
|
||||
|
||||
Nack::Nack(int nack_threshold_packets)
|
||||
: nack_threshold_packets_(nack_threshold_packets),
|
||||
sequence_num_last_received_rtp_(0),
|
||||
timestamp_last_received_rtp_(0),
|
||||
any_rtp_received_(false),
|
||||
sequence_num_last_decoded_rtp_(0),
|
||||
timestamp_last_decoded_rtp_(0),
|
||||
any_rtp_decoded_(false),
|
||||
sample_rate_khz_(kDefaultSampleRateKhz),
|
||||
samples_per_packet_(sample_rate_khz_ * kDefaultPacketSizeMs),
|
||||
max_nack_list_size_(kNackListSizeLimit) {}
|
||||
|
||||
Nack* Nack::Create(int nack_threshold_packets) {
|
||||
return new Nack(nack_threshold_packets);
|
||||
}
|
||||
|
||||
void Nack::UpdateSampleRate(int sample_rate_hz) {
|
||||
assert(sample_rate_hz > 0);
|
||||
sample_rate_khz_ = sample_rate_hz / 1000;
|
||||
}
|
||||
|
||||
void Nack::UpdateLastReceivedPacket(uint16_t sequence_number,
|
||||
uint32_t timestamp) {
|
||||
// Just record the value of sequence number and timestamp if this is the
|
||||
// first packet.
|
||||
if (!any_rtp_received_) {
|
||||
sequence_num_last_received_rtp_ = sequence_number;
|
||||
timestamp_last_received_rtp_ = timestamp;
|
||||
any_rtp_received_ = true;
|
||||
// If no packet is decoded, to have a reasonable estimate of time-to-play
|
||||
// use the given values.
|
||||
if (!any_rtp_decoded_) {
|
||||
sequence_num_last_decoded_rtp_ = sequence_number;
|
||||
timestamp_last_decoded_rtp_ = timestamp;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (sequence_number == sequence_num_last_received_rtp_)
|
||||
return;
|
||||
|
||||
// Received RTP should not be in the list.
|
||||
nack_list_.erase(sequence_number);
|
||||
|
||||
// If this is an old sequence number, no more action is required, return.
|
||||
if (IsNewerSequenceNumber(sequence_num_last_received_rtp_, sequence_number))
|
||||
return;
|
||||
|
||||
UpdateSamplesPerPacket(sequence_number, timestamp);
|
||||
|
||||
UpdateList(sequence_number);
|
||||
|
||||
sequence_num_last_received_rtp_ = sequence_number;
|
||||
timestamp_last_received_rtp_ = timestamp;
|
||||
LimitNackListSize();
|
||||
}
|
||||
|
||||
void Nack::UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp,
|
||||
uint32_t timestamp_current_received_rtp) {
|
||||
uint32_t timestamp_increase = timestamp_current_received_rtp -
|
||||
timestamp_last_received_rtp_;
|
||||
uint16_t sequence_num_increase = sequence_number_current_received_rtp -
|
||||
sequence_num_last_received_rtp_;
|
||||
|
||||
samples_per_packet_ = timestamp_increase / sequence_num_increase;
|
||||
}
|
||||
|
||||
void Nack::UpdateList(uint16_t sequence_number_current_received_rtp) {
|
||||
// Some of the packets which were considered late, now are considered missing.
|
||||
ChangeFromLateToMissing(sequence_number_current_received_rtp);
|
||||
|
||||
if (IsNewerSequenceNumber(sequence_number_current_received_rtp,
|
||||
sequence_num_last_received_rtp_ + 1))
|
||||
AddToList(sequence_number_current_received_rtp);
|
||||
}
|
||||
|
||||
void Nack::ChangeFromLateToMissing(
|
||||
uint16_t sequence_number_current_received_rtp) {
|
||||
NackList::const_iterator lower_bound = nack_list_.lower_bound(
|
||||
static_cast<uint16_t>(sequence_number_current_received_rtp -
|
||||
nack_threshold_packets_));
|
||||
|
||||
for (NackList::iterator it = nack_list_.begin(); it != lower_bound; ++it)
|
||||
it->second.is_missing = true;
|
||||
}
|
||||
|
||||
uint32_t Nack::EstimateTimestamp(uint16_t sequence_num) {
|
||||
uint16_t sequence_num_diff = sequence_num - sequence_num_last_received_rtp_;
|
||||
return sequence_num_diff * samples_per_packet_ + timestamp_last_received_rtp_;
|
||||
}
|
||||
|
||||
void Nack::AddToList(uint16_t sequence_number_current_received_rtp) {
|
||||
assert(!any_rtp_decoded_ || IsNewerSequenceNumber(
|
||||
sequence_number_current_received_rtp, sequence_num_last_decoded_rtp_));
|
||||
|
||||
// Packets with sequence numbers older than |upper_bound_missing| are
|
||||
// considered missing, and the rest are considered late.
|
||||
uint16_t upper_bound_missing = sequence_number_current_received_rtp -
|
||||
nack_threshold_packets_;
|
||||
|
||||
for (uint16_t n = sequence_num_last_received_rtp_ + 1;
|
||||
IsNewerSequenceNumber(sequence_number_current_received_rtp, n); ++n) {
|
||||
bool is_missing = IsNewerSequenceNumber(upper_bound_missing, n);
|
||||
uint32_t timestamp = EstimateTimestamp(n);
|
||||
NackElement nack_element(TimeToPlay(timestamp), timestamp, is_missing);
|
||||
nack_list_.insert(nack_list_.end(), std::make_pair(n, nack_element));
|
||||
}
|
||||
}
|
||||
|
||||
void Nack::UpdateEstimatedPlayoutTimeBy10ms() {
|
||||
while (!nack_list_.empty() &&
|
||||
nack_list_.begin()->second.time_to_play_ms <= 10)
|
||||
nack_list_.erase(nack_list_.begin());
|
||||
|
||||
for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end(); ++it)
|
||||
it->second.time_to_play_ms -= 10;
|
||||
}
|
||||
|
||||
void Nack::UpdateLastDecodedPacket(uint16_t sequence_number,
|
||||
uint32_t timestamp) {
|
||||
if (IsNewerSequenceNumber(sequence_number, sequence_num_last_decoded_rtp_) ||
|
||||
!any_rtp_decoded_) {
|
||||
sequence_num_last_decoded_rtp_ = sequence_number;
|
||||
timestamp_last_decoded_rtp_ = timestamp;
|
||||
// Packets in the list with sequence numbers less than the
|
||||
// sequence number of the decoded RTP should be removed from the lists.
|
||||
// They will be discarded by the jitter buffer if they arrive.
|
||||
nack_list_.erase(nack_list_.begin(), nack_list_.upper_bound(
|
||||
sequence_num_last_decoded_rtp_));
|
||||
|
||||
// Update estimated time-to-play.
|
||||
for (NackList::iterator it = nack_list_.begin(); it != nack_list_.end();
|
||||
++it)
|
||||
it->second.time_to_play_ms = TimeToPlay(it->second.estimated_timestamp);
|
||||
} else {
|
||||
assert(sequence_number == sequence_num_last_decoded_rtp_);
|
||||
|
||||
// Same sequence number as before. 10 ms is elapsed, update estimations for
|
||||
// time-to-play.
|
||||
UpdateEstimatedPlayoutTimeBy10ms();
|
||||
|
||||
// Update timestamp for better estimate of time-to-play, for packets which
|
||||
// are added to NACK list later on.
|
||||
timestamp_last_decoded_rtp_ += sample_rate_khz_ * 10;
|
||||
}
|
||||
any_rtp_decoded_ = true;
|
||||
}
|
||||
|
||||
Nack::NackList Nack::GetNackList() const {
|
||||
return nack_list_;
|
||||
}
|
||||
|
||||
void Nack::Reset() {
|
||||
nack_list_.clear();
|
||||
|
||||
sequence_num_last_received_rtp_ = 0;
|
||||
timestamp_last_received_rtp_ = 0;
|
||||
any_rtp_received_ = false;
|
||||
sequence_num_last_decoded_rtp_ = 0;
|
||||
timestamp_last_decoded_rtp_ = 0;
|
||||
any_rtp_decoded_ = false;
|
||||
sample_rate_khz_ = kDefaultSampleRateKhz;
|
||||
samples_per_packet_ = sample_rate_khz_ * kDefaultPacketSizeMs;
|
||||
}
|
||||
|
||||
int Nack::SetMaxNackListSize(size_t max_nack_list_size) {
|
||||
if (max_nack_list_size == 0 || max_nack_list_size > kNackListSizeLimit)
|
||||
return -1;
|
||||
max_nack_list_size_ = max_nack_list_size;
|
||||
LimitNackListSize();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void Nack::LimitNackListSize() {
|
||||
uint16_t limit = sequence_num_last_received_rtp_ -
|
||||
static_cast<uint16_t>(max_nack_list_size_) - 1;
|
||||
nack_list_.erase(nack_list_.begin(), nack_list_.upper_bound(limit));
|
||||
}
|
||||
|
||||
int Nack::TimeToPlay(uint32_t timestamp) const {
|
||||
uint32_t timestamp_increase = timestamp - timestamp_last_decoded_rtp_;
|
||||
return timestamp_increase / sample_rate_khz_;
|
||||
}
|
||||
|
||||
// We don't erase elements with time-to-play shorter than round-trip-time.
|
||||
std::vector<uint16_t> Nack::GetNackList(int round_trip_time_ms) const {
|
||||
std::vector<uint16_t> sequence_numbers;
|
||||
for (NackList::const_iterator it = nack_list_.begin(); it != nack_list_.end();
|
||||
++it) {
|
||||
if (it->second.is_missing &&
|
||||
it->second.time_to_play_ms > round_trip_time_ms)
|
||||
sequence_numbers.push_back(it->first);
|
||||
}
|
||||
return sequence_numbers;
|
||||
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
213
jni/webrtc/modules/audio_coding/main/acm2/nack.h
Normal file
213
jni/webrtc/modules/audio_coding/main/acm2/nack.h
Normal file
@@ -0,0 +1,213 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_NACK_H_
|
||||
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_NACK_H_
|
||||
|
||||
#include <vector>
|
||||
#include <map>
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
#include "webrtc/test/testsupport/gtest_prod_util.h"
|
||||
|
||||
//
|
||||
// The Nack class keeps track of the lost packets, an estimate of time-to-play
|
||||
// for each packet is also given.
|
||||
//
|
||||
// Every time a packet is pushed into NetEq, LastReceivedPacket() has to be
|
||||
// called to update the NACK list.
|
||||
//
|
||||
// Every time 10ms audio is pulled from NetEq LastDecodedPacket() should be
|
||||
// called, and time-to-play is updated at that moment.
|
||||
//
|
||||
// If packet N is received, any packet prior to |N - NackThreshold| which has not
|
||||
// arrived is considered lost, and should be labeled as "missing" (the size of
|
||||
// the list might be limited and older packet eliminated from the list). Packets
|
||||
// |N - NackThreshold|, |N - NackThreshold + 1|, ..., |N - 1| are considered
|
||||
// "late." A "late" packet with sequence number K is changed to "missing" any
|
||||
// time a packet with sequence number newer than |K + NackThreshold| arrives.
|
||||
//
|
||||
// The Nack class has to know about the sample rate of the packets to compute
|
||||
// time-to-play. So sample rate should be set as soon as the first packet is
|
||||
// received. If there is a change in the receive codec (sender changes codec)
|
||||
// then Nack should be reset. This is because NetEQ would flush its buffer and
|
||||
// retransmission is meaningless for old packets. Therefore, in that case,
|
||||
// after reset the sampling rate has to be updated.
|
||||
//
|
||||
// Thread Safety
|
||||
// =============
|
||||
// Please note that this class is not thread safe. The class must be protected
|
||||
// if different APIs are called from different threads.
|
||||
//
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
class Nack {
|
||||
public:
|
||||
// A limit for the size of the NACK list.
|
||||
static const size_t kNackListSizeLimit = 500; // 10 seconds for 20 ms frame
|
||||
// packets.
|
||||
// Factory method.
|
||||
static Nack* Create(int nack_threshold_packets);
|
||||
|
||||
~Nack() {}
|
||||
|
||||
// Set a maximum for the size of the NACK list. If the last received packet
|
||||
// has sequence number of N, then NACK list will not contain any element
|
||||
// with sequence number earlier than N - |max_nack_list_size|.
|
||||
//
|
||||
// The largest maximum size is defined by |kNackListSizeLimit|
|
||||
int SetMaxNackListSize(size_t max_nack_list_size);
|
||||
|
||||
// Set the sampling rate.
|
||||
//
|
||||
// If associated sampling rate of the received packets is changed, call this
|
||||
// function to update sampling rate. Note that if there is any change in
|
||||
// received codec then NetEq will flush its buffer and NACK has to be reset.
|
||||
// After Reset() is called sampling rate has to be set.
|
||||
void UpdateSampleRate(int sample_rate_hz);
|
||||
|
||||
// Update the sequence number and the timestamp of the last decoded RTP. This
|
||||
// API should be called every time 10 ms audio is pulled from NetEq.
|
||||
void UpdateLastDecodedPacket(uint16_t sequence_number, uint32_t timestamp);
|
||||
|
||||
// Update the sequence number and the timestamp of the last received RTP. This
|
||||
// API should be called every time a packet is pushed into the ACM.
|
||||
void UpdateLastReceivedPacket(uint16_t sequence_number, uint32_t timestamp);
|
||||
|
||||
// Get a list of "missing" packets which have expected time-to-play larger
|
||||
// than the given round-trip-time (in milliseconds).
|
||||
// Note: Late packets are not included.
|
||||
std::vector<uint16_t> GetNackList(int round_trip_time_ms) const;
|
||||
|
||||
// Reset to default values. The NACK list is cleared.
|
||||
// |nack_threshold_packets_| & |max_nack_list_size_| preserve their values.
|
||||
void Reset();
|
||||
|
||||
private:
|
||||
// This test need to access the private method GetNackList().
|
||||
FRIEND_TEST_ALL_PREFIXES(NackTest, EstimateTimestampAndTimeToPlay);
|
||||
|
||||
// Bookkeeping entry for one NACK-tracked packet: how long until it is due
// to be played out, where we believe it sits in the timestamp stream, and
// whether it has been promoted from "late" to "missing".
struct NackElement {
  NackElement(int initial_time_to_play_ms,
              uint32_t initial_timestamp,
              bool missing)
      : time_to_play_ms(initial_time_to_play_ms),
        estimated_timestamp(initial_timestamp),
        is_missing(missing) {}

  // Estimated time (ms) remaining until this packet would be decoded;
  // refreshed every time the jitter buffer decodes a packet.
  int time_to_play_ms;

  // Best guess of the missing packet's RTP timestamp, used to derive
  // |time_to_play_ms|. The guess may be slightly off if the frame size
  // changed between the last received packet and the missing one; that
  // only causes a minor error in the time-to-play estimate and has a
  // negligible effect on NACK performance.
  uint32_t estimated_timestamp;

  // True once the packet is classified as missing; false while it is
  // merely considered late.
  bool is_missing;
};
|
||||
|
||||
class NackListCompare {
|
||||
public:
|
||||
bool operator() (uint16_t sequence_number_old,
|
||||
uint16_t sequence_number_new) const {
|
||||
return IsNewerSequenceNumber(sequence_number_new, sequence_number_old);
|
||||
}
|
||||
};
|
||||
|
||||
typedef std::map<uint16_t, NackElement, NackListCompare> NackList;
|
||||
|
||||
// Constructor.
|
||||
explicit Nack(int nack_threshold_packets);
|
||||
|
||||
// This API is used only for testing to assess whether time-to-play is
|
||||
// computed correctly.
|
||||
NackList GetNackList() const;
|
||||
|
||||
// Given the |sequence_number_current_received_rtp| of currently received RTP,
|
||||
// recognize packets which have not arrived and add them to the list.
|
||||
void AddToList(uint16_t sequence_number_current_received_rtp);
|
||||
|
||||
// This function subtracts 10 ms of time-to-play for all packets in NACK list.
|
||||
// This is called when 10 ms elapsed with no new RTP packet decoded.
|
||||
void UpdateEstimatedPlayoutTimeBy10ms();
|
||||
|
||||
// Given the |sequence_number_current_received_rtp| and
|
||||
// |timestamp_current_received_rtp| of currently received RTP update number
|
||||
// of samples per packet.
|
||||
void UpdateSamplesPerPacket(uint16_t sequence_number_current_received_rtp,
|
||||
uint32_t timestamp_current_received_rtp);
|
||||
|
||||
// Given the |sequence_number_current_received_rtp| of currently received RTP
|
||||
// update the list. That is; some packets will change from late to missing,
|
||||
// some packets are inserted as missing and some inserted as late.
|
||||
void UpdateList(uint16_t sequence_number_current_received_rtp);
|
||||
|
||||
// Packets which are considered late for too long (according to
|
||||
// |nack_threshold_packets_|) are flagged as missing.
|
||||
void ChangeFromLateToMissing(uint16_t sequence_number_current_received_rtp);
|
||||
|
||||
// Packets which have sequence number older that
|
||||
// |sequence_num_last_received_rtp_| - |max_nack_list_size_| are removed
|
||||
// from the NACK list.
|
||||
void LimitNackListSize();
|
||||
|
||||
// Estimate timestamp of a missing packet given its sequence number.
|
||||
uint32_t EstimateTimestamp(uint16_t sequence_number);
|
||||
|
||||
// Compute time-to-play given a timestamp.
|
||||
int TimeToPlay(uint32_t timestamp) const;
|
||||
|
||||
// If packet N is arrived, any packet prior to N - |nack_threshold_packets_|
|
||||
// which is not arrived is considered missing, and should be in NACK list.
|
||||
// Also any packet in the range of N-1 and N - |nack_threshold_packets_|,
|
||||
// exclusive, which has not arrived, is considered late and should be
|
||||
// in the list of late packets.
|
||||
const int nack_threshold_packets_;
|
||||
|
||||
// Valid if a packet is received.
|
||||
uint16_t sequence_num_last_received_rtp_;
|
||||
uint32_t timestamp_last_received_rtp_;
|
||||
bool any_rtp_received_; // If any packet received.
|
||||
|
||||
// Valid if a packet is decoded.
|
||||
uint16_t sequence_num_last_decoded_rtp_;
|
||||
uint32_t timestamp_last_decoded_rtp_;
|
||||
bool any_rtp_decoded_; // If any packet decoded.
|
||||
|
||||
int sample_rate_khz_; // Sample rate in kHz.
|
||||
|
||||
// Number of samples per packet. We update this every time we receive a
|
||||
// packet, not only for consecutive packets.
|
||||
int samples_per_packet_;
|
||||
|
||||
// A list of missing packets to be retransmitted. Components of the list
|
||||
// contain the sequence number of missing packets and the estimated time that
|
||||
// each pack is going to be played out.
|
||||
NackList nack_list_;
|
||||
|
||||
// NACK list will not keep track of missing packets prior to
|
||||
// |sequence_num_last_received_rtp_| - |max_nack_list_size_|.
|
||||
size_t max_nack_list_size_;
|
||||
};
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
|
||||
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_ACM2_NACK_H_
|
||||
486
jni/webrtc/modules/audio_coding/main/acm2/nack_unittest.cc
Normal file
486
jni/webrtc/modules/audio_coding/main/acm2/nack_unittest.cc
Normal file
@@ -0,0 +1,486 @@
|
||||
/*
|
||||
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "webrtc/modules/audio_coding/main/acm2/nack.h"
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "gtest/gtest.h"
|
||||
#include "webrtc/typedefs.h"
|
||||
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
|
||||
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
namespace acm2 {
|
||||
|
||||
namespace {
|
||||
|
||||
const int kNackThreshold = 3;
|
||||
const int kSampleRateHz = 16000;
|
||||
const int kPacketSizeMs = 30;
|
||||
const uint32_t kTimestampIncrement = 480; // 30 ms.
|
||||
const int kShortRoundTripTimeMs = 1;
|
||||
|
||||
// Returns true iff |nack_list| contains exactly the |num_lost_packets|
// sequence numbers stored in |lost_sequence_numbers| (order-insensitive;
// sizes must match exactly).
bool IsNackListCorrect(const std::vector<uint16_t>& nack_list,
                       const uint16_t* lost_sequence_numbers,
                       size_t num_lost_packets) {
  if (nack_list.size() != num_lost_packets)
    return false;

  // Every NACKed sequence number must appear among the expected losses.
  const uint16_t* lost_end = lost_sequence_numbers + num_lost_packets;
  for (size_t k = 0; k < nack_list.size(); ++k) {
    if (std::find(lost_sequence_numbers, lost_end, nack_list[k]) == lost_end)
      return false;
  }
  return true;
}
|
||||
|
||||
} // namespace
|
||||
|
||||
// With an uninterrupted, in-order packet stream the NACK list must stay
// empty: no sequence-number gap is ever observed.
TEST(NackTest, EmptyListWhenNoPacketLoss) {
  scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
  nack->UpdateSampleRate(kSampleRateHz);

  int seq_num = 1;
  uint32_t timestamp = 0;

  std::vector<uint16_t> nack_list;
  for (int n = 0; n < 100; n++) {
    nack->UpdateLastReceivedPacket(seq_num, timestamp);
    seq_num++;
    timestamp += kTimestampIncrement;
    // GetNackList() is const; the original's extra call before the
    // increment had its result immediately overwritten and was removed.
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());
  }
}
|
||||
|
||||
// Out-of-order arrival within the NACK threshold must not generate NACKs:
// such packets are only "late", never "missing".
TEST(NackTest, NoNackIfReorderWithinNackThreshold) {
  scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
  nack->UpdateSampleRate(kSampleRateHz);

  int seq_num = 1;
  uint32_t timestamp = 0;
  std::vector<uint16_t> nack_list;

  nack->UpdateLastReceivedPacket(seq_num, timestamp);
  nack_list = nack->GetNackList(kShortRoundTripTimeMs);
  EXPECT_TRUE(nack_list.empty());

  // Deliver the next |kNackThreshold + 1| packets in reverse order; every
  // observed gap stays within the threshold, so the list must stay empty.
  for (int offset = kNackThreshold + 1; offset > 0; --offset) {
    nack->UpdateLastReceivedPacket(seq_num + offset,
                                   timestamp + offset * kTimestampIncrement);
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());
  }
}
|
||||
|
||||
// Once late packets cross the threshold they appear in the NACK list, and
// afterwards the list content stays stable as newer packets keep arriving.
TEST(NackTest, LatePacketsMovedToNackThenNackListDoesNotChange) {
  const uint16_t kSequenceNumberLostPackets[] = { 2, 3, 4, 5, 6, 7, 8, 9 };
  static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
      sizeof(kSequenceNumberLostPackets[0]);

  // Run once without and once with sequence-number wrap-around.
  for (int wrap = 0; wrap < 2; wrap++) {
    scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
    nack->UpdateSampleRate(kSampleRateHz);

    uint16_t sequence_num_lost_packets[kNumAllLostPackets];
    for (int i = 0; i < kNumAllLostPackets; i++) {
      // |wrap == 1| shifts the burst across the 16-bit wrap boundary.
      sequence_num_lost_packets[i] =
          kSequenceNumberLostPackets[i] + wrap * 65531;
    }
    uint16_t seq_num = sequence_num_lost_packets[0] - 1;

    uint32_t timestamp = 0;
    std::vector<uint16_t> nack_list;

    nack->UpdateLastReceivedPacket(seq_num, timestamp);
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());

    seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
    timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
    int num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold);

    // Each arrival promotes one more late packet to missing until the
    // whole burst is in the NACK list.
    for (int i = 0; i < kNackThreshold + 1; ++i) {
      nack->UpdateLastReceivedPacket(seq_num, timestamp);
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
                                    num_lost_packets));
      seq_num++;
      timestamp += kTimestampIncrement;
      num_lost_packets++;
    }

    // From here on the list content must not change.
    for (int i = 0; i < 100; ++i) {
      nack->UpdateLastReceivedPacket(seq_num, timestamp);
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(nack_list, sequence_num_lost_packets,
                                    kNumAllLostPackets));
      seq_num++;
      timestamp += kTimestampIncrement;
    }
  }
}
|
||||
|
||||
// A missing packet that is retransmitted and received must be removed from
// the NACK list immediately.
TEST(NackTest, ArrivedPacketsAreRemovedFromNackList) {
  const uint16_t kSequenceNumberLostPackets[] = { 2, 3, 4, 5, 6, 7, 8, 9 };
  static const int kNumAllLostPackets = sizeof(kSequenceNumberLostPackets) /
      sizeof(kSequenceNumberLostPackets[0]);

  // Run once without and once with sequence-number wrap-around.
  for (int wrap = 0; wrap < 2; ++wrap) {
    scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
    nack->UpdateSampleRate(kSampleRateHz);

    uint16_t sequence_num_lost_packets[kNumAllLostPackets];
    for (int i = 0; i < kNumAllLostPackets; ++i) {
      // |wrap == 1| shifts the burst across the 16-bit wrap boundary.
      sequence_num_lost_packets[i] =
          kSequenceNumberLostPackets[i] + wrap * 65531;
    }

    uint16_t seq_num = sequence_num_lost_packets[0] - 1;
    uint32_t timestamp = 0;

    nack->UpdateLastReceivedPacket(seq_num, timestamp);
    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());

    size_t index_retransmitted_rtp = 0;
    uint32_t timestamp_retransmitted_rtp = timestamp + kTimestampIncrement;

    seq_num = sequence_num_lost_packets[kNumAllLostPackets - 1] + 1;
    timestamp += kTimestampIncrement * (kNumAllLostPackets + 1);
    size_t num_lost_packets = std::max(0, kNumAllLostPackets - kNackThreshold);
    for (int i = 0; i < kNumAllLostPackets; ++i) {
      // For the first |kNackThreshold + 1| iterations the list size is
      // steady: one packet is promoted in while one retransmission drops
      // out. Afterwards the list shrinks every round.
      if (i >= kNackThreshold + 1)
        num_lost_packets--;

      nack->UpdateLastReceivedPacket(seq_num, timestamp);
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(
          nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
          num_lost_packets));
      seq_num++;
      timestamp += kTimestampIncrement;

      // Retransmission of one previously lost RTP packet arrives.
      nack->UpdateLastReceivedPacket(
          sequence_num_lost_packets[index_retransmitted_rtp],
          timestamp_retransmitted_rtp);
      index_retransmitted_rtp++;
      timestamp_retransmitted_rtp += kTimestampIncrement;

      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(
          nack_list, &sequence_num_lost_packets[index_retransmitted_rtp],
          num_lost_packets - 1));  // One fewer lost packet in the list.
    }
    ASSERT_TRUE(nack_list.empty());
  }
}
|
||||
|
||||
// Assess if estimation of timestamps and time-to-play is correct. Introduce all
|
||||
// combinations that timestamps and sequence numbers might have wrap around.
|
||||
TEST(NackTest, EstimateTimestampAndTimeToPlay) {
|
||||
const uint16_t kLostPackets[] = { 2, 3, 4, 5, 6, 7, 8, 9, 10,
|
||||
11, 12, 13, 14, 15 };
|
||||
static const int kNumAllLostPackets = sizeof(kLostPackets) /
|
||||
sizeof(kLostPackets[0]);
|
||||
|
||||
|
||||
for (int k = 0; k < 4; ++k) {
|
||||
scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
|
||||
nack->UpdateSampleRate(kSampleRateHz);
|
||||
|
||||
// Sequence number wrap around if |k| is 2 or 3;
|
||||
int seq_num_offset = (k < 2) ? 0 : 65531;
|
||||
|
||||
// Timestamp wrap around if |k| is 1 or 3.
|
||||
uint32_t timestamp_offset = (k & 0x1) ?
|
||||
static_cast<uint32_t>(0xffffffff) - 6 : 0;
|
||||
|
||||
uint32_t timestamp_lost_packets[kNumAllLostPackets];
|
||||
uint16_t seq_num_lost_packets[kNumAllLostPackets];
|
||||
for (int n = 0; n < kNumAllLostPackets; ++n) {
|
||||
timestamp_lost_packets[n] = timestamp_offset + kLostPackets[n] *
|
||||
kTimestampIncrement;
|
||||
seq_num_lost_packets[n] = seq_num_offset + kLostPackets[n];
|
||||
}
|
||||
|
||||
// We and to push two packets before lost burst starts.
|
||||
uint16_t seq_num = seq_num_lost_packets[0] - 2;
|
||||
uint32_t timestamp = timestamp_lost_packets[0] - 2 * kTimestampIncrement;
|
||||
|
||||
const uint16_t first_seq_num = seq_num;
|
||||
const uint32_t first_timestamp = timestamp;
|
||||
|
||||
// Two consecutive packets to have a correct estimate of timestamp increase.
|
||||
nack->UpdateLastReceivedPacket(seq_num, timestamp);
|
||||
seq_num++;
|
||||
timestamp += kTimestampIncrement;
|
||||
nack->UpdateLastReceivedPacket(seq_num, timestamp);
|
||||
|
||||
// A packet after the last one which is supposed to be lost.
|
||||
seq_num = seq_num_lost_packets[kNumAllLostPackets - 1] + 1;
|
||||
timestamp = timestamp_lost_packets[kNumAllLostPackets - 1] +
|
||||
kTimestampIncrement;
|
||||
nack->UpdateLastReceivedPacket(seq_num, timestamp);
|
||||
|
||||
Nack::NackList nack_list = nack->GetNackList();
|
||||
EXPECT_EQ(static_cast<size_t>(kNumAllLostPackets), nack_list.size());
|
||||
|
||||
// Pretend the first packet is decoded.
|
||||
nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp);
|
||||
nack_list = nack->GetNackList();
|
||||
|
||||
Nack::NackList::iterator it = nack_list.begin();
|
||||
while (it != nack_list.end()) {
|
||||
seq_num = it->first - seq_num_offset;
|
||||
int index = seq_num - kLostPackets[0];
|
||||
EXPECT_EQ(timestamp_lost_packets[index], it->second.estimated_timestamp);
|
||||
EXPECT_EQ((index + 2) * kPacketSizeMs, it->second.time_to_play_ms);
|
||||
++it;
|
||||
}
|
||||
|
||||
// Pretend 10 ms is passed, and we had pulled audio from NetEq, it still
|
||||
// reports the same sequence number as decoded, time-to-play should be
|
||||
// updated by 10 ms.
|
||||
nack->UpdateLastDecodedPacket(first_seq_num, first_timestamp);
|
||||
nack_list = nack->GetNackList();
|
||||
it = nack_list.begin();
|
||||
while (it != nack_list.end()) {
|
||||
seq_num = it->first - seq_num_offset;
|
||||
int index = seq_num - kLostPackets[0];
|
||||
EXPECT_EQ((index + 2) * kPacketSizeMs - 10, it->second.time_to_play_ms);
|
||||
++it;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Missing packets older than the last decoded RTP must never appear in the
// NACK list — and neither may the corresponding late packets.
TEST(NackTest, MissingPacketsPriorToLastDecodedRtpShouldNotBeInNackList) {
  for (int m = 0; m < 2; ++m) {
    uint16_t seq_num_offset = (m == 0) ? 0 : 65531;  // Wrap around if |m| is 1.
    scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
    nack->UpdateSampleRate(kSampleRateHz);

    // Two consecutive packets give a correct estimate of the timestamp
    // increment per packet.
    uint16_t seq_num = 0;
    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
                                   seq_num * kTimestampIncrement);
    seq_num++;
    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
                                   seq_num * kTimestampIncrement);

    // Skip 10 packets (more than the NACK threshold).
    const int kNumLostPackets = 10;
    seq_num += kNumLostPackets + 1;
    nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
                                   seq_num * kTimestampIncrement);

    const size_t kExpectedListSize = kNumLostPackets - kNackThreshold;
    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_EQ(kExpectedListSize, nack_list.size());

    // Decoding the first and the second arrived packets must not shrink
    // the list: every missing packet is still ahead of the decode point.
    for (int k = 0; k < 2; ++k) {
      for (int n = 0; n < kPacketSizeMs / 10; ++n) {
        nack->UpdateLastDecodedPacket(seq_num_offset + k,
                                      k * kTimestampIncrement);
        nack_list = nack->GetNackList(kShortRoundTripTimeMs);
        EXPECT_EQ(kExpectedListSize, nack_list.size());
      }
    }

    // Decoding the last received packet clears the list entirely.
    nack->UpdateLastDecodedPacket(seq_num + seq_num_offset,
                                  seq_num * kTimestampIncrement);
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());

    // The late list must be empty as well. To check that, push a few more
    // packets; if the late list were non-empty its content would surface
    // in the NACK list.
    for (int n = 0; n < kNackThreshold + 10; ++n) {
      seq_num++;
      nack->UpdateLastReceivedPacket(seq_num_offset + seq_num,
                                     seq_num * kTimestampIncrement);
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(nack_list.empty());
    }
  }
}
|
||||
|
||||
// Reset() must clear the NACK list (configuration such as threshold and
// list-size limit is preserved, per the header contract).
TEST(NackTest, Reset) {
  scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
  nack->UpdateSampleRate(kSampleRateHz);

  // Two consecutive packets give a correct estimate of the timestamp
  // increment per packet.
  uint16_t seq_num = 0;
  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);
  seq_num++;
  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);

  // Skip 10 packets (more than the NACK threshold).
  const int kNumLostPackets = 10;
  seq_num += kNumLostPackets + 1;
  nack->UpdateLastReceivedPacket(seq_num, seq_num * kTimestampIncrement);

  const size_t kExpectedListSize = kNumLostPackets - kNackThreshold;
  std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
  EXPECT_EQ(kExpectedListSize, nack_list.size());

  nack->Reset();
  nack_list = nack->GetNackList(kShortRoundTripTimeMs);
  EXPECT_TRUE(nack_list.empty());
}
|
||||
|
||||
// A list-size limit configured before any loss occurs must be honored from
// the very first NACK-list construction.
TEST(NackTest, ListSizeAppliedFromBeginning) {
  const size_t kNackListSize = 10;
  for (int m = 0; m < 2; ++m) {
    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
    scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
    nack->UpdateSampleRate(kSampleRateHz);
    nack->SetMaxNackListSize(kNackListSize);

    uint16_t seq_num = seq_num_offset;
    uint32_t timestamp = 0x12345678;
    nack->UpdateLastReceivedPacket(seq_num, timestamp);

    // Lose more packets than the NACK-list size limit allows.
    uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5;
    seq_num += num_lost_packets + 1;
    timestamp += (num_lost_packets + 1) * kTimestampIncrement;
    nack->UpdateLastReceivedPacket(seq_num, timestamp);

    // Only the newest |kNackListSize| losses are tracked, of which the
    // last |kNackThreshold| are still merely late.
    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_EQ(kNackListSize - kNackThreshold, nack_list.size());
  }
}
|
||||
|
||||
// Shrinking the list-size limit after losses have occurred must evict the
// oldest entries, and subsequent arrivals must keep the list within limit.
TEST(NackTest, ChangeOfListSizeAppliedAndOldElementsRemoved) {
  const size_t kNackListSize = 10;
  for (int m = 0; m < 2; ++m) {
    uint16_t seq_num_offset = (m == 0) ? 0 : 65525;  // Wrap around if |m| is 1.
    scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
    nack->UpdateSampleRate(kSampleRateHz);

    uint16_t seq_num = seq_num_offset;
    uint32_t timestamp = 0x87654321;
    nack->UpdateLastReceivedPacket(seq_num, timestamp);

    // Lose more packets than the NACK-list size limit allows.
    uint16_t num_lost_packets = kNackThreshold + kNackListSize + 5;
    scoped_ptr<uint16_t[]> seq_num_lost(new uint16_t[num_lost_packets]);
    for (int n = 0; n < num_lost_packets; ++n) {
      seq_num_lost[n] = ++seq_num;
    }

    ++seq_num;
    timestamp += (num_lost_packets + 1) * kTimestampIncrement;
    nack->UpdateLastReceivedPacket(seq_num, timestamp);
    size_t expected_size = num_lost_packets - kNackThreshold;

    // Before the limit is applied every loss beyond the threshold is
    // NACKed.
    std::vector<uint16_t> nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_EQ(expected_size, nack_list.size());

    // Apply the limit: only the newest |kNackListSize| losses may remain,
    // minus those still within the threshold.
    nack->SetMaxNackListSize(kNackListSize);
    expected_size = kNackListSize - kNackThreshold;
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(IsNackListCorrect(
        nack_list, &seq_num_lost[num_lost_packets - kNackListSize],
        expected_size));

    // The list keeps its size but the content slides: per arrival the
    // oldest element is evicted and one late packet is promoted.
    size_t n;
    for (n = 1; n <= static_cast<size_t>(kNackThreshold); ++n) {
      ++seq_num;
      timestamp += kTimestampIncrement;
      nack->UpdateLastReceivedPacket(seq_num, timestamp);
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(
          nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n],
          expected_size));
    }

    // From here the list must shrink by one per arrival.
    for (; n < kNackListSize; ++n) {
      ++seq_num;
      timestamp += kTimestampIncrement;
      nack->UpdateLastReceivedPacket(seq_num, timestamp);
      --expected_size;
      nack_list = nack->GetNackList(kShortRoundTripTimeMs);
      EXPECT_TRUE(IsNackListCorrect(
          nack_list, &seq_num_lost[num_lost_packets - kNackListSize + n],
          expected_size));
    }

    // After this packet the NACK list must be empty.
    ++seq_num;
    timestamp += kTimestampIncrement;
    nack->UpdateLastReceivedPacket(seq_num, timestamp);
    nack_list = nack->GetNackList(kShortRoundTripTimeMs);
    EXPECT_TRUE(nack_list.empty());
  }
}
|
||||
|
||||
// Only missing packets whose estimated time-to-play exceeds the given
// round-trip time are NACKed — a retransmission of the others could not
// arrive in time to be useful.
// NOTE: test name fixed from the original typo "RoudTripTimeIsApplied".
TEST(NackTest, RoundTripTimeIsApplied) {
  const int kNackListSize = 200;
  scoped_ptr<Nack> nack(Nack::Create(kNackThreshold));
  nack->UpdateSampleRate(kSampleRateHz);
  nack->SetMaxNackListSize(kNackListSize);

  uint16_t seq_num = 0;
  uint32_t timestamp = 0x87654321;
  nack->UpdateLastReceivedPacket(seq_num, timestamp);

  // Lose a run of packets just above the NACK threshold. (The original
  // comment claimed this exceeds the list-size limit; it does not — only
  // kNackThreshold + 5 packets are lost against a limit of 200.)
  uint16_t kNumLostPackets = kNackThreshold + 5;
  seq_num += (1 + kNumLostPackets);
  timestamp += (1 + kNumLostPackets) * kTimestampIncrement;
  nack->UpdateLastReceivedPacket(seq_num, timestamp);

  // Expected time-to-play values are:
  // kPacketSizeMs - 10, 2*kPacketSizeMs - 10, 3*kPacketSizeMs - 10, ...
  //
  // sequence number:  1,  2,  3,   4,   5
  // time-to-play:    20, 50, 80, 110, 140
  //
  // With a 100 ms round-trip time only packets 4 and 5 are worth NACKing.
  std::vector<uint16_t> nack_list = nack->GetNackList(100);
  ASSERT_EQ(2u, nack_list.size());
  EXPECT_EQ(4, nack_list[0]);
  EXPECT_EQ(5, nack_list[1]);
}
|
||||
|
||||
} // namespace acm2
|
||||
|
||||
} // namespace webrtc
|
||||
Reference in New Issue
Block a user