Support for Signal calls.

Merge in RedPhone

// FREEBIE
This commit is contained in:
Moxie Marlinspike
2015-09-09 13:54:29 -07:00
parent 3d4ae60d81
commit d83a3d71bc
2585 changed files with 803492 additions and 45 deletions

View File

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "ACMTest.h"
// Out-of-line body for the destructor declared pure virtual in ACMTest.h;
// every derived destructor still chains through this definition.
ACMTest::~ACMTest() {
}

View File

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_
// Abstract interface for audio-coding-module tests. Concrete tests
// implement Perform(), which runs the entire test case.
class ACMTest {
 public:
  ACMTest() {}
  // Pure virtual; the required out-of-line definition lives in ACMTest.cc.
  virtual ~ACMTest() = 0;
  // Runs the test; implemented by each concrete test class.
  virtual void Perform() = 0;
};
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ACMTEST_H_

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,167 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
class Config;
// Actions the APITest can exercise (see the APITest implementation for how
// each value is used).
enum APITESTAction {
  TEST_CHANGE_CODEC_ONLY = 0,
  DTX_TEST = 1
};
// Stress/API test: two ACM instances (side A and side B) exchange audio
// through Channel objects while dedicated push/pull/process/API threads
// change codecs, VAD/DTX, delay and playout settings on the fly.
class APITest : public ACMTest {
 public:
  explicit APITest(const Config& config);
  ~APITest();

  void Perform();

 private:
  // One-time initialization before the threads start; returns a status code.
  int16_t SetUp();

  // Static thread entry points; |obj| is the owning APITest instance.
  static bool PushAudioThreadA(void* obj);
  static bool PullAudioThreadA(void* obj);
  static bool ProcessThreadA(void* obj);
  static bool APIThreadA(void* obj);

  static bool PushAudioThreadB(void* obj);
  static bool PullAudioThreadB(void* obj);
  static bool ProcessThreadB(void* obj);
  static bool APIThreadB(void* obj);

  void CheckVADStatus(char side);

  // Set Min delay, get delay, playout timestamp
  void TestDelay(char side);

  // Unregister a codec & register again.
  // (Identifier spelling kept as-is; the definition uses the same name.)
  void TestRegisteration(char side);

  // Playout Mode, background noise mode.
  // Receiver Frequency, playout frequency.
  void TestPlayout(char receiveSide);

  void TestSendVAD(char side);

  void CurrentCodec(char side);

  void ChangeCodec(char side);

  void Wait(uint32_t waitLengthMs);

  void RunTest(char thread);

  // Per-iteration work items invoked by the static thread entries above.
  bool PushAudioRunA();
  bool PullAudioRunA();
  bool ProcessRunA();
  bool APIRunA();

  bool PullAudioRunB();
  bool PushAudioRunB();
  bool ProcessRunB();
  bool APIRunB();

  //--- ACMs
  scoped_ptr<AudioCodingModule> _acmA;
  scoped_ptr<AudioCodingModule> _acmB;

  //--- Channels
  Channel* _channel_A2B;
  Channel* _channel_B2A;

  //--- I/O files
  // A
  PCMFile _inFileA;
  PCMFile _outFileA;
  // B
  PCMFile _outFileB;
  PCMFile _inFileB;

  //--- I/O params
  // A
  int32_t _outFreqHzA;
  // B
  int32_t _outFreqHzB;

  // Should we write to file.
  // we might skip writing to file if we
  // run the test for a long time.
  bool _writeToFile;

  //--- Events
  // A
  EventWrapper* _pullEventA;      // pulling data from ACM
  EventWrapper* _pushEventA;      // pushing data to ACM
  EventWrapper* _processEventA;   // process
  EventWrapper* _apiEventA;       // API calls
  // B
  EventWrapper* _pullEventB;      // pulling data from ACM
  EventWrapper* _pushEventB;      // pushing data to ACM
  EventWrapper* _processEventB;   // process
  EventWrapper* _apiEventB;       // API calls

  // keep track of the codec in either side.
  uint8_t _codecCntrA;
  uint8_t _codecCntrB;

  // Is set to true if there is no encoder in either side
  bool _thereIsEncoderA;
  bool _thereIsEncoderB;
  bool _thereIsDecoderA;
  bool _thereIsDecoderB;

  // VAD/DTX configuration per side.
  bool _sendVADA;
  bool _sendDTXA;
  ACMVADMode _sendVADModeA;

  bool _sendVADB;
  bool _sendDTXB;
  ACMVADMode _sendVADModeB;

  int32_t _minDelayA;
  int32_t _minDelayB;
  bool _payloadUsed[32];

  AudioPlayoutMode _playoutModeA;
  AudioPlayoutMode _playoutModeB;

  bool _verbose;

  // Console progress-indicator state ("moving dot").
  int _dotPositionA;
  int _dotMoveDirectionA;
  int _dotPositionB;
  int _dotMoveDirectionB;

  char _movingDot[41];

  DTMFDetector* _dtmfCallback;
  VADCallback* _vadCallbackA;
  VADCallback* _vadCallbackB;
  RWLockWrapper& _apiTestRWLock;
  bool _randomTest;
  int _testNumA;
  int _testNumB;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_APITEST_H_

View File

@@ -0,0 +1,407 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include <assert.h>
#include <iostream>
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
namespace webrtc {
// AudioPacketizationCallback implementation. Wraps the encoded
// |payloadData| in a locally built RTP header and hands it straight to the
// registered receiver ACM (no real network involved). Returns the receiver's
// IncomingPacket() status, or 0 when the frame is empty or intentionally
// dropped.
int32_t Channel::SendData(const FrameType frameType, const uint8_t payloadType,
                          const uint32_t timeStamp, const uint8_t* payloadData,
                          const uint16_t payloadSize,
                          const RTPFragmentationHeader* fragmentation) {
  WebRtcRTPHeader rtpInfo;
  int32_t status;
  uint16_t payloadDataSize = payloadSize;

  rtpInfo.header.markerBit = false;
  rtpInfo.header.ssrc = 0;
  // Sequence number and timestamp can be overridden externally for testing;
  // a negative sentinel means "use the internally generated value".
  rtpInfo.header.sequenceNumber = (external_sequence_number_ < 0) ?
      _seqNo++ : static_cast<uint16_t>(external_sequence_number_);
  rtpInfo.header.payloadType = payloadType;
  rtpInfo.header.timestamp = (external_send_timestamp_ < 0) ? timeStamp :
      static_cast<uint32_t>(external_send_timestamp_);
  if (frameType == kAudioFrameCN) {
    rtpInfo.type.Audio.isCNG = true;
  } else {
    rtpInfo.type.Audio.isCNG = false;
  }
  if (frameType == kFrameEmpty) {
    // Skip this frame
    return 0;
  }
  rtpInfo.type.Audio.channel = 1;
  // Treat fragmentation separately
  if (fragmentation != NULL) {
    // RED path: if the secondary block is recent enough (timestamp offset
    // fits in 14 bits) and both blocks are present, prepend a 5-byte RED
    // header to the combined payload.
    // If silence for too long, send only new data.
    if ((fragmentation->fragmentationTimeDiff[1] <= 0x3fff) &&
        (fragmentation->fragmentationVectorSize == 2)) {
      // only 0x80 if we have multiple blocks
      _payloadData[0] = 0x80 + fragmentation->fragmentationPlType[1];
      // Timestamp offset in the upper bits, secondary block length in the
      // low 10 bits.
      uint32_t REDheader = (((uint32_t) fragmentation->fragmentationTimeDiff[1])
          << 10) + fragmentation->fragmentationLength[1];
      _payloadData[1] = uint8_t((REDheader >> 16) & 0x000000FF);
      _payloadData[2] = uint8_t((REDheader >> 8) & 0x000000FF);
      _payloadData[3] = uint8_t(REDheader & 0x000000FF);
      _payloadData[4] = fragmentation->fragmentationPlType[0];
      // copy the RED data
      memcpy(_payloadData + 5,
             payloadData + fragmentation->fragmentationOffset[1],
             fragmentation->fragmentationLength[1]);
      // copy the normal data
      memcpy(_payloadData + 5 + fragmentation->fragmentationLength[1],
             payloadData + fragmentation->fragmentationOffset[0],
             fragmentation->fragmentationLength[0]);
      // Account for the 5 RED header bytes written above.
      payloadDataSize += 5;
    } else {
      // single block (newest one)
      memcpy(_payloadData, payloadData + fragmentation->fragmentationOffset[0],
             fragmentation->fragmentationLength[0]);
      payloadDataSize = uint16_t(fragmentation->fragmentationLength[0]);
      rtpInfo.header.payloadType = fragmentation->fragmentationPlType[0];
    }
  } else {
    memcpy(_payloadData, payloadData, payloadDataSize);
    if (_isStereo) {
      // Stereo arrives as alternating left/right calls: store the header
      // from the left-channel call and reuse it for the right one.
      if (_leftChannel) {
        memcpy(&_rtpInfo, &rtpInfo, sizeof(WebRtcRTPHeader));
        _leftChannel = false;
        rtpInfo.type.Audio.channel = 1;
      } else {
        memcpy(&rtpInfo, &_rtpInfo, sizeof(WebRtcRTPHeader));
        _leftChannel = true;
        rtpInfo.type.Audio.channel = 2;
      }
    }
  }

  _channelCritSect->Enter();
  if (_saveBitStream) {
    //fwrite(payloadData, sizeof(uint8_t), payloadSize, _bitStreamFile);
  }
  if (!_isStereo) {
    CalcStatistics(rtpInfo, payloadSize);
  }
  _lastInTimestamp = timeStamp;
  _totalBytes += payloadDataSize;
  _channelCritSect->Leave();

  if (_useFECTestWithPacketLoss) {
    // Simulated loss: silently drop every third packet.
    _packetLoss += 1;
    if (_packetLoss == 3) {
      _packetLoss = 0;
      return 0;
    }
  }
  if (num_packets_to_drop_ > 0) {
    // Externally requested drops (see set_num_packets_to_drop()).
    num_packets_to_drop_--;
    return 0;
  }

  status = _receiverACM->IncomingPacket(_payloadData, payloadDataSize, rtpInfo);

  return status;
}
// TODO(turajs): rewrite this method.
// Updates the per-payload / per-frame-size statistics for one sent packet.
// The frame size is inferred from the timestamp delta between consecutive
// packets of the same payload type, so the first packet of a payload type
// (and the first after a payload-type switch) only primes the state and is
// not counted.
void Channel::CalcStatistics(WebRtcRTPHeader& rtpInfo, uint16_t payloadSize) {
  int n;
  if ((rtpInfo.header.payloadType != _lastPayloadType)
      && (_lastPayloadType != -1)) {
    // payload-type is changed.
    // we have to terminate the calculations on the previous payload type
    // we ignore the last packet in that payload type just to make things
    // easier.
    for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
      if (_lastPayloadType == _payloadStats[n].payloadType) {
        _payloadStats[n].newPacket = true;
        break;
      }
    }
  }
  _lastPayloadType = rtpInfo.header.payloadType;

  // Look up the slot already tracking this payload type, if any.
  bool newPayload = true;
  ACMTestPayloadStats* currentPayloadStr = NULL;
  for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
    if (rtpInfo.header.payloadType == _payloadStats[n].payloadType) {
      newPayload = false;
      currentPayloadStr = &_payloadStats[n];
      break;
    }
  }

  if (!newPayload) {
    if (!currentPayloadStr->newPacket) {
      // Frame size in samples == timestamp advance since the last packet.
      uint32_t lastFrameSizeSample = (uint32_t)(
          (uint32_t) rtpInfo.header.timestamp
          - (uint32_t) currentPayloadStr->lastTimestamp);
      assert(lastFrameSizeSample > 0);
      int k = 0;
      // Find the bucket for this frame size (or the first empty bucket).
      // NOTE(review): k is not bounded by MAX_NUM_FRAMESIZES here; more than
      // MAX_NUM_FRAMESIZES distinct frame sizes would index out of bounds.
      while ((currentPayloadStr->frameSizeStats[k].frameSizeSample
          != lastFrameSizeSample)
          && (currentPayloadStr->frameSizeStats[k].frameSizeSample != 0)) {
        k++;
      }
      ACMTestFrameSizeStats* currentFrameSizeStats = &(currentPayloadStr
          ->frameSizeStats[k]);
      currentFrameSizeStats->frameSizeSample = (int16_t) lastFrameSizeSample;

      // increment the number of encoded samples.
      currentFrameSizeStats->totalEncodedSamples += lastFrameSizeSample;
      // increment the number of received packets
      currentFrameSizeStats->numPackets++;

      // increment the total number of bytes (this is based on
      // the previous payload we don't know the frame-size of
      // the current payload.
      currentFrameSizeStats->totalPayloadLenByte += currentPayloadStr
          ->lastPayloadLenByte;
      // store the maximum payload-size (this is based on
      // the previous payload we don't know the frame-size of
      // the current payload.
      if (currentFrameSizeStats->maxPayloadLen
          < currentPayloadStr->lastPayloadLenByte) {
        currentFrameSizeStats->maxPayloadLen = currentPayloadStr
            ->lastPayloadLenByte;
      }
      // store the current values for the next time
      currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
      currentPayloadStr->lastPayloadLenByte = payloadSize;
    } else {
      // First packet after a payload-type switch: just prime the state.
      currentPayloadStr->newPacket = false;
      currentPayloadStr->lastPayloadLenByte = payloadSize;
      currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
      currentPayloadStr->payloadType = rtpInfo.header.payloadType;
      memset(currentPayloadStr->frameSizeStats, 0, MAX_NUM_FRAMESIZES *
             sizeof(ACMTestFrameSizeStats));
    }
  } else {
    // Claim the first unused slot for this payload type.
    // NOTE(review): this scan is not bounded by MAX_NUM_PAYLOADS; it assumes
    // a free slot always exists.
    n = 0;
    while (_payloadStats[n].payloadType != -1) {
      n++;
    }
    // first packet
    _payloadStats[n].newPacket = false;
    _payloadStats[n].lastPayloadLenByte = payloadSize;
    _payloadStats[n].lastTimestamp = rtpInfo.header.timestamp;
    _payloadStats[n].payloadType = rtpInfo.header.payloadType;
    memset(_payloadStats[n].frameSizeStats, 0, MAX_NUM_FRAMESIZES *
           sizeof(ACMTestFrameSizeStats));
  }
}
// Constructs a test channel. A non-negative |chID| enables dumping the
// outgoing bit stream to "bitStream_<chID>.dat".
Channel::Channel(int16_t chID)
    : _receiverACM(NULL),
      _seqNo(0),
      _channelCritSect(CriticalSectionWrapper::CreateCriticalSection()),
      _bitStreamFile(NULL),
      _saveBitStream(false),
      _lastPayloadType(-1),
      _isStereo(false),
      _leftChannel(true),
      _lastInTimestamp(0),
      _packetLoss(0),
      _useFECTestWithPacketLoss(false),
      _beginTime(TickTime::MillisecondTimestamp()),
      _totalBytes(0),
      external_send_timestamp_(-1),
      external_sequence_number_(-1),
      num_packets_to_drop_(0) {
  int n;
  int k;
  // Mark every stats slot unused. rateBitPerSec and usageLenSec are not
  // cleared here; they are derived later in Stats().
  for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
    _payloadStats[n].payloadType = -1;
    _payloadStats[n].newPacket = true;
    for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
      _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
      _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
      _payloadStats[n].frameSizeStats[k].numPackets = 0;
      _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
      _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
    }
  }
  if (chID >= 0) {
    _saveBitStream = true;
    char bitStreamFileName[500];
    sprintf(bitStreamFileName, "bitStream_%d.dat", chID);
    // NOTE(review): the fopen() result is not checked; on failure
    // _bitStreamFile stays NULL while _saveBitStream is true (the only
    // write site in SendData() is currently commented out).
    _bitStreamFile = fopen(bitStreamFileName, "wb");
  } else {
    _saveBitStream = false;
  }
}
// Releases the critical section and closes the bit-stream dump file that the
// constructor may have opened; previously that FILE handle was leaked.
Channel::~Channel() {
  if (_bitStreamFile != NULL) {
    fclose(_bitStreamFile);
    _bitStreamFile = NULL;
  }
  delete _channelCritSect;
}
// Attaches the ACM that will receive every packet produced by SendData().
void Channel::RegisterReceiverACM(AudioCodingModule* acm) {
  _receiverACM = acm;
}
void Channel::ResetStats() {
int n;
int k;
_channelCritSect->Enter();
_lastPayloadType = -1;
for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
_payloadStats[n].payloadType = -1;
_payloadStats[n].newPacket = true;
for (k = 0; k < MAX_NUM_FRAMESIZES; k++) {
_payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
_payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
_payloadStats[n].frameSizeStats[k].numPackets = 0;
_payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
_payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
}
}
_beginTime = TickTime::MillisecondTimestamp();
_totalBytes = 0;
_channelCritSect->Leave();
}
// Copies the statistics gathered for |codecInst|'s payload type into
// |payloadStats| and derives, per frame-size bucket, the encoded duration in
// seconds and the average bit rate. Returns 0 on success, -1 if no packet
// with that payload type has been seen (payloadStats.payloadType is then -1).
int16_t Channel::Stats(CodecInst& codecInst,
                       ACMTestPayloadStats& payloadStats) {
  _channelCritSect->Enter();
  int n;
  payloadStats.payloadType = -1;
  for (n = 0; n < MAX_NUM_PAYLOADS; n++) {
    if (_payloadStats[n].payloadType == codecInst.pltype) {
      memcpy(&payloadStats, &_payloadStats[n], sizeof(ACMTestPayloadStats));
      break;
    }
  }
  if (payloadStats.payloadType == -1) {
    _channelCritSect->Leave();
    return -1;
  }
  for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
    // frameSizeSample == 0 marks the end of the used buckets.
    if (payloadStats.frameSizeStats[n].frameSizeSample == 0) {
      _channelCritSect->Leave();
      return 0;
    }
    // Duration of encoded audio covered by this bucket, in seconds.
    payloadStats.frameSizeStats[n].usageLenSec = (double) payloadStats
        .frameSizeStats[n].totalEncodedSamples / (double) codecInst.plfreq;
    // Average rate: total payload bits over the encoded duration.
    payloadStats.frameSizeStats[n].rateBitPerSec =
        payloadStats.frameSizeStats[n].totalPayloadLenByte * 8
        / payloadStats.frameSizeStats[n].usageLenSec;
  }
  _channelCritSect->Leave();
  return 0;
}
void Channel::Stats(uint32_t* numPackets) {
_channelCritSect->Enter();
int k;
int n;
memset(numPackets, 0, MAX_NUM_PAYLOADS * sizeof(uint32_t));
for (k = 0; k < MAX_NUM_PAYLOADS; k++) {
if (_payloadStats[k].payloadType == -1) {
break;
}
numPackets[k] = 0;
for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
if (_payloadStats[k].frameSizeStats[n].frameSizeSample == 0) {
break;
}
numPackets[k] += _payloadStats[k].frameSizeStats[n].numPackets;
}
}
_channelCritSect->Leave();
}
// Fills |payloadType| / |payloadLenByte| (both MAX_NUM_PAYLOADS entries)
// with, per tracked payload type, the RTP payload-type number and the total
// number of payload bytes sent; unused length entries are zeroed.
void Channel::Stats(uint8_t* payloadType, uint32_t* payloadLenByte) {
  _channelCritSect->Enter();
  int k;
  int n;
  memset(payloadLenByte, 0, MAX_NUM_PAYLOADS * sizeof(uint32_t));
  for (k = 0; k < MAX_NUM_PAYLOADS; k++) {
    // payloadType == -1 marks the end of the used slots.
    if (_payloadStats[k].payloadType == -1) {
      break;
    }
    payloadType[k] = (uint8_t) _payloadStats[k].payloadType;
    payloadLenByte[k] = 0;
    for (n = 0; n < MAX_NUM_FRAMESIZES; n++) {
      if (_payloadStats[k].frameSizeStats[n].frameSizeSample == 0) {
        break;
      }
      // NOTE(review): the uint16_t cast truncates totalPayloadLenByte;
      // buckets with more than 65535 payload bytes will be misreported.
      payloadLenByte[k] += (uint16_t) _payloadStats[k].frameSizeStats[n]
          .totalPayloadLenByte;
    }
  }
  _channelCritSect->Leave();
}
// Pretty-prints the statistics gathered for |codecInst| to stdout, one
// section per frame-size bucket.
void Channel::PrintStats(CodecInst& codecInst) {
  ACMTestPayloadStats payloadStats;
  Stats(codecInst, payloadStats);
  printf("%s %d kHz\n", codecInst.plname, codecInst.plfreq / 1000);
  printf("=====================================================\n");
  if (payloadStats.payloadType == -1) {
    printf("No Packets are sent with payload-type %d (%s)\n\n",
           codecInst.pltype, codecInst.plname);
    return;
  }
  for (int bucket = 0; bucket < MAX_NUM_FRAMESIZES; bucket++) {
    const ACMTestFrameSizeStats& stats = payloadStats.frameSizeStats[bucket];
    if (stats.frameSizeSample == 0) {
      break;  // End of the used buckets.
    }
    printf("Frame-size.................... %d samples\n",
           stats.frameSizeSample);
    printf("Average Rate.................. %.0f bits/sec\n",
           stats.rateBitPerSec);
    printf("Maximum Payload-Size.......... %d Bytes\n", stats.maxPayloadLen);
    printf(
        "Maximum Instantaneous Rate.... %.0f bits/sec\n",
        ((double) stats.maxPayloadLen * 8.0 * (double) codecInst.plfreq)
            / (double) stats.frameSizeSample);
    printf("Number of Packets............. %u\n",
           (unsigned int) stats.numPackets);
    printf("Duration...................... %0.3f sec\n\n", stats.usageLenSec);
  }
}
// Returns the RTP timestamp of the most recently sent packet.
uint32_t Channel::LastInTimestamp() {
  _channelCritSect->Enter();
  const uint32_t lastTimestamp = _lastInTimestamp;
  _channelCritSect->Leave();
  return lastTimestamp;
}
// Average send rate since construction or the last ResetStats() call
// (bytes * 8 / elapsed milliseconds, i.e. kbit/s). Returns 0.0 when no time
// has elapsed yet; previously that case divided by zero and produced
// inf/NaN.
double Channel::BitRate() {
  double rate;
  uint64_t currTime = TickTime::MillisecondTimestamp();
  _channelCritSect->Enter();
  const uint64_t elapsedMs = currTime - _beginTime;
  if (elapsedMs == 0) {
    rate = 0.0;
  } else {
    rate = ((double) _totalBytes * 8.0) / (double) elapsedMs;
  }
  _channelCritSect->Leave();
  return rate;
}
} // namespace webrtc

View File

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_
#include <stdio.h>
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class CriticalSectionWrapper;
#define MAX_NUM_PAYLOADS 50
#define MAX_NUM_FRAMESIZES 6
// TODO(turajs): Write constructor for this structure.
// Accumulated statistics for one (payload type, frame size) combination;
// filled in by Channel::CalcStatistics().
struct ACMTestFrameSizeStats {
  uint16_t frameSizeSample;      // Frame size in samples; 0 == unused slot.
  int16_t maxPayloadLen;         // Largest payload seen, in bytes.
  uint32_t numPackets;           // Packets counted for this frame size.
  uint64_t totalPayloadLenByte;  // Sum of payload sizes, in bytes.
  uint64_t totalEncodedSamples;  // Sum of encoded samples.
  double rateBitPerSec;          // Derived in Channel::Stats().
  double usageLenSec;            // Derived in Channel::Stats().
};
// TODO(turajs): Write constructor for this structure.
// Per-payload-type bookkeeping used by Channel::CalcStatistics().
struct ACMTestPayloadStats {
  bool newPacket;           // True until the first counted packet arrives.
  int16_t payloadType;      // RTP payload type; -1 == unused slot.
  int16_t lastPayloadLenByte;
  uint32_t lastTimestamp;   // Timestamp of the previous packet.
  ACMTestFrameSizeStats frameSizeStats[MAX_NUM_FRAMESIZES];
};
// Test transport: implements AudioPacketizationCallback so that packets
// produced by a sending ACM are handed straight to a receiving ACM, while
// per-payload statistics are collected along the way.
class Channel : public AudioPacketizationCallback {
 public:
  Channel(int16_t chID = -1);
  ~Channel();

  // AudioPacketizationCallback: called by the sending ACM for each encoded
  // frame; forwards the packet to the ACM set via RegisterReceiverACM().
  int32_t SendData(const FrameType frameType, const uint8_t payloadType,
                   const uint32_t timeStamp, const uint8_t* payloadData,
                   const uint16_t payloadSize,
                   const RTPFragmentationHeader* fragmentation);

  void RegisterReceiverACM(AudioCodingModule *acm);

  void ResetStats();

  // Copies the stats for |codecInst|'s payload type; returns 0 on success,
  // -1 if nothing was sent with that payload type.
  int16_t Stats(CodecInst& codecInst, ACMTestPayloadStats& payloadStats);

  // Per-payload packet counts.
  void Stats(uint32_t* numPackets);

  // Per-payload type numbers and total payload byte counts.
  // (Parameter names fixed to match the definition in Channel.cc; they were
  // previously swapped in this declaration.)
  void Stats(uint8_t* payloadType, uint32_t* payloadLenByte);

  void PrintStats(CodecInst& codecInst);

  void SetIsStereo(bool isStereo) {
    _isStereo = isStereo;
  }

  uint32_t LastInTimestamp();

  void SetFECTestWithPacketLoss(bool usePacketLoss) {
    _useFECTestWithPacketLoss = usePacketLoss;
  }

  // Average send rate (kbit/s) since construction or ResetStats().
  double BitRate();

  void set_send_timestamp(uint32_t new_send_ts) {
    external_send_timestamp_ = new_send_ts;
  }

  void set_sequence_number(uint16_t new_sequence_number) {
    external_sequence_number_ = new_sequence_number;
  }

  void set_num_packets_to_drop(int new_num_packets_to_drop) {
    num_packets_to_drop_ = new_num_packets_to_drop;
  }

 private:
  void CalcStatistics(WebRtcRTPHeader& rtpInfo, uint16_t payloadSize);

  AudioCodingModule* _receiverACM;
  uint16_t _seqNo;
  // 60msec * 32 sample(max)/msec * 2 description (maybe) * 2 bytes/sample
  uint8_t _payloadData[60 * 32 * 2 * 2];
  CriticalSectionWrapper* _channelCritSect;
  FILE* _bitStreamFile;
  bool _saveBitStream;
  int16_t _lastPayloadType;
  ACMTestPayloadStats _payloadStats[MAX_NUM_PAYLOADS];
  bool _isStereo;
  WebRtcRTPHeader _rtpInfo;
  bool _leftChannel;
  uint32_t _lastInTimestamp;
  // FEC Test variables
  int16_t _packetLoss;
  bool _useFECTestWithPacketLoss;
  uint64_t _beginTime;
  uint64_t _totalBytes;
  // External timing info, defaulted to -1. Only used if they are
  // non-negative.
  int64_t external_send_timestamp_;
  int32_t external_sequence_number_;
  int num_packets_to_drop_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_CHANNEL_H_

View File

@@ -0,0 +1,353 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Remembers the RTP output stream and the codec clock rate written with
// each packet; sequence numbers start at zero.
TestPacketization::TestPacketization(RTPStream *rtpStream, uint16_t frequency)
    : _rtpStream(rtpStream),
      _frequency(frequency),
      _seqNo(0) {
}
// Nothing to release: this object is owned and deleted by the Sender (see
// Sender::Teardown()).
TestPacketization::~TestPacketization() {
}
// AudioPacketizationCallback implementation: writes every encoded frame to
// the RTP stream, stamped with a locally generated sequence number.
int32_t TestPacketization::SendData(
    const FrameType /* frameType */, const uint8_t payloadType,
    const uint32_t timeStamp, const uint8_t* payloadData,
    const uint16_t payloadSize,
    const RTPFragmentationHeader* /* fragmentation */) {
  const int16_t sequenceNumber = _seqNo++;
  _rtpStream->Write(payloadType, timeStamp, sequenceNumber, payloadData,
                    payloadSize, _frequency);
  return 1;
}
// Zero-initializes all members; the real configuration happens in Setup().
Sender::Sender()
    : _acm(NULL),
      _pcmFile(),
      _audioFrame(),
      _packetization(NULL) {
}
// Prepares |acm| for sending: opens the input PCM file, selects a send
// codec (fixed by the codeId member in test modes 0/1, interactively via
// stdin otherwise) and registers a TestPacketization callback that writes
// to |rtpStream|.
void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
                   std::string in_file_name, int sample_rate, int channels) {
  acm->InitializeSender();
  struct CodecInst sendCodec;
  int noOfCodecs = acm->NumberOfCodecs();
  int codecNo;

  // Open input file
  const std::string file_name = webrtc::test::ResourcePath(in_file_name, "pcm");
  _pcmFile.Open(file_name, sample_rate, "rb");
  if (channels == 2) {
    _pcmFile.ReadStereo(true);
  }

  // Set the codec for the current test.
  if ((testMode == 0) || (testMode == 1)) {
    // Set the codec id.
    codecNo = codeId;
  } else {
    // Choose codec on command line.
    printf("List of supported codec.\n");
    for (int n = 0; n < noOfCodecs; n++) {
      EXPECT_EQ(0, acm->Codec(n, &sendCodec));
      printf("%d %s\n", n, sendCodec.plname);
    }
    printf("Choose your codec:");
    ASSERT_GT(scanf("%d", &codecNo), 0);
  }

  EXPECT_EQ(0, acm->Codec(codecNo, &sendCodec));
  sendCodec.channels = channels;
  EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
  // Owned by this Sender; released in Teardown().
  _packetization = new TestPacketization(rtpStream, sendCodec.plfreq);
  EXPECT_EQ(0, acm->RegisterTransportCallback(_packetization));

  _acm = acm;
}
// Releases what Setup() acquired: the packetization callback object and the
// input file handle.
void Sender::Teardown() {
  delete _packetization;
  _pcmFile.Close();
}
// Reads one 10 ms chunk from the input file and feeds it to the ACM.
// Returns false at end-of-file or when the ACM rejects the frame.
bool Sender::Add10MsData() {
  if (_pcmFile.EndOfFile()) {
    return false;
  }
  EXPECT_GT(_pcmFile.Read10MsData(_audioFrame), 0);
  const int32_t status = _acm->Add10MsData(_audioFrame);
  EXPECT_EQ(0, status);
  return status == 0;
}
// Pumps audio through the ACM until the input file is exhausted.
void Sender::Run() {
  while (Add10MsData()) {
    EXPECT_GT(_acm->Process(), -1);
  }
}
// Initializes the buffer sizes; the remaining state is set up in Setup().
Receiver::Receiver()
    : _playoutLengthSmpls(WEBRTC_10MS_PCM_AUDIO),
      _payloadSizeBytes(MAX_INCOMING_PAYLOAD) {
}
// Prepares |acm| for receiving: registers all codecs whose channel count
// matches |channels| (plus Opus forced to the requested channel count),
// opens the PCM output file and picks the playout frequency according to
// the testMode member.
void Receiver::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
                     std::string out_file_name, int channels) {
  struct CodecInst recvCodec = CodecInst();
  int noOfCodecs;
  EXPECT_EQ(0, acm->InitializeReceiver());

  noOfCodecs = acm->NumberOfCodecs();
  for (int i = 0; i < noOfCodecs; i++) {
    EXPECT_EQ(0, acm->Codec(i, &recvCodec));
    if (recvCodec.channels == channels)
      EXPECT_EQ(0, acm->RegisterReceiveCodec(recvCodec));
    // Forces mono/stereo for Opus.
    if (!strcmp(recvCodec.plname, "opus")) {
      recvCodec.channels = channels;
      EXPECT_EQ(0, acm->RegisterReceiveCodec(recvCodec));
    }
  }

  int playSampFreq;
  std::string file_name;
  std::stringstream file_stream;
  file_stream << webrtc::test::OutputPath() << out_file_name
      << static_cast<int>(codeId) << ".pcm";
  file_name = file_stream.str();
  _rtpStream = rtpStream;

  if (testMode == 1) {
    // NOTE(review): recvCodec here holds whatever codec the loop above
    // queried last, not necessarily the codec that will be decoded.
    playSampFreq = recvCodec.plfreq;
    _pcmFile.Open(file_name, recvCodec.plfreq, "wb+");
  } else if (testMode == 0) {
    playSampFreq = 32000;
    _pcmFile.Open(file_name, 32000, "wb+");
  } else {
    // Interactive mode: ask the user for the playout frequency.
    printf("\nValid output frequencies:\n");
    printf("8000\n16000\n32000\n-1,");
    printf("which means output frequency equal to received signal frequency");
    printf("\n\nChoose output sampling frequency: ");
    ASSERT_GT(scanf("%d", &playSampFreq), 0);
    file_name = webrtc::test::OutputPath() + out_file_name + ".pcm";
    _pcmFile.Open(file_name, playSampFreq, "wb+");
  }

  _realPayloadSizeBytes = 0;
  // Owned by this Receiver; released in Teardown().
  _playoutBuffer = new int16_t[WEBRTC_10MS_PCM_AUDIO];
  _frequency = playSampFreq;
  _acm = acm;
  _firstTime = true;
}
// Closes the output file, frees the playout buffer and, for interactive
// test modes, releases the trace resources.
void Receiver::Teardown() {
  _pcmFile.Close();
  delete[] _playoutBuffer;
  if (testMode > 1) {
    Trace::ReturnTrace();
  }
}
// Feeds the already-prefetched packet to the ACM and prefetches the next
// one; the new packet's delivery time lands in _nextTime for Run(). The
// first call (and the first call after the file has been drained) performs
// a priming read instead. Returns false only when a zero-length payload is
// read before end-of-file.
bool Receiver::IncomingPacket() {
  if (!_rtpStream->EndOfFile()) {
    if (_firstTime) {
      _firstTime = false;
      _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
                                               _payloadSizeBytes, &_nextTime);
      if (_realPayloadSizeBytes == 0) {
        if (_rtpStream->EndOfFile()) {
          _firstTime = true;
          return true;
        } else {
          return false;
        }
      }
    }

    EXPECT_EQ(0, _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes,
                                      _rtpInfo));
    // Prefetch the next packet; its timestamp tells Run() when to call us
    // again.
    _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
                                             _payloadSizeBytes, &_nextTime);
    if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
      _firstTime = true;
    }
  }
  return true;
}
// Pulls 10 ms of decoded audio from the ACM and appends it to the output
// PCM file. Returns false if the ACM reports an error.
bool Receiver::PlayoutData() {
  AudioFrame audioFrame;

  int32_t ok =_acm->PlayoutData10Ms(_frequency, &audioFrame);
  EXPECT_EQ(0, ok);
  if (ok < 0){
    return false;
  }
  if (_playoutLengthSmpls == 0) {
    // NOTE(review): _playoutLengthSmpls is only ever assigned in the
    // constructor (to WEBRTC_10MS_PCM_AUDIO) within this file, so this
    // branch appears to be dead.
    return false;
  }
  _pcmFile.Write10MsData(audioFrame.data_,
      audioFrame.samples_per_channel_ * audioFrame.num_channels_);
  return true;
}
// Simulated playout clock: each loop iteration is one tick. A packet is fed
// to the ACM when the tick reaches the prefetched packet's time (_nextTime)
// and 10 ms of audio is pulled every tenth tick. Once the RTP file is
// exhausted the loop runs for another 50 ticks (counter500Ms) to drain the
// decoder before returning.
void Receiver::Run() {
  uint8_t counter500Ms = 50;
  uint32_t clock = 0;

  while (counter500Ms > 0) {
    if (clock == 0 || clock >= _nextTime) {
      EXPECT_TRUE(IncomingPacket());
      if (clock == 0) {
        // Jump straight to the first packet's delivery time.
        clock = _nextTime;
      }
    }
    if ((clock % 10) == 0) {
      if (!PlayoutData()) {
        clock++;
        continue;
      }
    }
    if (_rtpStream->EndOfFile()) {
      counter500Ms--;
    }
    clock++;
  }
}
// Default constructor: interactive mode (_testMode == 2) with tracing
// enabled; equivalent to EncodeDecodeTest(2).
EncodeDecodeTest::EncodeDecodeTest() {
  _testMode = 2;
  Trace::CreateTrace();
  Trace::SetTraceFile(
      (webrtc::test::OutputPath() + "acm_encdec_trace.txt").c_str());
}
//testMode == 0 for autotest
//testMode == 1 for testing all codecs/parameters
//testMode > 1 for specific user-input test (as it was used before)
// Tracing is enabled for every mode except 0.
EncodeDecodeTest::EncodeDecodeTest(int testMode) {
  _testMode = testMode;
  if (_testMode != 0) {
    Trace::CreateTrace();
    Trace::SetTraceFile(
        (webrtc::test::OutputPath() + "acm_encdec_trace.txt").c_str());
  }
}
// Runs the encode/decode round trip: each eligible mono codec is encoded
// into "outFile.rtp" and then decoded back, with gtest macros checking the
// API calls along the way. In interactive mode (_testMode == 2) only a
// single, user-chosen codec is exercised.
void EncodeDecodeTest::Perform() {
  int numCodecs = 1;
  int codePars[3];  // Frequency, packet size, rate.
  int numPars[52];  // Number of codec parameters sets (freq, pacsize, rate)
                    // to test, for a given codec.
  codePars[0] = 0;
  codePars[1] = 0;
  codePars[2] = 0;

  scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
  struct CodecInst sendCodecTmp;
  numCodecs = acm->NumberOfCodecs();
  // NOTE(review): numPars has a fixed capacity of 52; this assumes
  // acm->NumberOfCodecs() never exceeds 52.

  if (_testMode != 2) {
    // Pseudo codecs (telephone-event, cn, red) and stereo codecs get zero
    // parameter sets, which skips them in the loop below.
    for (int n = 0; n < numCodecs; n++) {
      EXPECT_EQ(0, acm->Codec(n, &sendCodecTmp));
      if (STR_CASE_CMP(sendCodecTmp.plname, "telephone-event") == 0) {
        numPars[n] = 0;
      } else if (STR_CASE_CMP(sendCodecTmp.plname, "cn") == 0) {
        numPars[n] = 0;
      } else if (STR_CASE_CMP(sendCodecTmp.plname, "red") == 0) {
        numPars[n] = 0;
      } else if (sendCodecTmp.channels == 2) {
        numPars[n] = 0;
      } else {
        numPars[n] = 1;
      }
    }
  } else {
    numCodecs = 1;
    numPars[0] = 1;
  }

  _receiver.testMode = _testMode;

  // Loop over all mono codecs:
  for (int codeId = 0; codeId < numCodecs; codeId++) {
    // Only encode using real mono encoders, not telephone-event and cng.
    for (int loopPars = 1; loopPars <= numPars[codeId]; loopPars++) {
      // Encode all data to file.
      EncodeToFile(1, codeId, codePars, _testMode);

      RTPFile rtpFile;
      std::string fileName = webrtc::test::OutputPath() + "outFile.rtp";
      rtpFile.Open(fileName.c_str(), "rb");

      _receiver.codeId = codeId;

      rtpFile.ReadHeader();
      _receiver.Setup(acm.get(), &rtpFile, "encodeDecode_out", 1);
      _receiver.Run();
      _receiver.Teardown();
      rtpFile.Close();
    }
  }

  // End tracing.
  if (_testMode == 1) {
    Trace::ReturnTrace();
  }
}
void EncodeDecodeTest::EncodeToFile(int fileType, int codeId, int* codePars,
int testMode) {
scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(1));
RTPFile rtpFile;
std::string fileName = webrtc::test::OutputPath() + "outFile.rtp";
rtpFile.Open(fileName.c_str(), "wb+");
rtpFile.WriteHeader();
// Store for auto_test and logging.
_sender.testMode = testMode;
_sender.codeId = codeId;
_sender.Setup(acm.get(), &rtpFile, "audio_coding/testfile32kHz", 32000, 1);
struct CodecInst sendCodecInst;
if (acm->SendCodec(&sendCodecInst) >= 0) {
_sender.Run();
}
_sender.Teardown();
rtpFile.Close();
}
} // namespace webrtc

View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_
#include <stdio.h>
#include <string.h>
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/RTPFile.h"
#include "webrtc/typedefs.h"
namespace webrtc {
#define MAX_INCOMING_PAYLOAD 8096
// AudioPacketizationCallback that writes every encoded payload to an
// RTPStream (typically an RTP dump file).
class TestPacketization : public AudioPacketizationCallback {
 public:
  TestPacketization(RTPStream *rtpStream, uint16_t frequency);
  ~TestPacketization();
  // Called by the ACM for every encoded frame.
  virtual int32_t SendData(const FrameType frameType, const uint8_t payloadType,
                           const uint32_t timeStamp, const uint8_t* payloadData,
                           const uint16_t payloadSize,
                           const RTPFragmentationHeader* fragmentation);

 private:
  static void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,
                            int16_t seqNo, uint32_t timeStamp, uint32_t ssrc);
  RTPStream* _rtpStream;   // Not owned.
  int32_t _frequency;      // Codec clock rate written with each packet.
  int16_t _seqNo;          // Locally generated RTP sequence number.
};
// Encodes a PCM input file through an ACM and emits the resulting packets
// via a TestPacketization callback.
class Sender {
 public:
  Sender();
  void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
             std::string in_file_name, int sample_rate, int channels);
  void Teardown();
  // Encodes until the input file is exhausted.
  void Run();
  // Feeds one 10 ms block; returns false at end-of-file or on ACM error.
  bool Add10MsData();

  //for auto_test and logging
  uint8_t testMode;
  uint8_t codeId;

 protected:
  AudioCodingModule* _acm;  // Not owned.

 private:
  PCMFile _pcmFile;
  AudioFrame _audioFrame;
  TestPacketization* _packetization;  // Owned; created in Setup().
};
// Decodes packets from an RTPStream through an ACM and writes the playout
// audio to a PCM output file.
class Receiver {
 public:
  Receiver();
  // (Removed the stray ';' that followed the empty destructor body.)
  virtual ~Receiver() {}
  void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
             std::string out_file_name, int channels);
  void Teardown();
  // Runs the simulated playout clock until the stream is drained.
  void Run();
  // Feeds the next packet to the ACM and prefetches the one after it.
  virtual bool IncomingPacket();
  // Pulls 10 ms of decoded audio and writes it to the output file.
  bool PlayoutData();

  //for auto_test and logging
  uint8_t codeId;
  uint8_t testMode;

 private:
  PCMFile _pcmFile;
  int16_t* _playoutBuffer;  // Owned; allocated in Setup(), freed in Teardown().
  uint16_t _playoutLengthSmpls;
  int32_t _frequency;
  bool _firstTime;

 protected:
  AudioCodingModule* _acm;  // Not owned.
  uint8_t _incomingPayload[MAX_INCOMING_PAYLOAD];
  RTPStream* _rtpStream;    // Not owned.
  WebRtcRTPHeader _rtpInfo;
  uint16_t _realPayloadSizeBytes;
  uint16_t _payloadSizeBytes;
  uint32_t _nextTime;
};
// End-to-end test: encodes a reference file through every codec to an RTP
// dump, then decodes it back to PCM (see Sender/Receiver above).
class EncodeDecodeTest : public ACMTest {
 public:
  EncodeDecodeTest();
  // |testMode| selects verbosity/interactivity (0 = silent auto-test).
  explicit EncodeDecodeTest(int testMode);
  // ACMTest implementation; runs the encode pass then the decode pass.
  virtual void Perform();

  uint16_t _playoutFreq;
  uint8_t _testMode;

 private:
  // Encodes the input file with codec |codeId| into RTP file |fileType|.
  void EncodeToFile(int fileType, int codeId, int* codePars, int testMode);

 protected:
  Sender _sender;
  Receiver _receiver;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ENCODEDECODETEST_H_

View File

@@ -0,0 +1,205 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "PCMFile.h"
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#include "gtest/gtest.h"
#include "module_common_types.h"
namespace webrtc {
#define MAX_FILE_NAME_LENGTH_BYTE 500
// Default constructor: 16 kHz mono with a randomized starting RTP timestamp.
PCMFile::PCMFile()
    : pcm_file_(NULL),
      samples_10ms_(160),
      frequency_(16000),
      end_of_file_(false),
      auto_rewind_(false),
      rewinded_(false),
      read_stereo_(false),
      save_stereo_(false) {
  // Build a 32-bit random timestamp from two 16-bit rand() draws.
  const uint32_t hi = (uint32_t) rand() & 0x0000FFFF;
  const uint32_t lo = (uint32_t) rand() & 0x0000FFFF;
  timestamp_ = (hi << 16) | lo;
}
// Constructor with a caller-chosen starting RTP timestamp.
PCMFile::PCMFile(uint32_t timestamp)
    : pcm_file_(NULL),
      samples_10ms_(160),
      frequency_(16000),
      end_of_file_(false),
      auto_rewind_(false),
      rewinded_(false),
      timestamp_(timestamp),
      read_stereo_(false),
      save_stereo_(false) {
}
// Interactively reads a file name and sampling frequency from stdin.
// Trims leading/trailing whitespace and control characters from the name.
// Returns 0 on success, -1 if the trimmed name exceeds |max_len|.
// |frequency_hz| is in/out: used as the prompt default, overwritten only if
// the user enters a positive value.
int16_t PCMFile::ChooseFile(std::string* file_name, int16_t max_len,
                            uint16_t* frequency_hz) {
  char tmp_name[MAX_FILE_NAME_LENGTH_BYTE];
  EXPECT_TRUE(fgets(tmp_name, MAX_FILE_NAME_LENGTH_BYTE, stdin) != NULL);
  tmp_name[MAX_FILE_NAME_LENGTH_BYTE - 1] = '\0';
  int16_t n = 0;
  // Strip leading whitespace/control characters. Bounds are checked before
  // indexing so tmp_name[n] is never read out of range.
  while ((n < MAX_FILE_NAME_LENGTH_BYTE) && (tmp_name[n] != 0)
      && (isspace(tmp_name[n]) || iscntrl(tmp_name[n]))) {
    n++;
  }
  if (n > 0) {
    memmove(tmp_name, &tmp_name[n], MAX_FILE_NAME_LENGTH_BYTE - n);
  }
  // Strip trailing whitespace/control characters. The n >= 0 guard comes
  // FIRST: the original evaluated isspace(tmp_name[n]) before checking the
  // bound, reading tmp_name[-1] when the whole string was whitespace.
  n = (int16_t)(strlen(tmp_name) - 1);
  while ((n >= 0) && (isspace(tmp_name[n]) || iscntrl(tmp_name[n]))) {
    n--;
  }
  tmp_name[n + 1] = '\0';
  int16_t len = (int16_t) strlen(tmp_name);
  if (len > max_len) {
    return -1;
  }
  if (len > 0) {
    // Copy exactly |len| characters. The original used (tmp_name, len + 1),
    // which embedded the terminating '\0' inside the returned std::string.
    *file_name = std::string(tmp_name, len);
  }
  printf("Enter the sampling frequency (in Hz) of the above file [%u]: ",
         *frequency_hz);
  EXPECT_TRUE(fgets(tmp_name, 10, stdin) != NULL);
  uint16_t tmp_frequency = (uint16_t) atoi(tmp_name);
  if (tmp_frequency > 0) {
    *frequency_hz = tmp_frequency;
  }
  return 0;
}
// Opens |file_name| with the given stdio |mode| and (re)initializes the
// per-file state. On failure the gtest failure is recorded but state is
// still reset, matching the previous behavior.
void PCMFile::Open(const std::string& file_name, uint16_t frequency,
                   const char* mode, bool auto_rewind) {
  pcm_file_ = fopen(file_name.c_str(), mode);
  if (pcm_file_ == NULL) {
    printf("Cannot open file %s.\n", file_name.c_str());
    ADD_FAILURE() << "Unable to read file";
  }
  frequency_ = frequency;
  // One 10 ms chunk is frequency / 100 samples per channel.
  samples_10ms_ = (uint16_t)(frequency_ / 100);
  auto_rewind_ = auto_rewind;
  end_of_file_ = false;
  rewinded_ = false;
}
// Sample rate of the open file, in Hz.
int32_t PCMFile::SamplingFrequency() const { return frequency_; }
// Number of samples per channel in one 10 ms chunk.
uint16_t PCMFile::PayloadLength10Ms() const { return samples_10ms_; }
// Reads one 10 ms chunk into |audio_frame| and stamps its metadata.
// A short read is zero-padded; then the file either rewinds (auto_rewind_)
// or end_of_file_ is latched. Always returns samples_10ms_ (per channel).
int32_t PCMFile::Read10MsData(AudioFrame& audio_frame) {
  const uint16_t num_channels = read_stereo_ ? 2 : 1;
  const int32_t samples_wanted = samples_10ms_ * num_channels;
  int32_t samples_read = (int32_t) fread(audio_frame.data_, sizeof(uint16_t),
                                         samples_wanted, pcm_file_);
  if (samples_read < samples_wanted) {
    // Zero-fill the tail of the short read.
    for (int k = samples_read; k < samples_wanted; k++) {
      audio_frame.data_[k] = 0;
    }
    if (auto_rewind_) {
      rewind(pcm_file_);
      rewinded_ = true;
    } else {
      end_of_file_ = true;
    }
  }
  audio_frame.samples_per_channel_ = samples_10ms_;
  audio_frame.sample_rate_hz_ = frequency_;
  audio_frame.num_channels_ = num_channels;
  audio_frame.timestamp_ = timestamp_;
  timestamp_ += samples_10ms_;
  return samples_10ms_;
}
// Writes one frame of audio to the file. Mono input is duplicated into both
// channels when save_stereo_ is set; multi-channel input is written as-is
// (interleaved). Short writes are silently ignored, as before.
void PCMFile::Write10MsData(AudioFrame& audio_frame) {
  if (audio_frame.num_channels_ == 1) {
    if (!save_stereo_) {
      if (fwrite(audio_frame.data_, sizeof(uint16_t),
                 audio_frame.samples_per_channel_, pcm_file_) !=
          static_cast<size_t>(audio_frame.samples_per_channel_)) {
        return;
      }
    } else {
      // Duplicate the mono samples into an interleaved stereo buffer.
      int16_t* stereo_audio = new int16_t[2 * audio_frame.samples_per_channel_];
      for (int k = 0; k < audio_frame.samples_per_channel_; k++) {
        stereo_audio[k << 1] = audio_frame.data_[k];
        stereo_audio[(k << 1) + 1] = audio_frame.data_[k];
      }
      size_t written = fwrite(stereo_audio, sizeof(int16_t),
                              2 * audio_frame.samples_per_channel_, pcm_file_);
      // Free the buffer unconditionally: the original returned early on a
      // short write, leaking |stereo_audio|.
      delete[] stereo_audio;
      if (written !=
          static_cast<size_t>(2 * audio_frame.samples_per_channel_)) {
        return;
      }
    }
  } else {
    if (fwrite(audio_frame.data_, sizeof(int16_t),
               audio_frame.num_channels_ * audio_frame.samples_per_channel_,
               pcm_file_) !=
        static_cast<size_t>(audio_frame.num_channels_ *
                            audio_frame.samples_per_channel_)) {
      return;
    }
  }
}
// Writes |length_smpls| raw samples from |playout_buffer| to the file.
// A short write is silently ignored.
void PCMFile::Write10MsData(int16_t* playout_buffer, uint16_t length_smpls) {
  const size_t written =
      fwrite(playout_buffer, sizeof(uint16_t), length_smpls, pcm_file_);
  if (written != length_smpls) {
    return;
  }
}
// Closes the file if open. The NULL guard makes Close() safe after a failed
// Open() or a double Close(); the original called fclose(NULL), which is
// undefined behavior.
void PCMFile::Close() {
  if (pcm_file_ != NULL) {
    fclose(pcm_file_);
    pcm_file_ = NULL;
  }
}
// Seeks back to the start of the file and clears the EOF latch.
void PCMFile::Rewind() {
  rewind(pcm_file_);
  end_of_file_ = false;
}
// True if an auto-rewind has occurred since Open().
bool PCMFile::Rewinded() { return rewinded_; }
// When set, mono frames written via Write10MsData are duplicated to stereo.
void PCMFile::SaveStereo(bool is_stereo) { save_stereo_ = is_stereo; }
// When set, Read10MsData reads interleaved two-channel data.
void PCMFile::ReadStereo(bool is_stereo) { read_stereo_ = is_stereo; }
} // namespace webrtc

View File

@@ -0,0 +1,68 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/typedefs.h"
namespace webrtc {
// Helper for reading/writing raw 16-bit PCM test files in 10 ms chunks,
// tracking an RTP-style timestamp across reads.
class PCMFile {
 public:
  PCMFile();
  // Starts the timestamp counter at |timestamp| instead of a random value.
  PCMFile(uint32_t timestamp);
  ~PCMFile() {
    // Close any still-open file; Close() also does this explicitly.
    if (pcm_file_ != NULL) {
      fclose(pcm_file_);
    }
  }
  // Opens |filename| with stdio |mode|; |frequency| is the sample rate in
  // Hz. With |auto_rewind|, reads wrap to the start instead of hitting EOF.
  void Open(const std::string& filename, uint16_t frequency, const char* mode,
            bool auto_rewind = false);
  // Reads one 10 ms chunk into |audio_frame|; zero-pads short reads.
  int32_t Read10MsData(AudioFrame& audio_frame);
  // Writes |length_smpls| raw samples.
  void Write10MsData(int16_t *playout_buffer, uint16_t length_smpls);
  // Writes one frame; mono may be duplicated to stereo (see SaveStereo).
  void Write10MsData(AudioFrame& audio_frame);
  // Samples per channel in a 10 ms chunk.
  uint16_t PayloadLength10Ms() const;
  int32_t SamplingFrequency() const;
  void Close();
  bool EndOfFile() const {
    return end_of_file_;
  }
  void Rewind();
  // Interactively prompts (stdin) for a file name and sample rate.
  static int16_t ChooseFile(std::string* file_name, int16_t max_len,
                            uint16_t* frequency_hz);
  // True once an auto-rewind has occurred.
  bool Rewinded();
  void SaveStereo(bool is_stereo = true);
  void ReadStereo(bool is_stereo = true);

 private:
  FILE* pcm_file_;          // Owned; closed in Close()/destructor.
  uint16_t samples_10ms_;   // Samples per channel per 10 ms.
  int32_t frequency_;       // Sample rate in Hz.
  bool end_of_file_;
  bool auto_rewind_;
  bool rewinded_;
  uint32_t timestamp_;      // Advances by samples_10ms_ per read.
  bool read_stereo_;
  bool save_stereo_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PCMFILE_H_

View File

@@ -0,0 +1,167 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/PacketLossTest.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Default state: no loss, burst length 1, and burst_lost_counter_ primed to
// burst_length_ so the very first packet is never dropped.
ReceiverWithPacketLoss::ReceiverWithPacketLoss() {
  loss_rate_ = 0;
  burst_length_ = 1;
  packet_counter_ = 0;
  lost_packet_counter_ = 0;
  burst_lost_counter_ = burst_length_;
}
// Configures the loss model, then delegates to Receiver::Setup with an
// output name encoding the loss parameters.
void ReceiverWithPacketLoss::Setup(AudioCodingModule *acm,
                                   RTPStream *rtpStream,
                                   std::string out_file_name,
                                   int channels,
                                   int loss_rate,
                                   int burst_length) {
  loss_rate_ = loss_rate;
  burst_length_ = burst_length;
  burst_lost_counter_ = burst_length_;  // To prevent first packet gets lost.
  std::stringstream name_builder;
  name_builder << out_file_name << "_" << loss_rate_ << "_" << burst_length_
               << "_";
  Receiver::Setup(acm, rtpStream, name_builder.str(), channels);
}
// Delivers the current packet to the ACM (unless the loss model drops it),
// then pre-reads the next packet. Uses a prime-then-lookahead pattern: the
// first call reads the first packet before delivering anything.
// Returns false only on a zero-size read that is not end-of-stream.
bool ReceiverWithPacketLoss::IncomingPacket() {
  if (!_rtpStream->EndOfFile()) {
    if (packet_counter_ == 0) {
      // Prime: fetch the very first packet.
      _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
                                               _payloadSizeBytes, &_nextTime);
      if (_realPayloadSizeBytes == 0) {
        if (_rtpStream->EndOfFile()) {
          packet_counter_ = 0;
          return true;
        } else {
          return false;
        }
      }
    }
    // Deliver the current packet unless the loss model says to drop it.
    if (!PacketLost()) {
      _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes, _rtpInfo);
    }
    packet_counter_++;
    // Look ahead: read the next packet for the following call.
    _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload,
                                             _payloadSizeBytes, &_nextTime);
    if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile()) {
      // Stream exhausted: reset counters so a rerun starts clean.
      packet_counter_ = 0;
      lost_packet_counter_ = 0;
    }
  }
  return true;
}
// Bursty loss model: once a burst starts, drop burst_length_ consecutive
// packets; start a new burst whenever the realized loss percentage is below
// loss_rate_.
bool ReceiverWithPacketLoss::PacketLost() {
  // Continue an ongoing burst.
  if (burst_lost_counter_ < burst_length_) {
    ++lost_packet_counter_;
    ++burst_lost_counter_;
    return true;
  }
  // Start a new burst if we are under the target loss rate.
  const bool under_target =
      lost_packet_counter_ * 100 < loss_rate_ * packet_counter_;
  if (under_target) {
    ++lost_packet_counter_;
    burst_lost_counter_ = 1;
    return true;
  }
  return false;
}
// Starts with no expected loss; SetPacketLossRate() updates it.
SenderWithFEC::SenderWithFEC() {
  expected_loss_rate_ = 0;
}
// Delegates to Sender::Setup, then enables codec FEC and programs the
// expected loss rate into the encoder.
void SenderWithFEC::Setup(AudioCodingModule *acm, RTPStream *rtpStream,
                          std::string in_file_name, int sample_rate,
                          int channels, int expected_loss_rate) {
  Sender::Setup(acm, rtpStream, in_file_name, sample_rate, channels);
  EXPECT_TRUE(SetFEC(true));
  EXPECT_TRUE(SetPacketLossRate(expected_loss_rate));
}
// Toggles in-band FEC on the send codec; true on success.
bool SenderWithFEC::SetFEC(bool enable_fec) {
  return _acm->SetCodecFEC(enable_fec) == 0;
}
// Programs the encoder's expected loss rate; caches it only on success.
bool SenderWithFEC::SetPacketLossRate(int expected_loss_rate) {
  if (_acm->SetPacketLossRate(expected_loss_rate) != 0) {
    return false;
  }
  expected_loss_rate_ = expected_loss_rate;
  return true;
}
// Builds a mono or stereo packet-loss test around the fixed 32 kHz test
// files. |expected_loss_rate| is what the encoder's FEC is tuned for;
// |actual_loss_rate| and |burst_length| drive the receiver's loss model.
PacketLossTest::PacketLossTest(int channels, int expected_loss_rate,
                               int actual_loss_rate, int burst_length)
    : channels_(channels),
      // Pick the mono or stereo reference file to match |channels|.
      in_file_name_(channels_ == 1 ? "audio_coding/testfile32kHz" :
          "audio_coding/teststereo32kHz"),
      sample_rate_hz_(32000),
      sender_(new SenderWithFEC),
      receiver_(new ReceiverWithPacketLoss),
      expected_loss_rate_(expected_loss_rate),
      actual_loss_rate_(actual_loss_rate),
      burst_length_(burst_length) {
}
// Runs the full test: encode the reference file with Opus+FEC to an RTP
// dump, then decode it while simulating packet loss. A no-op when Opus is
// not compiled in.
void PacketLossTest::Perform() {
#ifndef WEBRTC_CODEC_OPUS
  return;
#else
  scoped_ptr<AudioCodingModule> acm(AudioCodingModule::Create(0));
  int codec_id = acm->Codec("opus", 48000, channels_);
  RTPFile rtpFile;
  std::string fileName = webrtc::test::OutputPath() + "outFile.rtp";

  // Encode pass: sender writes FEC-protected Opus packets to the RTP file.
  rtpFile.Open(fileName.c_str(), "wb+");
  rtpFile.WriteHeader();
  sender_->testMode = 0;
  sender_->codeId = codec_id;
  sender_->Setup(acm.get(), &rtpFile, in_file_name_, sample_rate_hz_, channels_,
                 expected_loss_rate_);
  struct CodecInst sendCodecInst;
  // Only run if a send codec is actually registered.
  if (acm->SendCodec(&sendCodecInst) >= 0) {
    sender_->Run();
  }
  sender_->Teardown();
  rtpFile.Close();

  // Decode pass: receiver replays the RTP file, dropping packets per the
  // configured loss model, and writes the decoded audio to file.
  rtpFile.Open(fileName.c_str(), "rb");
  rtpFile.ReadHeader();
  receiver_->testMode = 0;
  receiver_->codeId = codec_id;
  receiver_->Setup(acm.get(), &rtpFile, "packetLoss_out", channels_,
                   actual_loss_rate_, burst_length_);
  receiver_->Run();
  receiver_->Teardown();
  rtpFile.Close();
#endif
}
} // namespace webrtc

View File

@@ -0,0 +1,66 @@
/*
* Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_
#include <string>
#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
// Receiver that simulates bursty packet loss: drops packets so the realized
// loss approaches |loss_rate| percent, in bursts of |burst_length|.
class ReceiverWithPacketLoss : public Receiver {
 public:
  ReceiverWithPacketLoss();
  // Extends Receiver::Setup with the loss model parameters; the output file
  // name is suffixed with "_<loss_rate>_<burst_length>_".
  void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
             std::string out_file_name, int channels, int loss_rate,
             int burst_length);
  // Delivers packets to the ACM, skipping those PacketLost() drops.
  bool IncomingPacket() OVERRIDE;

 protected:
  // Decides whether the current packet should be dropped.
  bool PacketLost();
  int loss_rate_;            // Target loss percentage.
  int burst_length_;         // Packets dropped per burst.
  int packet_counter_;       // Packets seen so far.
  int lost_packet_counter_;  // Packets dropped so far.
  int burst_lost_counter_;   // Progress within the current burst.
};
// Sender that enables codec-level FEC tuned for an expected loss rate.
class SenderWithFEC : public Sender {
 public:
  SenderWithFEC();
  // Extends Sender::Setup by enabling FEC and programming
  // |expected_loss_rate| into the encoder.
  void Setup(AudioCodingModule *acm, RTPStream *rtpStream,
             std::string in_file_name, int sample_rate, int channels,
             int expected_loss_rate);
  // Returns true on success.
  bool SetPacketLossRate(int expected_loss_rate);
  // Returns true on success.
  bool SetFEC(bool enable_fec);

 protected:
  int expected_loss_rate_;
};
// End-to-end Opus+FEC test under simulated packet loss: encodes with FEC
// tuned for |expected_loss_rate|, decodes while dropping packets at
// |actual_loss_rate| in bursts of |burst_length|.
class PacketLossTest : public ACMTest {
 public:
  // Parameter renamed from "expected_loss_rate_": the trailing underscore
  // is the member-naming convention and does not belong on a parameter.
  // Declaration-only change; the definition's signature is unaffected.
  PacketLossTest(int channels, int expected_loss_rate, int actual_loss_rate,
                 int burst_length);
  void Perform();

 protected:
  int channels_;
  std::string in_file_name_;
  int sample_rate_hz_;
  scoped_ptr<SenderWithFEC> sender_;
  scoped_ptr<ReceiverWithPacketLoss> receiver_;
  int expected_loss_rate_;  // Loss rate the encoder's FEC is tuned for.
  int actual_loss_rate_;    // Loss rate the receiver actually simulates.
  int burst_length_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_PACKETLOSSTEST_H_

View File

@@ -0,0 +1,247 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "RTPFile.h"
#include <stdlib.h>
#ifdef WIN32
# include <Winsock2.h>
#else
# include <arpa/inet.h>
#endif
#include "audio_coding_module.h"
#include "engine_configurations.h"
#include "gtest/gtest.h" // TODO (tlegrand): Consider removing usage of gtest.
#include "rw_lock_wrapper.h"
namespace webrtc {
// Deserializes the fixed 12-byte RTP header (big-endian wire format):
// payload type in byte 1, sequence number in bytes 2-3, timestamp in
// bytes 4-7, SSRC in bytes 8-11.
void RTPStream::ParseRTPHeader(WebRtcRTPHeader* rtpInfo,
                               const uint8_t* rtpHeader) {
  rtpInfo->header.payloadType = rtpHeader[1];
  rtpInfo->header.sequenceNumber =
      (static_cast<uint16_t>(rtpHeader[2]) << 8) | rtpHeader[3];
  uint32_t timestamp = 0;
  uint32_t ssrc = 0;
  for (int i = 0; i < 4; ++i) {
    timestamp = (timestamp << 8) | rtpHeader[4 + i];
    ssrc = (ssrc << 8) | rtpHeader[8 + i];
  }
  rtpInfo->header.timestamp = timestamp;
  rtpInfo->header.ssrc = ssrc;
}
// Serializes a fixed 12-byte RTP header in big-endian wire format.
void RTPStream::MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType,
                              int16_t seqNo, uint32_t timeStamp,
                              uint32_t ssrc) {
  rtpHeader[0] = 0x80;  // Version 2; no padding, extension, or CSRCs.
  rtpHeader[1] = static_cast<uint8_t>(payloadType & 0xFF);
  rtpHeader[2] = static_cast<uint8_t>((seqNo >> 8) & 0xFF);
  rtpHeader[3] = static_cast<uint8_t>(seqNo & 0xFF);
  // Timestamp (bytes 4-7) and SSRC (bytes 8-11), most significant first.
  for (int i = 0; i < 4; ++i) {
    const int shift = 8 * (3 - i);
    rtpHeader[4 + i] = static_cast<uint8_t>((timeStamp >> shift) & 0xFF);
    rtpHeader[8 + i] = static_cast<uint8_t>((ssrc >> shift) & 0xFF);
  }
}
// Deep-copies |payloadData| (may be empty). payloadData is initialized to
// NULL in the init list: the original left it uninitialized when
// payloadSize == 0, so the destructor's delete[] read an indeterminate
// pointer (undefined behavior).
RTPPacket::RTPPacket(uint8_t payloadType, uint32_t timeStamp, int16_t seqNo,
                     const uint8_t* payloadData, uint16_t payloadSize,
                     uint32_t frequency)
    : payloadType(payloadType),
      timeStamp(timeStamp),
      seqNo(seqNo),
      payloadData(NULL),
      payloadSize(payloadSize),
      frequency(frequency) {
  if (payloadSize > 0) {
    this->payloadData = new uint8_t[payloadSize];
    memcpy(this->payloadData, payloadData, payloadSize);
  }
}
RTPPacket::~RTPPacket() {
  // NOTE(review): payloadData is only allocated when payloadSize > 0; the
  // constructor must initialize it to NULL in the empty case, otherwise this
  // delete[] operates on an indeterminate pointer — verify the constructor.
  delete[] payloadData;
}
// Creates the reader/writer lock guarding the packet queue.
RTPBuffer::RTPBuffer()
    : _queueRWLock(RWLockWrapper::CreateRWLock()) {
}
// Frees the lock and any packets still queued. The original deleted only
// the lock, leaking every RTPPacket that was written but never read.
RTPBuffer::~RTPBuffer() {
  while (!_rtpQueue.empty()) {
    delete _rtpQueue.front();
    _rtpQueue.pop();
  }
  delete _queueRWLock;
}
void RTPBuffer::Write(const uint8_t payloadType, const uint32_t timeStamp,
const int16_t seqNo, const uint8_t* payloadData,
const uint16_t payloadSize, uint32_t frequency) {
RTPPacket *packet = new RTPPacket(payloadType, timeStamp, seqNo, payloadData,
payloadSize, frequency);
_queueRWLock->AcquireLockExclusive();
_rtpQueue.push(packet);
_queueRWLock->ReleaseLockExclusive();
}
// Dequeues the oldest packet, fills in |rtpInfo| and |payloadData|, and
// returns the payload size (0 if the caller's buffer is too small).
// Fixes two defects in the original: the pop mutated the queue under the
// SHARED lock (use the exclusive lock), and the dequeued packet was never
// deleted (leaked on every read).
uint16_t RTPBuffer::Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
                         uint16_t payloadSize, uint32_t* offset) {
  _queueRWLock->AcquireLockExclusive();
  RTPPacket* packet = _rtpQueue.front();
  _rtpQueue.pop();
  _queueRWLock->ReleaseLockExclusive();
  rtpInfo->header.markerBit = 1;
  rtpInfo->header.payloadType = packet->payloadType;
  rtpInfo->header.sequenceNumber = packet->seqNo;
  rtpInfo->header.ssrc = 0;
  rtpInfo->header.timestamp = packet->timeStamp;
  if (packet->payloadSize > 0 && payloadSize >= packet->payloadSize) {
    memcpy(payloadData, packet->payloadData, packet->payloadSize);
  } else {
    delete packet;
    return 0;
  }
  // Playout time offset in ms.
  *offset = (packet->timeStamp / (packet->frequency / 1000));
  const uint16_t payload_bytes = packet->payloadSize;
  delete packet;
  return payload_bytes;
}
// "End of file" for a live buffer means the queue is currently empty.
bool RTPBuffer::EndOfFile() const {
  _queueRWLock->AcquireLockShared();
  const bool queue_empty = _rtpQueue.empty();
  _queueRWLock->ReleaseLockShared();
  return queue_empty;
}
// Opens the RTP dump file; records a gtest failure and aborts the process
// if the file cannot be opened (matches the original behavior).
void RTPFile::Open(const char *filename, const char *mode) {
  _rtpFile = fopen(filename, mode);
  if (_rtpFile == NULL) {
    printf("Cannot write file %s.\n", filename);
    ADD_FAILURE() << "Unable to write file";
    exit(1);
  }
}
// Closes the file if open; safe to call repeatedly.
void RTPFile::Close() {
  if (_rtpFile == NULL) {
    return;
  }
  fclose(_rtpFile);
  _rtpFile = NULL;
}
// Writes the rtpplay-compatible file header: a magic line followed by
// start_sec, start_usec, source (4 bytes each) and port, padding (2 bytes
// each) — all zero, so endianness is irrelevant.
void RTPFile::WriteHeader() {
  fprintf(_rtpFile, "#!RTPencode%s\n", "1.0");
  const uint32_t zero = 0;
  for (int i = 0; i < 3; ++i) {
    if (fwrite(&zero, 4, 1, _rtpFile) != 1) {
      return;
    }
  }
  for (int i = 0; i < 2; ++i) {
    if (fwrite(&zero, 2, 1, _rtpFile) != 1) {
      return;
    }
  }
  fflush(_rtpFile);
}
// Consumes the rtpplay file header written by WriteHeader(): the magic
// line, then start time, source, port, and padding. The values are byte-
// swapped from network order but otherwise discarded.
void RTPFile::ReadHeader() {
  uint32_t start_sec, start_usec, source;
  uint16_t port, padding;
  char magic_line[40];
  EXPECT_TRUE(fgets(magic_line, 40, _rtpFile) != 0);
  EXPECT_EQ(1u, fread(&start_sec, 4, 1, _rtpFile));
  start_sec = ntohl(start_sec);
  EXPECT_EQ(1u, fread(&start_usec, 4, 1, _rtpFile));
  start_usec = ntohl(start_usec);
  EXPECT_EQ(1u, fread(&source, 4, 1, _rtpFile));
  source = ntohl(source);
  EXPECT_EQ(1u, fread(&port, 2, 1, _rtpFile));
  port = ntohs(port);
  EXPECT_EQ(1u, fread(&padding, 2, 1, _rtpFile));
  padding = ntohs(padding);
}
// Appends one packet record: total length, packet length, playout offset
// (ms), 12-byte RTP header, then the payload. Multi-byte fields are written
// in network byte order. Short writes are silently ignored.
void RTPFile::Write(const uint8_t payloadType, const uint32_t timeStamp,
                    const int16_t seqNo, const uint8_t* payloadData,
                    const uint16_t payloadSize, uint32_t frequency) {
  uint8_t rtp_header[12];
  MakeRTPheader(rtp_header, payloadType, seqNo, timeStamp, 0);
  // Record sizes: 12-byte RTP header + payload, plus 8 bytes of rtpplay
  // per-packet framing in the total.
  const uint16_t total_len_be = htons(12 + payloadSize + 8);
  const uint16_t packet_len_be = htons(12 + payloadSize);
  uint32_t offset_ms_be = htonl(timeStamp / (frequency / 1000));
  if (fwrite(&total_len_be, 2, 1, _rtpFile) != 1) {
    return;
  }
  if (fwrite(&packet_len_be, 2, 1, _rtpFile) != 1) {
    return;
  }
  if (fwrite(&offset_ms_be, 4, 1, _rtpFile) != 1) {
    return;
  }
  if (fwrite(rtp_header, 12, 1, _rtpFile) != 1) {
    return;
  }
  if (fwrite(payloadData, 1, payloadSize, _rtpFile) != payloadSize) {
    return;
  }
}
// Reads the next packet record into |rtpInfo|/|payloadData| and sets
// |offset| to its playout time (ms). Returns the payload size, or 0 on
// end-of-file, an empty packet, or a buffer too small. The size guards are
// reordered versus the original, which computed payloadSize < lengthBytes
// - 20 BEFORE verifying lengthBytes >= 20 and was correct only through
// implicit int promotion of the uint16_t operands.
uint16_t RTPFile::Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
                       uint16_t payloadSize, uint32_t* offset) {
  uint16_t lengthBytes;
  uint16_t plen;
  uint8_t rtpHeader[12];
  size_t read_len = fread(&lengthBytes, 2, 1, _rtpFile);
  // Check if we have reached end of file.
  if ((read_len == 0) && feof(_rtpFile)) {
    _rtpEOF = true;
    return 0;
  }
  EXPECT_EQ(1u, fread(&plen, 2, 1, _rtpFile));
  EXPECT_EQ(1u, fread(offset, 4, 1, _rtpFile));
  lengthBytes = ntohs(lengthBytes);
  plen = ntohs(plen);
  *offset = ntohl(*offset);
  EXPECT_GT(plen, 11);
  EXPECT_EQ(1u, fread(rtpHeader, 12, 1, _rtpFile));
  ParseRTPHeader(rtpInfo, rtpHeader);
  rtpInfo->type.Audio.isCNG = false;
  rtpInfo->type.Audio.channel = 1;
  EXPECT_EQ(lengthBytes, plen + 8);
  if (plen == 0) {
    return 0;
  }
  // The record must at least cover the 8-byte framing + 12-byte RTP header.
  if (lengthBytes < 20) {
    return 0;
  }
  lengthBytes -= 20;  // Remaining bytes are the payload.
  // The payload must fit in the caller's buffer.
  if (payloadSize < lengthBytes) {
    return 0;
  }
  EXPECT_EQ(lengthBytes, fread(payloadData, 1, lengthBytes, _rtpFile));
  return lengthBytes;
}
} // namespace webrtc

View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_
#include <stdio.h>
#include <queue>
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/rw_lock_wrapper.h"
#include "webrtc/typedefs.h"
namespace webrtc {
// Abstract source/sink of RTP packets, implemented over a file (RTPFile)
// or an in-memory queue (RTPBuffer).
class RTPStream {
 public:
  virtual ~RTPStream() {
  }

  // Appends one packet; implementations copy the payload.
  virtual void Write(const uint8_t payloadType, const uint32_t timeStamp,
                     const int16_t seqNo, const uint8_t* payloadData,
                     const uint16_t payloadSize, uint32_t frequency) = 0;

  // Returns the packet's payload size. Zero should be treated as an
  // end-of-stream (in the case that EndOfFile() is true) or an error.
  virtual uint16_t Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
                        uint16_t payloadSize, uint32_t* offset) = 0;

  virtual bool EndOfFile() const = 0;

 protected:
  // Serializes a 12-byte RTP header into |rtpHeader| (network byte order).
  void MakeRTPheader(uint8_t* rtpHeader, uint8_t payloadType, int16_t seqNo,
                     uint32_t timeStamp, uint32_t ssrc);
  // Deserializes a 12-byte RTP header from |rtpHeader|.
  void ParseRTPHeader(WebRtcRTPHeader* rtpInfo, const uint8_t* rtpHeader);
};
// One queued RTP packet with a heap-owned copy of its payload.
class RTPPacket {
 public:
  // Deep-copies |payloadData| (ignored when |payloadSize| is 0).
  RTPPacket(uint8_t payloadType, uint32_t timeStamp, int16_t seqNo,
            const uint8_t* payloadData, uint16_t payloadSize,
            uint32_t frequency);

  ~RTPPacket();

  uint8_t payloadType;
  uint32_t timeStamp;
  int16_t seqNo;
  uint8_t* payloadData;   // Owned; released by the destructor.
  uint16_t payloadSize;
  uint32_t frequency;     // Sample rate in Hz, for offset computation.
};
// In-memory RTPStream: a FIFO of RTPPacket*, guarded by a reader/writer
// lock so a sender and receiver can run on different threads.
class RTPBuffer : public RTPStream {
 public:
  RTPBuffer();

  ~RTPBuffer();

  // Enqueues a copy of the packet (RTPStream implementation).
  void Write(const uint8_t payloadType, const uint32_t timeStamp,
             const int16_t seqNo, const uint8_t* payloadData,
             const uint16_t payloadSize, uint32_t frequency);

  // Dequeues the oldest packet; 0 if the caller's buffer is too small.
  uint16_t Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
                uint16_t payloadSize, uint32_t* offset);

  // True when the queue is currently empty.
  virtual bool EndOfFile() const;

 private:
  RWLockWrapper* _queueRWLock;        // Owned.
  std::queue<RTPPacket *> _rtpQueue;  // Queued packets are owned.
};
// File-backed RTPStream using the rtpplay dump format (also readable by
// NetEQ test tools).
class RTPFile : public RTPStream {
 public:
  ~RTPFile() {
  }

  RTPFile()
      : _rtpFile(NULL),
        _rtpEOF(false) {
  }

  // Opens the dump file with the given stdio mode; aborts on failure.
  void Open(const char *outFilename, const char *mode);

  void Close();

  // Writes the rtpplay file header; call once after Open("wb...").
  void WriteHeader();

  // Consumes the rtpplay file header; call once after Open("rb...").
  void ReadHeader();

  // Appends one packet record (RTPStream implementation).
  void Write(const uint8_t payloadType, const uint32_t timeStamp,
             const int16_t seqNo, const uint8_t* payloadData,
             const uint16_t payloadSize, uint32_t frequency);

  // Reads the next packet record; 0 at EOF (then EndOfFile() is true).
  uint16_t Read(WebRtcRTPHeader* rtpInfo, uint8_t* payloadData,
                uint16_t payloadSize, uint32_t* offset);

  bool EndOfFile() const {
    return _rtpEOF;
  }

 private:
  FILE* _rtpFile;  // Owned; closed in Close().
  bool _rtpEOF;    // Latched by Read() at end of file.
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_RTPFILE_H_

View File

@@ -0,0 +1,201 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "common_types.h"
#include "SpatialAudio.h"
#include "trace.h"
#include "utility.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
#define NUM_PANN_COEFFS 10
// Creates the three ACM instances (left/right senders and the receiver).
// _channel is explicitly NULL-initialized: the original left it
// uninitialized, so destroying a SpatialAudio before Setup() ran made the
// destructor delete an indeterminate pointer.
SpatialAudio::SpatialAudio(int testMode)
    : _acmLeft(AudioCodingModule::Create(1)),
      _acmRight(AudioCodingModule::Create(2)),
      _acmReceiver(AudioCodingModule::Create(3)),
      _channel(NULL),
      _testMode(testMode) {
}
SpatialAudio::~SpatialAudio() {
  // NOTE(review): _channel is allocated in Setup(); if the constructor does
  // not NULL-initialize it, this delete is undefined when Setup() never ran
  // — verify the constructor.
  delete _channel;
  _inFile.Close();
  _outFile.Close();
}
// Wires the two send-side ACMs to the shared Channel, opens the input and
// output PCM files, and registers every available codec as a receive codec.
// Always returns 0.
int16_t SpatialAudio::Setup() {
  _channel = new Channel;

  // Register callback for the sender side.
  CHECK_ERROR(_acmLeft->RegisterTransportCallback(_channel));
  CHECK_ERROR(_acmRight->RegisterTransportCallback(_channel));

  // Register the receiver ACM in channel
  _channel->RegisterReceiverACM(_acmReceiver.get());

  uint16_t sampFreqHz = 32000;

  const std::string file_name = webrtc::test::ResourcePath(
      "audio_coding/testfile32kHz", "pcm");
  _inFile.Open(file_name, sampFreqHz, "rb", false);

  std::string output_file = webrtc::test::OutputPath()
      + "out_spatial_autotest.pcm";
  if (_testMode == 1) {
    // Interactive mode: let the user override the output file and rate.
    output_file = webrtc::test::OutputPath() + "testspatial_out.pcm";
    printf("\n");
    printf("Enter the output file [%s]: ", output_file.c_str());
    PCMFile::ChooseFile(&output_file, MAX_FILE_NAME_LENGTH_BYTE, &sampFreqHz);
  } else {
    output_file = webrtc::test::OutputPath() + "testspatial_out.pcm";
  }
  // Output is stereo: left and right panned copies of the mono input.
  _outFile.Open(output_file, sampFreqHz, "wb", false);
  _outFile.SaveStereo(true);

  // Register all available codecs as receiving codecs.
  CodecInst codecInst;
  int status;
  uint8_t num_encoders = _acmReceiver->NumberOfCodecs();
  for (uint8_t n = 0; n < num_encoders; n++) {
    status = _acmReceiver->Codec(n, &codecInst);
    if (status < 0) {
      printf("Error in Codec(), no matching codec found");
    }
    status = _acmReceiver->RegisterReceiveCodec(codecInst);
    if (status < 0) {
      printf("Error in RegisterReceiveCodec() for payload type %d",
             codecInst.pltype);
    }
  }

  return 0;
}
// Runs the spatial-audio scenario: a mono pass, then panning sweeps that
// alternate between two codecs, another mono pass, and a reverse sweep.
void SpatialAudio::Perform() {
  if (_testMode == 0) {
    printf("Running SpatialAudio Test");
    WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
                 "---------- SpatialAudio ----------");
  }

  Setup();

  // Mono pass with codec index 1. NOTE(review): codec indices (1, 0, 3, 4)
  // and pacsize values (480, 320) are ACM-registry-specific magic numbers —
  // confirm against the codec database if the registry changes.
  CodecInst codecInst;
  _acmLeft->Codec((uint8_t) 1, &codecInst);
  CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
  EncodeDecode();

  // Pan from center-left to hard-left/right in NUM_PANN_COEFFS steps,
  // switching codec between consecutive steps.
  int16_t pannCntr = 0;
  double leftPanning[NUM_PANN_COEFFS] = { 1.00, 0.95, 0.90, 0.85, 0.80, 0.75,
      0.70, 0.60, 0.55, 0.50 };
  double rightPanning[NUM_PANN_COEFFS] = { 0.50, 0.55, 0.60, 0.70, 0.75, 0.80,
      0.85, 0.90, 0.95, 1.00 };
  while ((pannCntr + 1) < NUM_PANN_COEFFS) {
    _acmLeft->Codec((uint8_t) 0, &codecInst);
    codecInst.pacsize = 480;
    CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
    CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
    EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
    pannCntr++;

    // Change codec
    _acmLeft->Codec((uint8_t) 3, &codecInst);
    codecInst.pacsize = 320;
    CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
    CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
    EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
    pannCntr++;
    if (_testMode == 0) {
      printf(".");
    }
  }

  // Another mono pass with codec index 4.
  _acmLeft->Codec((uint8_t) 4, &codecInst);
  CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
  EncodeDecode();

  // Reverse panning sweep with codec index 0.
  _acmLeft->Codec((uint8_t) 0, &codecInst);
  codecInst.pacsize = 480;
  CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
  CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
  pannCntr = NUM_PANN_COEFFS - 1;
  while (pannCntr >= 0) {
    EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
    pannCntr--;
    if (_testMode == 0) {
      printf(".");
    }
  }

  if (_testMode == 0) {
    printf("Done!\n");
  }
}
// Encodes one pass of the input file through both send ACMs with the given
// left/right gains and writes the decoded stereo result to the output file.
// The frame is scaled by leftPanning first; the right channel then applies
// rightPanning/leftPanning to the already-scaled data, so the right ACM
// effectively sees the input scaled by rightPanning.
void SpatialAudio::EncodeDecode(const double leftPanning,
                                const double rightPanning) {
  AudioFrame audioFrame;
  int32_t outFileSampFreq = _outFile.SamplingFrequency();

  const double rightToLeftRatio = rightPanning / leftPanning;

  _channel->SetIsStereo(true);

  while (!_inFile.EndOfFile()) {
    _inFile.Read10MsData(audioFrame);
    // Scale for the left channel (round to nearest via +0.5 floor).
    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
      audioFrame.data_[n] = (int16_t) floor(
          audioFrame.data_[n] * leftPanning + 0.5);
    }
    CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));

    // Rescale the same buffer in place for the right channel.
    for (int n = 0; n < audioFrame.samples_per_channel_; n++) {
      audioFrame.data_[n] = (int16_t) floor(
          audioFrame.data_[n] * rightToLeftRatio + 0.5);
    }
    CHECK_ERROR(_acmRight->Add10MsData(audioFrame));

    CHECK_ERROR(_acmLeft->Process());
    CHECK_ERROR(_acmRight->Process());
    CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, &audioFrame));
    _outFile.Write10MsData(audioFrame);
  }
  _inFile.Rewind();
}
void SpatialAudio::EncodeDecode() {
AudioFrame audioFrame;
int32_t outFileSampFreq = _outFile.SamplingFrequency();
_channel->SetIsStereo(false);
while (!_inFile.EndOfFile()) {
_inFile.Read10MsData(audioFrame);
CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
CHECK_ERROR(_acmLeft->Process());
CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, &audioFrame));
_outFile.Write10MsData(audioFrame);
}
_inFile.Rewind();
}
} // namespace webrtc

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#define MAX_FILE_NAME_LENGTH_BYTE 500
namespace webrtc {
// Stereo panning test: encodes a mono file through two send-side ACMs with
// different gains and decodes to a stereo output file.
class SpatialAudio : public ACMTest {
 public:
  // |testMode| 1 = interactive (prompts for output file), else automatic.
  SpatialAudio(int testMode);
  ~SpatialAudio();

  void Perform();
 private:
  // Wires ACMs to the channel and opens the PCM files; always returns 0.
  int16_t Setup();
  // Encodes one file pass with the given left/right gains (stereo).
  void EncodeDecode(double leftPanning, double rightPanning);
  // Encodes one file pass through the left ACM only (mono).
  void EncodeDecode();

  scoped_ptr<AudioCodingModule> _acmLeft;
  scoped_ptr<AudioCodingModule> _acmRight;
  scoped_ptr<AudioCodingModule> _acmReceiver;
  Channel* _channel;   // Owned; created in Setup(), deleted in destructor.
  PCMFile _inFile;
  PCMFile _outFile;    // Stereo output.
  int _testMode;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_SPATIALAUDIO_H_

View File

@@ -0,0 +1,822 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/TestAllCodecs.h"
#include <stdio.h>
#include <string>
#include "gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/typedefs.h"
// Description of the test:
// In this test we set up a one-way communication channel from a participant
// called "a" to a participant called "b".
// a -> channel_a_to_b -> b
//
// The test loops through all available mono codecs, encode at "a" sends over
// the channel, and decodes at "b".
namespace webrtc {
// Class for simulating packet handling.
// All-zero initial state; the receiving ACM is attached later via
// RegisterReceiverACM().
TestPack::TestPack() {
  receiver_acm_ = NULL;
  sequence_number_ = 0;
  timestamp_diff_ = 0;
  last_in_timestamp_ = 0;
  total_bytes_ = 0;
  payload_size_ = 0;
}
// No owned resources; receiver_acm_ is externally owned.
TestPack::~TestPack() {
}
// Stores a non-owning pointer to the ACM that receives forwarded packets.
void TestPack::RegisterReceiverACM(AudioCodingModule* acm) {
  receiver_acm_ = acm;
}
// AudioPacketizationCallback implementation: wraps each encoded frame in a
// synthetic RTP header and hands it straight to the receiving ACM,
// accumulating byte/timestamp statistics. Empty frames are dropped.
int32_t TestPack::SendData(FrameType frame_type, uint8_t payload_type,
                           uint32_t timestamp, const uint8_t* payload_data,
                           uint16_t payload_size,
                           const RTPFragmentationHeader* fragmentation) {
  WebRtcRTPHeader rtp_info;
  int32_t status;
  rtp_info.header.markerBit = false;
  rtp_info.header.ssrc = 0;
  rtp_info.header.sequenceNumber = sequence_number_++;
  rtp_info.header.payloadType = payload_type;
  rtp_info.header.timestamp = timestamp;
  // Comfort-noise frames are flagged so the decoder treats them as CNG.
  if (frame_type == kAudioFrameCN) {
    rtp_info.type.Audio.isCNG = true;
  } else {
    rtp_info.type.Audio.isCNG = false;
  }
  if (frame_type == kFrameEmpty) {
    // Skip this frame (note: the sequence number above was still consumed).
    return 0;
  }

  // Only run mono for all test cases.
  rtp_info.type.Audio.channel = 1;
  // NOTE(review): payload_data_ is a fixed member buffer; nothing here
  // checks payload_size against its capacity — verify the declared size
  // covers the largest codec payload.
  memcpy(payload_data_, payload_data, payload_size);

  status = receiver_acm_->IncomingPacket(payload_data_, payload_size, rtp_info);

  payload_size_ = payload_size;
  timestamp_diff_ = timestamp - last_in_timestamp_;
  last_in_timestamp_ = timestamp;
  total_bytes_ += payload_size;
  return status;
}
// Size in bytes of the most recently forwarded payload.
uint16_t TestPack::payload_size() { return payload_size_; }
// Timestamp increment between the two most recently forwarded packets.
uint32_t TestPack::timestamp_diff() { return timestamp_diff_; }
// Clear the recorded payload size, e.g. before a new codec configuration.
void TestPack::reset_payload_size() { payload_size_ = 0; }
// test_mode == 0 selects the silent (automated) test mode.
// Members are listed in declaration order; test_mode_ is initialized in
// the list instead of being assigned in the body.
TestAllCodecs::TestAllCodecs(int test_mode)
    : test_mode_(test_mode),
      acm_a_(AudioCodingModule::Create(0)),
      acm_b_(AudioCodingModule::Create(1)),
      channel_a_to_b_(NULL),
      test_count_(0),
      packet_size_samples_(0),
      packet_size_bytes_(0) {}
// Tear down the simulated channel. Deleting a NULL pointer is a no-op,
// so no guard is needed.
TestAllCodecs::~TestAllCodecs() {
  delete channel_a_to_b_;
  channel_a_to_b_ = NULL;
}
// Top-level test driver: registers every codec available in this build as
// the send codec on ACM A at a range of rates and packet sizes, and runs
// the one-way a -> b transmission for each configuration. Decoded output
// for each codec family goes to a numbered output file.
void TestAllCodecs::Perform() {
const std::string file_name = webrtc::test::ResourcePath(
"audio_coding/testfile32kHz", "pcm");
infile_a_.Open(file_name, 32000, "rb");
if (test_mode_ == 0) {
WEBRTC_TRACE(kTraceStateInfo, kTraceAudioCoding, -1,
"---------- TestAllCodecs ----------");
}
acm_a_->InitializeReceiver();
acm_b_->InitializeReceiver();
// Register every codec as a receive codec on ACM B so any send codec on
// side A can be decoded.
uint8_t num_encoders = acm_a_->NumberOfCodecs();
CodecInst my_codec_param;
for (uint8_t n = 0; n < num_encoders; n++) {
acm_b_->Codec(n, &my_codec_param);
// Opus defaults to stereo; this test runs mono only.
if (!strcmp(my_codec_param.plname, "opus")) {
my_codec_param.channels = 1;
}
acm_b_->RegisterReceiveCodec(my_codec_param);
}
// Create and connect the channel
channel_a_to_b_ = new TestPack;
acm_a_->RegisterTransportCallback(channel_a_to_b_);
channel_a_to_b_->RegisterReceiverACM(acm_b_.get());
// All codecs are tested for all allowed sampling frequencies, rates and
// packet sizes.
#ifdef WEBRTC_CODEC_AMR
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_amr[] = "AMR";
RegisterSendCodec('A', codec_amr, 8000, 4750, 160, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 4750, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 4750, 480, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5150, 160, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5150, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5150, 480, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5900, 160, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5900, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 5900, 480, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 6700, 160, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 6700, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 6700, 480, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7400, 160, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7400, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7400, 480, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7950, 160, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7950, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 7950, 480, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 10200, 160, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 10200, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 10200, 480, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 12200, 160, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 12200, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amr, 8000, 12200, 480, 3);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_AMRWB
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
char codec_amrwb[] = "AMR-WB";
OpenOutFile(test_count_);
RegisterSendCodec('A', codec_amrwb, 16000, 7000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 7000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 7000, 960, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 9000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 9000, 640, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 9000, 960, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 12000, 320, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 12000, 640, 6);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 12000, 960, 8);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 14000, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 14000, 640, 4);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 14000, 960, 5);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 16000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 16000, 640, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 16000, 960, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 18000, 320, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 18000, 640, 4);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 18000, 960, 5);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 20000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 20000, 640, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 20000, 960, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 23000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 23000, 640, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 23000, 960, 3);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 24000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 24000, 640, 2);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_amrwb, 16000, 24000, 960, 2);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_G722
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_g722[] = "G722";
RegisterSendCodec('A', codec_g722, 16000, 64000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722, 16000, 64000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722, 16000, 64000, 480, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722, 16000, 64000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722, 16000, 64000, 800, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722, 16000, 64000, 960, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_G722_1
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_g722_1[] = "G7221";
RegisterSendCodec('A', codec_g722_1, 16000, 32000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722_1, 16000, 24000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722_1, 16000, 16000, 320, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_G722_1C
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
// G.722.1C: same payload name as G.722.1, but at 32 kHz.
char codec_g722_1c[] = "G7221";
RegisterSendCodec('A', codec_g722_1c, 32000, 48000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722_1c, 32000, 32000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g722_1c, 32000, 24000, 640, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_G729
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_g729[] = "G729";
RegisterSendCodec('A', codec_g729, 8000, 8000, 80, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729, 8000, 8000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729, 8000, 8000, 240, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729, 8000, 8000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729, 8000, 8000, 400, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729, 8000, 8000, 480, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_G729_1
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_g729_1[] = "G7291";
RegisterSendCodec('A', codec_g729_1, 16000, 8000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 8000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 8000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 12000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 12000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 12000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 14000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 14000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 14000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 16000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 16000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 16000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 18000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 18000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 18000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 20000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 20000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 20000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 22000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 22000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 22000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 24000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 24000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 24000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 26000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 26000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 26000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 28000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 28000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 28000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 30000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 30000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 30000, 960, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 32000, 320, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 32000, 640, 1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_g729_1, 16000, 32000, 960, 1);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_GSMFR
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_gsmfr[] = "GSM";
RegisterSendCodec('A', codec_gsmfr, 8000, 13200, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_gsmfr, 8000, 13200, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_gsmfr, 8000, 13200, 480, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_ILBC
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_ilbc[] = "ILBC";
RegisterSendCodec('A', codec_ilbc, 8000, 13300, 240, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_ilbc, 8000, 13300, 480, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_ilbc, 8000, 15200, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_ilbc, 8000, 15200, 320, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
// iSAC wideband; rate -1 selects adaptive mode.
char codec_isac[] = "ISAC";
RegisterSendCodec('A', codec_isac, 16000, -1, 480, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 16000, -1, 960, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 16000, 15000, 480, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 16000, 32000, 960, -1);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_ISAC
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
RegisterSendCodec('A', codec_isac, 32000, -1, 960, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 32000, 56000, 960, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 32000, 37000, 960, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_isac, 32000, 32000, 960, -1);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_PCM16
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_l16[] = "L16";
RegisterSendCodec('A', codec_l16, 8000, 128000, 80, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 8000, 128000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 8000, 128000, 240, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 8000, 128000, 320, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
RegisterSendCodec('A', codec_l16, 16000, 256000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 16000, 256000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 16000, 256000, 480, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 16000, 256000, 640, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
RegisterSendCodec('A', codec_l16, 32000, 512000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_l16, 32000, 512000, 640, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
// PCMA and PCMU are always built; both are written to the same out file.
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_pcma[] = "PCMA";
RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, 0);
Run(channel_a_to_b_);
if (test_mode_ != 0) {
printf("===============================================================\n");
}
char codec_pcmu[] = "PCMU";
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#ifdef WEBRTC_CODEC_SPEEX
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_speex[] = "SPEEX";
RegisterSendCodec('A', codec_speex, 8000, 2400, 160, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_speex, 8000, 8000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_speex, 8000, 18200, 480, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
RegisterSendCodec('A', codec_speex, 16000, 4000, 320, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_speex, 16000, 12800, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_speex, 16000, 34200, 960, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_CELT
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_celt[] = "CELT";
RegisterSendCodec('A', codec_celt, 32000, 48000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_celt, 32000, 64000, 640, 0);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_celt, 32000, 128000, 640, 0);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
#ifdef WEBRTC_CODEC_OPUS
if (test_mode_ != 0) {
printf("===============================================================\n");
}
test_count_++;
OpenOutFile(test_count_);
char codec_opus[] = "OPUS";
RegisterSendCodec('A', codec_opus, 48000, 6000, 480, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 20000, 480*2, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 32000, 480*4, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 48000, 480, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 64000, 480*4, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 96000, 480*6, -1);
Run(channel_a_to_b_);
RegisterSendCodec('A', codec_opus, 48000, 500000, 480*2, -1);
Run(channel_a_to_b_);
outfile_b_.Close();
#endif
// In verbose mode, list the codecs that this build excluded from the run.
if (test_mode_ != 0) {
printf("===============================================================\n");
/* Print out all codecs that were not tested in the run */
printf("The following codecs was not included in the test:\n");
#ifndef WEBRTC_CODEC_AMR
printf(" GSMAMR\n");
#endif
#ifndef WEBRTC_CODEC_AMRWB
printf(" GSMAMR-wb\n");
#endif
#ifndef WEBRTC_CODEC_G722
printf(" G.722\n");
#endif
#ifndef WEBRTC_CODEC_G722_1
printf(" G.722.1\n");
#endif
#ifndef WEBRTC_CODEC_G722_1C
printf(" G.722.1C\n");
#endif
#ifndef WEBRTC_CODEC_G729
printf(" G.729\n");
#endif
#ifndef WEBRTC_CODEC_G729_1
printf(" G.729.1\n");
#endif
#ifndef WEBRTC_CODEC_GSMFR
printf(" GSMFR\n");
#endif
#ifndef WEBRTC_CODEC_ILBC
printf(" iLBC\n");
#endif
#ifndef WEBRTC_CODEC_ISAC
printf(" ISAC float\n");
#endif
#ifndef WEBRTC_CODEC_ISACFX
printf(" ISAC fix\n");
#endif
#ifndef WEBRTC_CODEC_PCM16
printf(" PCM16\n");
#endif
#ifndef WEBRTC_CODEC_SPEEX
printf(" Speex\n");
#endif
printf("\nTo complete the test, listen to the %d number of output files.\n",
test_count_);
}
}
// Configure the send codec on one side of the connection.
//
// Input: side             - which ACM to use, 'A' or 'B'
//        codec_name       - name to use when registering the codec
//        sampling_freq_hz - sampling frequency in Hertz
//        rate             - bitrate in bits per second (the byte formula
//                           below divides by sampling_freq_hz * 8)
//        packet_size      - packet size in samples
//        extra_byte       - extra bytes per packet on top of the nominal
//                           rate (e.g. an internal header); set to -1 if
//                           the codec is a variable-rate codec
void TestAllCodecs::RegisterSendCodec(char side, char* codec_name,
                                      int32_t sampling_freq_hz, int rate,
                                      int packet_size, int extra_byte) {
  if (test_mode_ != 0) {
    // Print out codec and settings.
    printf("codec: %s Freq: %d Rate: %d PackSize: %d\n", codec_name,
           sampling_freq_hz, rate, packet_size);
  }
  // Expected packet size in samples, used to validate received packets.
  // For G.722, store half the size to compensate for the timestamp quirk
  // in the G.722 RTP RFC. Adaptive iSAC (rate == -1) may change its packet
  // size on the fly, so the check is disabled with -1.
  if (!strcmp(codec_name, "G722")) {
    packet_size_samples_ = packet_size / 2;
  } else if (!strcmp(codec_name, "ISAC") && (rate == -1)) {
    packet_size_samples_ = -1;
  } else {
    packet_size_samples_ = packet_size;
  }
  // Expected packet size in bytes; -1 (variable rate) disables the check.
  if (extra_byte == -1) {
    // Packets will have a variable size.
    packet_size_bytes_ = -1;
  } else {
    // Add 0.875 so any fractional byte rounds up to a whole byte.
    const float exact_size = static_cast<float>(packet_size * rate) /
        static_cast<float>(sampling_freq_hz * 8);
    packet_size_bytes_ = static_cast<int>(exact_size + 0.875) + extra_byte;
  }
  // Pick the ACM that gets the codec.
  AudioCodingModule* my_acm = NULL;
  if (side == 'A') {
    my_acm = acm_a_.get();
  } else if (side == 'B') {
    my_acm = acm_b_.get();
  }
  ASSERT_TRUE(my_acm != NULL);
  // Fetch the codec's default parameters, then override rate and packet
  // size before registering it as send codec.
  CodecInst my_codec_param;
  CHECK_ERROR(AudioCodingModule::Codec(codec_name, &my_codec_param,
                                       sampling_freq_hz, 1));
  my_codec_param.rate = rate;
  my_codec_param.pacsize = packet_size;
  CHECK_ERROR(my_acm->RegisterSendCodec(my_codec_param));
}
// Encodes the entire input file on ACM A, pushes each packet through
// |channel|, decodes on ACM B, and writes the decoded audio to
// |outfile_b_|. Along the way it validates that every packet matches the
// expected size in bytes and the expected timestamp increment, counting
// mismatches in |error_count| (expected to be 0 at the end).
void TestAllCodecs::Run(TestPack* channel) {
AudioFrame audio_frame;
int32_t out_freq_hz = outfile_b_.SamplingFrequency();
uint16_t receive_size;
uint32_t timestamp_diff;
channel->reset_payload_size();
int error_count = 0;
// Counts 10 ms frames; early packets after a codec/frame-size switch are
// exempt from the timestamp check (see below).
int counter = 0;
while (!infile_a_.EndOfFile()) {
// Add 10 msec to ACM.
infile_a_.Read10MsData(audio_frame);
CHECK_ERROR(acm_a_->Add10MsData(audio_frame));
// Run sender side of ACM.
CHECK_ERROR(acm_a_->Process());
// Verify that the received packet size matches the settings.
receive_size = channel->payload_size();
if (receive_size) {
if ((static_cast<int>(receive_size) != packet_size_bytes_) &&
(packet_size_bytes_ > -1)) {
error_count++;
}
// Verify that the timestamp is updated with expected length. The counter
// is used to avoid problems when switching codec or frame size in the
// test.
timestamp_diff = channel->timestamp_diff();
if ((counter > 10) &&
(static_cast<int>(timestamp_diff) != packet_size_samples_) &&
(packet_size_samples_ > -1))
error_count++;
}
// Run received side of ACM.
CHECK_ERROR(acm_b_->PlayoutData10Ms(out_freq_hz, &audio_frame));
// Write output speech to file.
outfile_b_.Write10MsData(audio_frame.data_,
audio_frame.samples_per_channel_);
// Update loop counter
counter++;
}
EXPECT_EQ(0, error_count);
// The loop above only exits at end-of-file, so this branch always runs;
// rewind so the next Run() starts from the beginning of the input file.
if (infile_a_.EndOfFile()) {
infile_a_.Rewind();
}
}
// Open the decoded-output file for test case |test_number|:
// <output_path>/testallcodecs_out_<test_number>.pcm, written at 32 kHz.
void TestAllCodecs::OpenOutFile(int test_number) {
  std::ostringstream file_stream;
  file_stream << webrtc::test::OutputPath() << "testallcodecs_out_"
              << test_number << ".pcm";
  outfile_b_.Open(file_stream.str(), 32000, "wb");
}
void TestAllCodecs::DisplaySendReceiveCodec() {
CodecInst my_codec_param;
acm_a_->SendCodec(&my_codec_param);
printf("%s -> ", my_codec_param.plname);
acm_b_->ReceiveCodec(&my_codec_param);
printf("%s\n", my_codec_param.plname);
}
} // namespace webrtc

View File

@@ -0,0 +1,82 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class Config;
// Helper that simulates the transport between two ACMs: it implements
// AudioPacketizationCallback, so the sending ACM hands it each encoded
// packet, which it forwards to a registered receiving ACM while recording
// per-packet statistics (size, timestamp delta, total bytes).
class TestPack : public AudioPacketizationCallback {
public:
TestPack();
// NOTE(review): not declared virtual; instances are expected to be
// deleted through TestPack*, not through the base-class pointer.
~TestPack();
// Registers the ACM that should receive forwarded packets (not owned).
void RegisterReceiverACM(AudioCodingModule* acm);
// AudioPacketizationCallback implementation, called by the sending ACM
// for every encoded packet.
int32_t SendData(FrameType frame_type, uint8_t payload_type,
uint32_t timestamp, const uint8_t* payload_data,
uint16_t payload_size,
const RTPFragmentationHeader* fragmentation);
// Size in bytes of the most recently forwarded payload.
uint16_t payload_size();
// Timestamp difference between the two most recently forwarded packets.
uint32_t timestamp_diff();
// Resets the stored payload size to 0.
void reset_payload_size();
private:
AudioCodingModule* receiver_acm_;
uint16_t sequence_number_;
// Scratch buffer for forwarded payloads; sized for 60 ms of 32 kHz
// 16-bit stereo audio.
uint8_t payload_data_[60 * 32 * 2 * 2];
uint32_t timestamp_diff_;
uint32_t last_in_timestamp_;
uint64_t total_bytes_;
uint16_t payload_size_;
};
// ACM test that loops through all codecs available in the build, encoding
// on ACM A and decoding on ACM B over a TestPack channel.
class TestAllCodecs : public ACMTest {
public:
// test_mode == 0 selects the silent (automated) mode.
explicit TestAllCodecs(int test_mode);
~TestAllCodecs();
// Runs the complete one-way codec test (ACMTest interface).
void Perform();
private:
// Registers |codec_name| as send codec on side 'A' or 'B', with sampling
// frequency in Hz, rate in bits/s, packet size in samples, and the extra
// bytes expected per packet (-1 marks a variable-rate codec, which
// disables the packet-size checks in Run()).
// Note! Only mono mode is tested in this test.
void RegisterSendCodec(char side, char* codec_name, int32_t sampling_freq_hz,
int rate, int packet_size, int extra_byte);
// Pumps the whole input file once through the send/receive pair.
void Run(TestPack* channel);
// Opens testallcodecs_out_<test_number>.pcm for decoded output.
void OpenOutFile(int test_number);
// Prints "<send codec> -> <receive codec>".
void DisplaySendReceiveCodec();
int test_mode_;
scoped_ptr<AudioCodingModule> acm_a_;
scoped_ptr<AudioCodingModule> acm_b_;
TestPack* channel_a_to_b_;
PCMFile infile_a_;
PCMFile outfile_b_;
int test_count_;
// Expected per-packet values validated in Run(); -1 disables the check.
int packet_size_samples_;
int packet_size_bytes_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTALLCODECS_H_

View File

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/TestRedFec.h"
#include <assert.h>
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Create the two ACM endpoints (ids 0 and 1); the channel between them is
// created and attached later, in Perform().
TestRedFec::TestRedFec()
    : _acmA(AudioCodingModule::Create(0)),
      _acmB(AudioCodingModule::Create(1)),
      _channelA2B(NULL),
      _testCntr(0) {}
// Tear down the channel. Deleting a NULL pointer is a no-op, so no guard
// is needed.
TestRedFec::~TestRedFec() {
  delete _channelA2B;
  _channelA2B = NULL;
}
// Exercises RED (redundant coding) and codec-internal FEC:
// 1) G722+CN and iSAC (16/32 kHz) with RED toggled off/on, first on a
//    clean channel and then with simulated packet loss;
// 2) Opus codec FEC, verifying that RED and codec FEC are mutually
//    exclusive and that FEC is rejected by codecs without support.
void TestRedFec::Perform() {
const std::string file_name = webrtc::test::ResourcePath(
"audio_coding/testfile32kHz", "pcm");
_inFileA.Open(file_name, 32000, "rb");
ASSERT_EQ(0, _acmA->InitializeReceiver());
ASSERT_EQ(0, _acmB->InitializeReceiver());
// Register every codec as receive codec on ACM B.
uint8_t numEncoders = _acmA->NumberOfCodecs();
CodecInst myCodecParam;
for (uint8_t n = 0; n < numEncoders; n++) {
EXPECT_EQ(0, _acmB->Codec(n, &myCodecParam));
// Default number of channels is 2 for opus, so we change to 1 in this test.
if (!strcmp(myCodecParam.plname, "opus")) {
myCodecParam.channels = 1;
}
EXPECT_EQ(0, _acmB->RegisterReceiveCodec(myCodecParam));
}
// Create and connect the channel
_channelA2B = new Channel;
_acmA->RegisterTransportCallback(_channelA2B);
_channelA2B->RegisterReceiverACM(_acmB.get());
// Without G722 the rest of the test cannot run; fail loudly and bail out.
#ifndef WEBRTC_CODEC_G722
EXPECT_TRUE(false);
printf("G722 needs to be activated to run this test\n");
return;
#endif
// G722 + CN with RED registered on top, RED off then on.
// NOTE(review): the two-argument RegisterSendCodec calls below rely on a
// default sampling frequency declared in the header -- confirm there.
char nameG722[] = "G722";
EXPECT_EQ(0, RegisterSendCodec('A', nameG722, 16000));
char nameCN[] = "CN";
EXPECT_EQ(0, RegisterSendCodec('A', nameCN, 16000));
char nameRED[] = "RED";
EXPECT_EQ(0, RegisterSendCodec('A', nameRED));
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
// iSAC 16 kHz with RED off then on.
char nameISAC[] = "iSAC";
RegisterSendCodec('A', nameISAC, 16000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
// iSAC 32 kHz with RED off then on.
RegisterSendCodec('A', nameISAC, 32000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
// RED must stay enabled across iSAC codec/rate switches (VAD/DTX off).
RegisterSendCodec('A', nameISAC, 32000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(false, false, VADNormal));
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 16000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 32000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 16000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
_outFileB.Close();
// Repeat the scenarios above with simulated packet loss on the channel.
_channelA2B->SetFECTestWithPacketLoss(true);
EXPECT_EQ(0, RegisterSendCodec('A', nameG722));
EXPECT_EQ(0, RegisterSendCodec('A', nameCN, 16000));
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
RegisterSendCodec('A', nameISAC, 16000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
RegisterSendCodec('A', nameISAC, 32000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(true, true, VADVeryAggr));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_FALSE(_acmA->REDStatus());
Run();
_outFileB.Close();
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
OpenOutFile(_testCntr);
Run();
_outFileB.Close();
RegisterSendCodec('A', nameISAC, 32000);
OpenOutFile(_testCntr);
EXPECT_EQ(0, SetVAD(false, false, VADNormal));
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 16000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 32000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
RegisterSendCodec('A', nameISAC, 16000);
EXPECT_TRUE(_acmA->REDStatus());
Run();
_outFileB.Close();
// Opus is required for the codec-FEC part of the test.
#ifndef WEBRTC_CODEC_OPUS
EXPECT_TRUE(false);
printf("Opus needs to be activated to run this test\n");
return;
#endif
char nameOpus[] = "opus";
RegisterSendCodec('A', nameOpus, 48000);
EXPECT_TRUE(_acmA->REDStatus());
// _channelA2B imposes 25% packet loss rate.
EXPECT_EQ(0, _acmA->SetPacketLossRate(25));
// Codec FEC and RED are mutually exclusive.
EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
EXPECT_EQ(0, _acmA->SetCodecFEC(true));
// Codec FEC and RED are mutually exclusive.
EXPECT_EQ(-1, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->CodecFEC());
OpenOutFile(_testCntr);
Run();
// Switch to ISAC with RED.
RegisterSendCodec('A', nameISAC, 32000);
EXPECT_EQ(0, SetVAD(false, false, VADNormal));
// ISAC does not support FEC, so FEC should be turned off automatically.
EXPECT_FALSE(_acmA->CodecFEC());
EXPECT_EQ(0, _acmA->SetREDStatus(true));
EXPECT_TRUE(_acmA->REDStatus());
Run();
// Switch to Opus again.
RegisterSendCodec('A', nameOpus, 48000);
EXPECT_EQ(0, _acmA->SetCodecFEC(false));
EXPECT_EQ(0, _acmA->SetREDStatus(false));
Run();
EXPECT_EQ(0, _acmA->SetCodecFEC(true));
_outFileB.Close();
// These codecs do not support internal FEC; enabling it must fail.
RegisterSendCodec('A', nameG722, 16000);
EXPECT_FALSE(_acmA->REDStatus());
EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
EXPECT_FALSE(_acmA->CodecFEC());
RegisterSendCodec('A', nameISAC, 16000);
EXPECT_FALSE(_acmA->REDStatus());
EXPECT_EQ(-1, _acmA->SetCodecFEC(true));
EXPECT_FALSE(_acmA->CodecFEC());
}
// Forwards the VAD/DTX configuration to the sending ('A') ACM.
// Returns whatever the underlying ACM call returns (0 on success).
int32_t TestRedFec::SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode) {
  const int32_t status = _acmA->SetVAD(enableDTX, enableVAD, vadMode);
  return status;
}
// Registers |codecName| (mono, at |samplingFreqHz|) as the send codec on the
// requested side ('A' or 'B'). Returns 0 on success, -1 for an unknown side.
int16_t TestRedFec::RegisterSendCodec(char side, char* codecName,
                                      int32_t samplingFreqHz) {
  std::cout << std::flush;
  AudioCodingModule* myACM = NULL;
  if (side == 'A') {
    myACM = _acmA.get();
  } else if (side == 'B') {
    myACM = _acmB.get();
  } else {
    // Unknown side identifier.
    return -1;
  }
  if (myACM == NULL) {
    assert(false);
    return -1;
  }
  CodecInst myCodecParam;
  // Look up the codec settings by name, then register it as send codec.
  EXPECT_GT(AudioCodingModule::Codec(codecName, &myCodecParam,
                                     samplingFreqHz, 1), -1);
  EXPECT_GT(myACM->RegisterSendCodec(myCodecParam), -1);
  // Initialization was successful.
  return 0;
}
void TestRedFec::Run() {
AudioFrame audioFrame;
uint16_t msecPassed = 0;
uint32_t secPassed = 0;
int32_t outFreqHzB = _outFileB.SamplingFrequency();
while (!_inFileA.EndOfFile()) {
EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
EXPECT_GT(_acmA->Process(), -1);
EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
_outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
msecPassed += 10;
if (msecPassed >= 1000) {
msecPassed = 0;
secPassed++;
}
// Test that toggling RED on and off works.
if (((secPassed % 5) == 4) && (msecPassed == 0) && (_testCntr > 14)) {
EXPECT_EQ(0, _acmA->SetREDStatus(false));
}
if (((secPassed % 5) == 4) && (msecPassed >= 990) && (_testCntr > 14)) {
EXPECT_EQ(0, _acmA->SetREDStatus(true));
}
}
_inFileA.Rewind();
}
// Opens "TestRedFec_outFile_<test_number>.pcm" (16 kHz) in the test output
// directory for writing the decoded audio of the current test case.
void TestRedFec::OpenOutFile(int16_t test_number) {
  std::stringstream file_stream;
  file_stream << webrtc::test::OutputPath() << "TestRedFec_outFile_"
              << test_number << ".pcm";
  std::string file_name = file_stream.str();
  _outFileB.Open(file_name, 16000, "wb");
}
} // namespace webrtc

View File

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_
#include <string>
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
class Config;
// Exercises RED (redundant audio coding) and codec-internal FEC in the ACM
// by streaming audio from side A to side B, toggling RED/FEC and VAD/DTX.
class TestRedFec : public ACMTest {
 public:
  // NOTE: `explicit` was dropped from this zero-argument constructor; it has
  // no effect without a convertible parameter.
  TestRedFec();
  ~TestRedFec();

  void Perform();

 private:
  // Registers |codecName| as send codec on |side| ('A' or 'B').
  // The default value of '-1' indicates that the registration is based only
  // on codec name, and a sampling frequency match is not required. This is
  // useful for codecs which support several sampling frequencies.
  int16_t RegisterSendCodec(char side, char* codecName,
                            int32_t sampFreqHz = -1);
  // Streams the input file once from A to B (see implementation for details).
  void Run();
  // Opens the per-test-case output PCM file.
  void OpenOutFile(int16_t testNumber);
  // Configures VAD/DTX on the sending ACM.
  int32_t SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode);

  scoped_ptr<AudioCodingModule> _acmA;  // Sending side.
  scoped_ptr<AudioCodingModule> _acmB;  // Receiving side.
  Channel* _channelA2B;                 // Simulated transport from A to B.
  PCMFile _inFileA;
  PCMFile _outFileB;
  int16_t _testCntr;                    // Numbers the output files.
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TESTREDFEC_H_

View File

@@ -0,0 +1,914 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
#include <assert.h>
#include <string>
#include "gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Class for simulating packet handling
// All counters start at zero; |codec_mode_| stays kNotSet until the test
// calls set_codec_mode(). |receiver_acm_| must be set via
// RegisterReceiverACM() before packets are delivered.
TestPackStereo::TestPackStereo()
    : receiver_acm_(NULL),
      seq_no_(0),
      timestamp_diff_(0),
      last_in_timestamp_(0),
      total_bytes_(0),
      payload_size_(0),
      codec_mode_(kNotSet),
      lost_packet_(false) {
}
// Nothing to release: |receiver_acm_| is not owned by this class.
TestPackStereo::~TestPackStereo() {
}
// Stores the ACM that should receive the packets sent through this channel.
// The pointer is borrowed, not owned.
void TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm) {
  receiver_acm_ = acm;
}
// Transport callback: stamps an RTP header on the encoded payload and feeds
// it straight into the receiver ACM. Empty (DTX) frames and frames during a
// simulated loss are dropped. Returns the receiver's IncomingPacket status,
// or 0 when the frame was dropped.
int32_t TestPackStereo::SendData(const FrameType frame_type,
                                 const uint8_t payload_type,
                                 const uint32_t timestamp,
                                 const uint8_t* payload_data,
                                 const uint16_t payload_size,
                                 const RTPFragmentationHeader* fragmentation) {
  WebRtcRTPHeader rtp_info;
  rtp_info.header.markerBit = false;
  rtp_info.header.ssrc = 0;
  // Note: a sequence number is consumed even for frames dropped below,
  // matching the original behavior.
  rtp_info.header.sequenceNumber = seq_no_++;
  rtp_info.header.payloadType = payload_type;
  rtp_info.header.timestamp = timestamp;
  if (frame_type == kFrameEmpty) {
    // Skip this frame.
    return 0;
  }
  if (lost_packet_) {
    // Simulated packet loss: drop silently.
    return 0;
  }
  const bool is_cng = (frame_type == kAudioFrameCN);
  rtp_info.type.Audio.isCNG = is_cng;
  rtp_info.type.Audio.channel =
      static_cast<int>(is_cng ? kMono : codec_mode_);
  const int32_t status =
      receiver_acm_->IncomingPacket(payload_data, payload_size, rtp_info);
  // Record stats: CNG payloads are flagged with -1 so Run() ignores them.
  payload_size_ = is_cng ? -1 : static_cast<int>(payload_size);
  timestamp_diff_ = timestamp - last_in_timestamp_;
  last_in_timestamp_ = timestamp;
  total_bytes_ += payload_size;
  return status;
}
// Size in bytes of the last delivered non-CNG payload; 65535 (the uint16_t
// cast of -1) if the last payload was CNG.
uint16_t TestPackStereo::payload_size() {
  return static_cast<uint16_t>(payload_size_);
}

// RTP timestamp increment between the two most recently delivered packets.
uint32_t TestPackStereo::timestamp_diff() {
  return timestamp_diff_;
}

// Clears the recorded payload size before a new run.
void TestPackStereo::reset_payload_size() {
  payload_size_ = 0;
}

// Sets the channel mode (mono/stereo) stamped on outgoing packets.
void TestPackStereo::set_codec_mode(enum StereoMonoMode mode) {
  codec_mode_ = mode;
}

// When |lost| is true, subsequent packets are dropped to simulate loss.
void TestPackStereo::set_lost_packet(bool lost) {
  lost_packet_ = lost;
}
// Creates the two ACMs (ids 0 and 1) for the A and B sides. Payload types
// default to -1 (meaning "use the codec table default"); g722 uses 0.
TestStereo::TestStereo(int test_mode)
    : acm_a_(AudioCodingModule::Create(0)),
      acm_b_(AudioCodingModule::Create(1)),
      channel_a2b_(NULL),
      test_cntr_(0),
      pack_size_samp_(0),
      pack_size_bytes_(0),
      counter_(0),
      g722_pltype_(0),
      l16_8khz_pltype_(-1),
      l16_16khz_pltype_(-1),
      l16_32khz_pltype_(-1),
      pcma_pltype_(-1),
      pcmu_pltype_(-1),
      celt_pltype_(-1),
      opus_pltype_(-1),
      cn_8khz_pltype_(-1),
      cn_16khz_pltype_(-1),
      cn_32khz_pltype_(-1) {
  // test_mode = 0 for silent test (auto test)
  test_mode_ = test_mode;
}
// Releases the owned channel. delete on NULL is a no-op, so no guard is
// needed.
TestStereo::~TestStereo() {
  delete channel_a2b_;
  channel_a2b_ = NULL;
}
// Runs the full test matrix: stereo-to-stereo, mono-to-stereo and
// stereo-to-mono for every codec compiled in, plus VAD/DTX interaction
// checks and Opus mono/stereo decoder switching. Each sub-test writes its
// decoded output to a numbered PCM file.
void TestStereo::Perform() {
  uint16_t frequency_hz;
  int audio_channels;   // Channels in the audio fed to the encoder.
  int codec_channels;   // Channels the send codec is configured for.
  bool dtx;
  bool vad;
  ACMVADMode vad_mode;

  // Open both mono and stereo test files in 32 kHz.
  const std::string file_name_stereo = webrtc::test::ResourcePath(
      "audio_coding/teststereo32kHz", "pcm");
  const std::string file_name_mono = webrtc::test::ResourcePath(
      "audio_coding/testfile32kHz", "pcm");
  frequency_hz = 32000;
  in_file_stereo_ = new PCMFile();
  in_file_mono_ = new PCMFile();
  in_file_stereo_->Open(file_name_stereo, frequency_hz, "rb");
  in_file_stereo_->ReadStereo(true);
  in_file_mono_->Open(file_name_mono, frequency_hz, "rb");
  in_file_mono_->ReadStereo(false);
  // Create and initialize two ACMs, one for each side of a one-to-one call.
  ASSERT_TRUE((acm_a_.get() != NULL) && (acm_b_.get() != NULL));
  EXPECT_EQ(0, acm_a_->InitializeReceiver());
  EXPECT_EQ(0, acm_b_->InitializeReceiver());
  // Register all available codecs as receiving codecs.
  uint8_t num_encoders = acm_a_->NumberOfCodecs();
  CodecInst my_codec_param;
  for (uint8_t n = 0; n < num_encoders; n++) {
    EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
    EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param));
  }
  // Test that unregistering all receive codecs works.
  for (uint8_t n = 0; n < num_encoders; n++) {
    EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
    EXPECT_EQ(0, acm_b_->UnregisterReceiveCodec(my_codec_param.pltype));
  }
  // Register all available codecs as receiving codecs once more.
  for (uint8_t n = 0; n < num_encoders; n++) {
    EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param));
    EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param));
  }

  // Create and connect the channel.
  channel_a2b_ = new TestPackStereo;
  EXPECT_EQ(0, acm_a_->RegisterTransportCallback(channel_a2b_));
  channel_a2b_->RegisterReceiverACM(acm_b_.get());

  // Start with setting VAD/DTX, before we know we will send stereo.
  // Continue with setting a stereo codec as send codec and verify that
  // VAD/DTX gets turned off.
  EXPECT_EQ(0, acm_a_->SetVAD(true, true, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_TRUE(dtx);
  EXPECT_TRUE(vad);
  char codec_pcma_temp[] = "PCMA";
  RegisterSendCodec('A', codec_pcma_temp, 8000, 64000, 80, 2, pcma_pltype_);
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_FALSE(dtx);
  EXPECT_FALSE(vad);
  if (test_mode_ != 0) {
    printf("\n");
  }

  //
  // Test Stereo-To-Stereo for all codecs.
  //
  audio_channels = 2;
  codec_channels = 2;

  // All codecs are tested for all allowed sampling frequencies, rates and
  // packet sizes.
#ifdef WEBRTC_CODEC_G722
  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  channel_a2b_->set_codec_mode(kStereo);
  test_cntr_++;
  OpenOutFile(test_cntr_);
  char codec_g722[] = "G722";
  // G.722 at all supported packet sizes (10-60 ms at 16 kHz).
  RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 320, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 480, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 640, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 800, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 960, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_PCM16
  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  channel_a2b_->set_codec_mode(kStereo);
  test_cntr_++;
  OpenOutFile(test_cntr_);
  char codec_l16[] = "L16";
  // L16 at 8 kHz.
  RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 8000, 128000, 160, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 8000, 128000, 240, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 8000, 128000, 320, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  // L16 at 16 kHz.
  RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 16000, 256000, 320, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 16000, 256000, 480, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 16000, 256000, 640, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  // L16 at 32 kHz.
  RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
                    l16_32khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_l16, 32000, 512000, 640, codec_channels,
                    l16_32khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#define PCMA_AND_PCMU
#ifdef PCMA_AND_PCMU
  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  channel_a2b_->set_codec_mode(kStereo);
  audio_channels = 2;
  codec_channels = 2;
  test_cntr_++;
  OpenOutFile(test_cntr_);
  char codec_pcma[] = "PCMA";
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 160, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 240, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 320, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 400, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 480, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);

  // Test that VAD/DTX cannot be turned on while sending stereo.
  EXPECT_EQ(-1, acm_a_->SetVAD(true, true, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_FALSE(dtx);
  EXPECT_FALSE(vad);
  EXPECT_EQ(-1, acm_a_->SetVAD(true, false, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_FALSE(dtx);
  EXPECT_FALSE(vad);
  EXPECT_EQ(-1, acm_a_->SetVAD(false, true, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_FALSE(dtx);
  EXPECT_FALSE(vad);
  // Turning both off is allowed.
  EXPECT_EQ(0, acm_a_->SetVAD(false, false, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_FALSE(dtx);
  EXPECT_FALSE(vad);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  char codec_pcmu[] = "PCMU";
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 160, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 240, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 320, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 400, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 480, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_CELT
  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  channel_a2b_->set_codec_mode(kStereo);
  audio_channels = 2;
  codec_channels = 2;
  test_cntr_++;
  OpenOutFile(test_cntr_);
  char codec_celt[] = "CELT";
  RegisterSendCodec('A', codec_celt, 32000, 48000, 640, codec_channels,
                    celt_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_celt, 32000, 64000, 640, codec_channels,
                    celt_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_celt, 32000, 128000, 640, codec_channels,
                    celt_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_OPUS
  if (test_mode_ != 0) {
    printf("===========================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-stereo\n");
  }
  channel_a2b_->set_codec_mode(kStereo);
  audio_channels = 2;
  codec_channels = 2;
  test_cntr_++;
  OpenOutFile(test_cntr_);

  char codec_opus[] = "opus";
  // Run Opus with 10 ms frame size.
  RegisterSendCodec('A', codec_opus, 48000, 64000, 480, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  // Run Opus with 20 ms frame size.
  RegisterSendCodec('A', codec_opus, 48000, 64000, 480*2, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  // Run Opus with 40 ms frame size.
  RegisterSendCodec('A', codec_opus, 48000, 64000, 480*4, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  // Run Opus with 60 ms frame size.
  RegisterSendCodec('A', codec_opus, 48000, 64000, 480*6, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  // Run Opus with 20 ms frame size and different bitrates.
  RegisterSendCodec('A', codec_opus, 48000, 40000, 960, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_opus, 48000, 510000, 960, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
  //
  // Test Mono-To-Stereo for all codecs.
  //
  audio_channels = 1;
  codec_channels = 2;

#ifdef WEBRTC_CODEC_G722
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  channel_a2b_->set_codec_mode(kStereo);
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
                    g722_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_PCM16
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  channel_a2b_->set_codec_mode(kStereo);
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
                    l16_32khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef PCMA_AND_PCMU
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  channel_a2b_->set_codec_mode(kStereo);
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_CELT
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }
  test_cntr_++;
  channel_a2b_->set_codec_mode(kStereo);
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_celt, 32000, 64000, 640, codec_channels,
                    celt_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_OPUS
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Mono-to-stereo\n");
  }

  // Keep encode and decode in stereo.
  test_cntr_++;
  channel_a2b_->set_codec_mode(kStereo);
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_opus, 48000, 64000, 960, codec_channels,
                    opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);

  // Encode in mono, decode in stereo mode.
  RegisterSendCodec('A', codec_opus, 48000, 64000, 960, 1, opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
  //
  // Test Stereo-To-Mono for all codecs.
  //
  audio_channels = 2;
  codec_channels = 1;
  channel_a2b_->set_codec_mode(kMono);

#ifdef WEBRTC_CODEC_G722
  // Run stereo audio and mono codec.
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_g722, 16000, 64000, 160, codec_channels,
                    g722_pltype_);

  // Make sure it is possible to set VAD/CNG, now that we are sending mono
  // again.
  EXPECT_EQ(0, acm_a_->SetVAD(true, true, VADNormal));
  EXPECT_EQ(0, acm_a_->VAD(&dtx, &vad, &vad_mode));
  EXPECT_TRUE(dtx);
  EXPECT_TRUE(vad);
  EXPECT_EQ(0, acm_a_->SetVAD(false, false, VADNormal));
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_PCM16
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 8000, 128000, 80, codec_channels,
                    l16_8khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 16000, 256000, 160, codec_channels,
                    l16_16khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

  if (test_mode_ != 0) {
    printf("==============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_l16, 32000, 512000, 320, codec_channels,
                    l16_32khz_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef PCMA_AND_PCMU
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_pcmu, 8000, 64000, 80, codec_channels,
                    pcmu_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  RegisterSendCodec('A', codec_pcma, 8000, 64000, 80, codec_channels,
                    pcma_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_CELT
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  RegisterSendCodec('A', codec_celt, 32000, 64000, 640, codec_channels,
                    celt_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
#endif
#ifdef WEBRTC_CODEC_OPUS
  if (test_mode_ != 0) {
    printf("===============================================================\n");
    printf("Test number: %d\n", test_cntr_ + 1);
    printf("Test type: Stereo-to-mono\n");
  }
  test_cntr_++;
  OpenOutFile(test_cntr_);
  // Encode and decode in mono.
  RegisterSendCodec('A', codec_opus, 48000, 32000, 960, codec_channels,
                    opus_pltype_);
  CodecInst opus_codec_param;
  // Re-register the Opus receive codec with one channel.
  for (uint8_t n = 0; n < num_encoders; n++) {
    EXPECT_EQ(0, acm_b_->Codec(n, &opus_codec_param));
    if (!strcmp(opus_codec_param.plname, "opus")) {
      opus_codec_param.channels = 1;
      EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
      break;
    }
  }
  Run(channel_a2b_, audio_channels, codec_channels);

  // Encode in stereo, decode in mono.
  RegisterSendCodec('A', codec_opus, 48000, 32000, 960, 2, opus_pltype_);
  Run(channel_a2b_, audio_channels, codec_channels);

  out_file_.Close();

  // Test switching between decoding mono and stereo for Opus.

  // Decode in mono.
  test_cntr_++;
  OpenOutFile(test_cntr_);
  if (test_mode_ != 0) {
    // Print out codec and settings
    printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
        " Decode: mono\n", test_cntr_);
  }
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();
  // Decode in stereo.
  test_cntr_++;
  OpenOutFile(test_cntr_);
  if (test_mode_ != 0) {
    // Print out codec and settings
    printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
        " Decode: stereo\n", test_cntr_);
  }
  opus_codec_param.channels = 2;
  EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
  Run(channel_a2b_, audio_channels, 2);
  out_file_.Close();
  // Decode in mono.
  test_cntr_++;
  OpenOutFile(test_cntr_);
  if (test_mode_ != 0) {
    // Print out codec and settings
    printf("Test number: %d\nCodec: Opus Freq: 48000 Rate :32000 PackSize: 960"
        " Decode: mono\n", test_cntr_);
  }
  opus_codec_param.channels = 1;
  EXPECT_EQ(0, acm_b_->RegisterReceiveCodec(opus_codec_param));
  Run(channel_a2b_, audio_channels, codec_channels);
  out_file_.Close();

#endif

  // Print out which codecs were tested, and which were not, in the run.
  if (test_mode_ != 0) {
    printf("\nThe following codecs was INCLUDED in the test:\n");
#ifdef WEBRTC_CODEC_G722
    printf("   G.722\n");
#endif
#ifdef WEBRTC_CODEC_PCM16
    printf("   PCM16\n");
#endif
    printf("   G.711\n");
#ifdef WEBRTC_CODEC_CELT
    printf("   CELT\n");
#endif
#ifdef WEBRTC_CODEC_OPUS
    printf("   Opus\n");
#endif
    printf("\nTo complete the test, listen to the %d number of output "
           "files.\n",
           test_cntr_);
  }

  // Delete the file pointers.
  delete in_file_stereo_;
  delete in_file_mono_;
}
// Register Codec to use in the test
//
// Input: side - which ACM to use, 'A' or 'B'
// codec_name - name to use when register the codec
// sampling_freq_hz - sampling frequency in Herz
// rate - bitrate in bytes
// pack_size - packet size in samples
// channels - number of channels; 1 for mono, 2 for stereo
// payload_type - payload type for the codec
void TestStereo::RegisterSendCodec(char side, char* codec_name,
int32_t sampling_freq_hz, int rate,
int pack_size, int channels,
int payload_type) {
if (test_mode_ != 0) {
// Print out codec and settings
printf("Codec: %s Freq: %d Rate: %d PackSize: %d\n", codec_name,
sampling_freq_hz, rate, pack_size);
}
// Store packet size in samples, used to validate the received packet
pack_size_samp_ = pack_size;
// Store the expected packet size in bytes, used to validate the received
// packet. Add 0.875 to always round up to a whole byte.
// For Celt the packet size in bytes is already counting the stereo part.
if (!strcmp(codec_name, "CELT")) {
pack_size_bytes_ = (uint16_t)(
static_cast<float>(pack_size * rate) /
static_cast<float>(sampling_freq_hz * 8) + 0.875) / channels;
} else {
pack_size_bytes_ = (uint16_t)(
static_cast<float>(pack_size * rate) /
static_cast<float>(sampling_freq_hz * 8) + 0.875);
}
// Set pointer to the ACM where to register the codec
AudioCodingModule* my_acm = NULL;
switch (side) {
case 'A': {
my_acm = acm_a_.get();
break;
}
case 'B': {
my_acm = acm_b_.get();
break;
}
default:
break;
}
ASSERT_TRUE(my_acm != NULL);
CodecInst my_codec_param;
// Get all codec parameters before registering
EXPECT_GT(AudioCodingModule::Codec(codec_name, &my_codec_param,
sampling_freq_hz, channels), -1);
my_codec_param.rate = rate;
my_codec_param.pacsize = pack_size;
EXPECT_EQ(0, my_acm->RegisterSendCodec(my_codec_param));
send_codec_name_ = codec_name;
}
// Pumps the input file once from acm_a_ through |channel| to acm_b_ and
// writes the decoded audio to |out_file_|, while validating payload sizes
// and timestamp increments.
//
// Input:   channel      - A-to-B channel (delivers packets, records stats)
//          in_channels  - channels in the input audio, 1 or 2
//          out_channels - channels the send codec encodes, 1 or 2
//          percent_loss - approximate packet loss percentage to simulate
void TestStereo::Run(TestPackStereo* channel, int in_channels, int out_channels,
                     int percent_loss) {
  AudioFrame audio_frame;

  int32_t out_freq_hz_b = out_file_.SamplingFrequency();
  uint16_t rec_size;
  uint32_t time_stamp_diff;
  channel->reset_payload_size();
  int error_count = 0;
  int variable_bytes = 0;    // Accumulated bytes for variable-rate codecs.
  int variable_packets = 0;  // Packet count for the average-size check.

  while (1) {
    // Simulate packet loss by setting |packet_loss_| to "true" in
    // |percent_loss| percent of the loops.
    if (percent_loss > 0) {
      if (counter_ == floor((100 / percent_loss) + 0.5)) {
        counter_ = 0;
        channel->set_lost_packet(true);
      } else {
        channel->set_lost_packet(false);
      }
      counter_++;
    }

    // Add 10 msec to ACM
    if (in_channels == 1) {
      if (in_file_mono_->EndOfFile()) {
        break;
      }
      in_file_mono_->Read10MsData(audio_frame);
    } else {
      if (in_file_stereo_->EndOfFile()) {
        break;
      }
      in_file_stereo_->Read10MsData(audio_frame);
    }
    EXPECT_EQ(0, acm_a_->Add10MsData(audio_frame));

    // Run sender side of ACM
    EXPECT_GT(acm_a_->Process(), -1);

    // Verify that the received packet size matches the settings.
    // (65535 is the uint16_t cast of the CNG marker -1; 0 means no packet.)
    // Fixed: was a bitwise '&' where logical '&&' was intended.
    rec_size = channel->payload_size();
    if ((0 < rec_size) && (rec_size < 65535)) {
      if (strcmp(send_codec_name_, "opus") == 0) {
        // Opus is a variable rate codec, hence calculate the average packet
        // size, and later make sure the average is in the right range.
        variable_bytes += rec_size;
        variable_packets++;
      } else {
        // For fixed rate codecs, check that packet size is correct.
        if ((rec_size != pack_size_bytes_ * out_channels)
            && (pack_size_bytes_ < 65535)) {
          error_count++;
        }
      }
      // Verify that the timestamp is updated with expected length
      time_stamp_diff = channel->timestamp_diff();
      if ((counter_ > 10) && (time_stamp_diff != pack_size_samp_)) {
        error_count++;
      }
    }

    // Run received side of ACM
    EXPECT_EQ(0, acm_b_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));

    // Write output speech to file
    out_file_.Write10MsData(
        audio_frame.data_,
        audio_frame.samples_per_channel_ * audio_frame.num_channels_);
  }

  EXPECT_EQ(0, error_count);

  // Check that packet size is in the right range for variable rate codecs,
  // such as Opus.
  if (variable_packets > 0) {
    variable_bytes /= variable_packets;
    EXPECT_NEAR(variable_bytes, pack_size_bytes_, 3);
  }

  if (in_file_mono_->EndOfFile()) {
    in_file_mono_->Rewind();
  }
  if (in_file_stereo_->EndOfFile()) {
    in_file_stereo_->Rewind();
  }
  // Reset in case we ended with a lost packet
  channel->set_lost_packet(false);
}
// Opens the PCM output file for test case |test_number| in the test output
// directory (teststereo_out_<n>.pcm, 32 kHz, write-binary).
void TestStereo::OpenOutFile(int16_t test_number) {
  std::stringstream name_builder;
  name_builder << webrtc::test::OutputPath() << "teststereo_out_"
               << test_number << ".pcm";
  const std::string file_name = name_builder.str();
  out_file_.Open(file_name, 32000, "wb");
}
void TestStereo::DisplaySendReceiveCodec() {
CodecInst my_codec_param;
acm_a_->SendCodec(&my_codec_param);
if (test_mode_ != 0) {
printf("%s -> ", my_codec_param.plname);
}
acm_b_->ReceiveCodec(&my_codec_param);
if (test_mode_ != 0) {
printf("%s\n", my_codec_param.plname);
}
}
} // namespace webrtc

View File

@@ -0,0 +1,117 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_
#include <math.h>
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
namespace webrtc {
// Channel configuration (mono vs. stereo) used by the stereo tests when
// registering codecs; kNotSet marks an unconfigured state.
enum StereoMonoMode {
  kNotSet,
  kMono,
  kStereo
};
// AudioPacketizationCallback installed on the A-to-B link in the stereo
// tests. Outgoing packets are forwarded to the receiving ACM while their
// payload size and timestamp increment are recorded for verification, and
// packets can be dropped to simulate loss.
class TestPackStereo : public AudioPacketizationCallback {
 public:
  TestPackStereo();
  ~TestPackStereo();
  // Sets the ACM that forwarded packets are delivered to.
  void RegisterReceiverACM(AudioCodingModule* acm);
  // AudioPacketizationCallback implementation; invoked by the sending ACM
  // for each encoded packet.
  virtual int32_t SendData(const FrameType frame_type,
                           const uint8_t payload_type,
                           const uint32_t timestamp,
                           const uint8_t* payload_data,
                           const uint16_t payload_size,
                           const RTPFragmentationHeader* fragmentation);
  // Payload size of the most recent packet (bytes).
  uint16_t payload_size();
  // Timestamp increment between the two most recent packets.
  uint32_t timestamp_diff();
  void reset_payload_size();
  void set_codec_mode(StereoMonoMode mode);
  // When |lost| is true the next packet is treated as lost.
  void set_lost_packet(bool lost);
 private:
  AudioCodingModule* receiver_acm_;
  int16_t seq_no_;
  uint32_t timestamp_diff_;
  uint32_t last_in_timestamp_;
  uint64_t total_bytes_;
  int payload_size_;
  StereoMonoMode codec_mode_;
  // Simulate packet losses.
  bool lost_packet_;
};
// ACMTest that exercises stereo (and mono-in-stereo) encode/decode paths
// for all stereo-capable codecs. |test_mode| != 0 enables verbose output.
class TestStereo : public ACMTest {
 public:
  explicit TestStereo(int test_mode);
  ~TestStereo();
  void Perform();
 private:
  // The default value of '-1' indicates that the registration is based only on
  // codec name and a sampling frequency matching is not required. This is
  // useful for codecs which support several sampling frequencies.
  void RegisterSendCodec(char side, char* codec_name, int32_t samp_freq_hz,
                         int rate, int pack_size, int channels,
                         int payload_type);
  // Streams the input file through the codec and verifies packet sizes and
  // timestamps; |percent_loss| > 0 simulates packet loss.
  void Run(TestPackStereo* channel, int in_channels, int out_channels,
           int percent_loss = 0);
  void OpenOutFile(int16_t test_number);
  void DisplaySendReceiveCodec();
  int32_t SendData(const FrameType frame_type, const uint8_t payload_type,
                   const uint32_t timestamp, const uint8_t* payload_data,
                   const uint16_t payload_size,
                   const RTPFragmentationHeader* fragmentation);
  int test_mode_;
  scoped_ptr<AudioCodingModule> acm_a_;
  scoped_ptr<AudioCodingModule> acm_b_;
  TestPackStereo* channel_a2b_;
  PCMFile* in_file_stereo_;
  PCMFile* in_file_mono_;
  PCMFile out_file_;
  int16_t test_cntr_;
  // Expected packet size in samples and bytes for the current send codec.
  uint16_t pack_size_samp_;
  uint16_t pack_size_bytes_;
  int counter_;
  char* send_codec_name_;
  // Payload types for stereo codecs and CNG
  int g722_pltype_;
  int l16_8khz_pltype_;
  int l16_16khz_pltype_;
  int l16_32khz_pltype_;
  int pcma_pltype_;
  int pcmu_pltype_;
  int celt_pltype_;
  int opus_pltype_;
  int cn_8khz_pltype_;
  int cn_16khz_pltype_;
  int cn_32khz_pltype_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTSTEREO_H_

View File

@@ -0,0 +1,395 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/TestVADDTX.h"
#include <iostream>
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Creates the sending (A) and receiving (B) ACMs; the A-to-B channel is
// allocated lazily in Perform().
TestVADDTX::TestVADDTX()
    : _acmA(AudioCodingModule::Create(0)),
      _acmB(AudioCodingModule::Create(1)),
      _channelA2B(NULL) {}
// Releases the A-to-B channel if Perform() created one. deleting a NULL
// pointer is a no-op, so no explicit guard is required.
TestVADDTX::~TestVADDTX() {
  delete _channelA2B;
  _channelA2B = NULL;
}
// Entry point: registers all receive codecs on side B, wires the A-to-B
// channel and the VAD activity monitor, then runs the five standard
// VAD/DTX configurations for each compiled-in send codec (iSAC WB/SWB,
// iLBC, Opus), writing decoded output to a per-codec file.
void TestVADDTX::Perform() {
  const std::string file_name = webrtc::test::ResourcePath(
      "audio_coding/testfile32kHz", "pcm");
  _inFileA.Open(file_name, 32000, "rb");
  EXPECT_EQ(0, _acmA->InitializeReceiver());
  EXPECT_EQ(0, _acmB->InitializeReceiver());
  uint8_t numEncoders = _acmA->NumberOfCodecs();
  CodecInst myCodecParam;
  for (uint8_t n = 0; n < numEncoders; n++) {
    EXPECT_EQ(0, _acmB->Codec(n, &myCodecParam));
    if (!strcmp(myCodecParam.plname, "opus")) {
      // Register Opus as mono.
      myCodecParam.channels = 1;
    }
    EXPECT_EQ(0, _acmB->RegisterReceiveCodec(myCodecParam));
  }
  // Create and connect the channel.
  _channelA2B = new Channel;
  _acmA->RegisterTransportCallback(_channelA2B);
  _channelA2B->RegisterReceiverACM(_acmB.get());
  _acmA->RegisterVADCallback(&_monitor);
  int16_t testCntr = 1;
#ifdef WEBRTC_CODEC_ISAC
  // Open output file.
  OpenOutFile(testCntr++);
  // Register iSAC WB as send codec.
  char nameISAC[] = "ISAC";
  RegisterSendCodec('A', nameISAC, 16000);
  // Run the five test cases.
  runTestCases();
  // Close file.
  _outFileB.Close();
  // Open output file.
  OpenOutFile(testCntr++);
  // Register iSAC SWB as send codec.
  RegisterSendCodec('A', nameISAC, 32000);
  // Run the five test cases.
  runTestCases();
  // Close file.
  _outFileB.Close();
#endif
#ifdef WEBRTC_CODEC_ILBC
  // Open output file.
  OpenOutFile(testCntr++);
  // Register iLBC as send codec.
  char nameILBC[] = "ilbc";
  RegisterSendCodec('A', nameILBC);
  // Run the five test cases.
  runTestCases();
  // Close file.
  _outFileB.Close();
#endif
#ifdef WEBRTC_CODEC_OPUS
  // Open output file.
  OpenOutFile(testCntr++);
  // Register Opus as send codec.
  char nameOPUS[] = "opus";
  RegisterSendCodec('A', nameOPUS);
  // Run the five test cases.
  runTestCases();
  // Close file.
  _outFileB.Close();
#endif
}
void TestVADDTX::runTestCases() {
// #1 DTX = OFF, VAD = ON, VADNormal
SetVAD(false, true, VADNormal);
Run();
VerifyTest();
// #2 DTX = OFF, VAD = ON, VADAggr
SetVAD(false, true, VADAggr);
Run();
VerifyTest();
// #3 DTX = ON, VAD = ON, VADLowBitrate
SetVAD(true, true, VADLowBitrate);
Run();
VerifyTest();
// #4 DTX = ON, VAD = ON, VADVeryAggr
SetVAD(true, true, VADVeryAggr);
Run();
VerifyTest();
// #5 DTX = ON, VAD = OFF, VADNormal
SetVAD(true, false, VADNormal);
Run();
VerifyTest();
}
// Test case #6: DTX on, VAD on, VADNormal, with the codec's internal DTX
// replaced by WebRTC DTX. |expected_result| is what
// ReplaceInternalDTXWithWebRtc() should return; the run/verify steps are
// only performed when the replacement succeeded.
void TestVADDTX::runTestInternalDTX(int expected_result) {
  SetVAD(true, true, VADNormal);
  EXPECT_EQ(expected_result, _acmA->ReplaceInternalDTXWithWebRtc(true));
  if (expected_result != 0)
    return;
  Run();
  VerifyTest();
}
// Applies the requested VAD/DTX configuration to the sending ACM and
// stores both the requested values (|_setStruct|) and the values the ACM
// reports back (|_getStruct|) for later comparison in VerifyTest().
void TestVADDTX::SetVAD(bool statusDTX, bool statusVAD, int16_t vadMode) {
  bool dtxEnabled, vadEnabled;
  ACMVADMode vadModeSet;
  EXPECT_EQ(0, _acmA->SetVAD(statusDTX, statusVAD, (ACMVADMode) vadMode));
  EXPECT_EQ(0, _acmA->VAD(&dtxEnabled, &vadEnabled, &vadModeSet));
  // Requested VAD/DTX settings
  _setStruct.statusDTX = statusDTX;
  _setStruct.statusVAD = statusVAD;
  _setStruct.vadMode = (ACMVADMode) vadMode;
  // VAD settings after setting VAD in ACM
  _getStruct.statusDTX = dtxEnabled;
  _getStruct.statusVAD = vadEnabled;
  _getStruct.vadMode = vadModeSet;
}
// Queries the sending ACM for its current VAD/DTX configuration and
// returns it as a VADDTXstruct snapshot.
VADDTXstruct TestVADDTX::GetVAD() {
  bool dtx_enabled;
  bool vad_enabled;
  ACMVADMode vad_mode;
  EXPECT_EQ(0, _acmA->VAD(&dtx_enabled, &vad_enabled, &vad_mode));
  VADDTXstruct snapshot;
  snapshot.statusDTX = dtx_enabled;
  snapshot.statusVAD = vad_enabled;
  snapshot.vadMode = vad_mode;
  return snapshot;
}
// Looks up a codec by name — and optionally sampling frequency and rate,
// where -1 means "don't care" — and registers it as the send codec on side
// 'A' or 'B'. Returns 0 on success, -1 for an unknown side.
// NOTE(review): if no codec matches, the loop falls through with
// |myCodecParam| holding the last enumerated codec, which is then silently
// registered — confirm callers only pass names that are guaranteed to
// match.
int16_t TestVADDTX::RegisterSendCodec(char side, char* codecName,
                                      int32_t samplingFreqHz,
                                      int32_t rateKbps) {
  std::cout << std::flush;
  AudioCodingModule* myACM;
  switch (side) {
    case 'A': {
      myACM = _acmA.get();
      break;
    }
    case 'B': {
      myACM = _acmB.get();
      break;
    }
    default:
      return -1;
  }
  if (myACM == NULL) {
    return -1;
  }
  CodecInst myCodecParam;
  // Scan the codec table for the first entry matching name (and, when
  // specified, sampling frequency and rate).
  for (int16_t codecCntr = 0; codecCntr < myACM->NumberOfCodecs();
      codecCntr++) {
    EXPECT_EQ(0, myACM->Codec((uint8_t) codecCntr, &myCodecParam));
    if (!STR_CASE_CMP(myCodecParam.plname, codecName)) {
      if ((samplingFreqHz == -1) || (myCodecParam.plfreq == samplingFreqHz)) {
        if ((rateKbps == -1) || (myCodecParam.rate == rateKbps)) {
          break;
        }
      }
    }
  }
  // We only allow VAD/DTX when sending mono.
  myCodecParam.channels = 1;
  EXPECT_EQ(0, myACM->RegisterSendCodec(myCodecParam));
  // Initialization was successful.
  return 0;
}
// Streams the whole input file through ACM A in 10 ms frames (stamping
// each frame with an advancing timestamp), plays the decoded audio out of
// ACM B into |_outFileB|, then snapshots and resets the VAD activity
// statistics collected by |_monitor| into |_statCounter|.
void TestVADDTX::Run() {
  AudioFrame audioFrame;
  uint16_t SamplesIn10MsecA = _inFileA.PayloadLength10Ms();
  uint32_t timestampA = 1;
  int32_t outFreqHzB = _outFileB.SamplingFrequency();
  while (!_inFileA.EndOfFile()) {
    _inFileA.Read10MsData(audioFrame);
    audioFrame.timestamp_ = timestampA;
    timestampA += SamplesIn10MsecA;
    EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
    EXPECT_GT(_acmA->Process(), -1);
    EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
    _outFileB.Write10MsData(audioFrame.data_, audioFrame.samples_per_channel_);
  }
#ifdef PRINT_STAT
  _monitor.PrintStatistics();
#endif
  // Rewind so the next configuration replays the same input.
  _inFileA.Rewind();
  _monitor.GetStatistics(_statCounter);
  _monitor.ResetStatistics();
}
// Opens the decoded-output file for test case |test_number|
// (testVADDTX_outFile_<n>.pcm, 16 kHz, write-binary).
void TestVADDTX::OpenOutFile(int16_t test_number) {
  std::stringstream name_builder;
  name_builder << webrtc::test::OutputPath() << "testVADDTX_outFile_"
               << test_number << ".pcm";
  const std::string file_name = name_builder.str();
  _outFileB.Open(file_name, 16000, "wb");
}
// Checks that the VAD/DTX settings reported by the ACM match what was
// requested, and that the observed frame-type statistics are consistent
// with those settings. Errors accumulate as bit patterns:
// |vadPattern| bit 0 = VAD mode mismatch, bit 1 = VAD status mismatch,
// bit 2 = DTX status mismatch; |statusEF| is non-zero on any empty-frame
// pattern mismatch. Always returns 0; failures surface via EXPECT_EQ.
int16_t TestVADDTX::VerifyTest() {
  // Verify empty frame result
  uint8_t statusEF = 0;
  uint8_t vadPattern = 0;
  uint8_t emptyFramePattern[6];
  CodecInst myCodecParam;
  _acmA->SendCodec(&myCodecParam);
  bool dtxInUse = true;
  bool isReplaced = false;
  // Codecs with internal DTX only count as "DTX in use" when their
  // internal DTX has been replaced by WebRTC DTX.
  if ((STR_CASE_CMP(myCodecParam.plname, "G729") == 0)
      || (STR_CASE_CMP(myCodecParam.plname, "G723") == 0)
      || (STR_CASE_CMP(myCodecParam.plname, "AMR") == 0)
      || (STR_CASE_CMP(myCodecParam.plname, "AMR-wb") == 0)
      || (STR_CASE_CMP(myCodecParam.plname, "speex") == 0)) {
    _acmA->IsInternalDTXReplacedWithWebRtc(&isReplaced);
    if (!isReplaced) {
      dtxInUse = false;
    }
  } else if (STR_CASE_CMP(myCodecParam.plname, "opus") == 0) {
    // Opus does not support WebRTC VAD/DTX; both must read back as off.
    if (_getStruct.statusDTX != false) {
      // DTX status doesn't match expected.
      vadPattern |= 4;
    } else if (_getStruct.statusVAD != false) {
      // Mismatch in VAD setting.
      vadPattern |= 2;
    } else {
      _setStruct.statusDTX = false;
      _setStruct.statusVAD = false;
    }
  }
  // Check for error in VAD/DTX settings
  if (_getStruct.statusDTX != _setStruct.statusDTX) {
    // DTX status doesn't match expected
    vadPattern |= 4;
  }
  if (_getStruct.statusDTX) {
    if ((!_getStruct.statusVAD && dtxInUse)
        || (!dtxInUse && (_getStruct.statusVAD != _setStruct.statusVAD))) {
      // Mismatch in VAD setting
      vadPattern |= 2;
    }
  } else {
    if (_getStruct.statusVAD != _setStruct.statusVAD) {
      // VAD status doesn't match expected
      vadPattern |= 2;
    }
  }
  if (_getStruct.vadMode != _setStruct.vadMode) {
    // VAD Mode doesn't match expected
    vadPattern |= 1;
  }
  // Set expected empty frame pattern
  int ii;
  for (ii = 0; ii < 6; ii++) {
    emptyFramePattern[ii] = 0;
  }
  // 0 - "kNoEncoding", not important to check.
  //     Codecs with packetsize != 80 samples will get this output.
  // 1 - "kActiveNormalEncoded", expect to receive some frames with this label.
  // 2 - "kPassiveNormalEncoded".
  // 3 - "kPassiveDTXNB".
  // 4 - "kPassiveDTXWB".
  // 5 - "kPassiveDTXSWB".
  emptyFramePattern[0] = 1;
  emptyFramePattern[1] = 1;
  emptyFramePattern[2] = (((!_getStruct.statusDTX && _getStruct.statusVAD)
      || (!dtxInUse && _getStruct.statusDTX)));
  emptyFramePattern[3] = ((_getStruct.statusDTX && dtxInUse
      && (_acmA->SendFrequency() == 8000)));
  emptyFramePattern[4] = ((_getStruct.statusDTX && dtxInUse
      && (_acmA->SendFrequency() == 16000)));
  emptyFramePattern[5] = ((_getStruct.statusDTX && dtxInUse
      && (_acmA->SendFrequency() == 32000)));
  // Check pattern 1-5 (skip 0): expected frame types must have occurred,
  // unexpected ones must not.
  for (int ii = 1; ii < 6; ii++) {
    if (emptyFramePattern[ii]) {
      statusEF |= (_statCounter[ii] == 0);
    } else {
      statusEF |= (_statCounter[ii] > 0);
    }
  }
  EXPECT_EQ(0, statusEF);
  EXPECT_EQ(0, vadPattern);
  return 0;
}
// Starts with all six frame-type counters at zero; ResetStatistics()
// performs exactly that zeroing.
ActivityMonitor::ActivityMonitor() {
  ResetStatistics();
}
// No resources to release; counters are plain members.
ActivityMonitor::~ActivityMonitor() {
}
// ACMVADCallback hook: tallies each reported frame encoding type.
// |frameType| indexes the six-entry |_counter| array (see the
// WebRtcACMEncodingType mapping documented in the header). Always
// returns 0.
int32_t ActivityMonitor::InFrameType(int16_t frameType) {
  // Guard the index: the original wrote through _counter[frameType]
  // unconditionally, which is out-of-bounds for any unexpected value.
  if (frameType >= 0 && frameType < 6) {
    _counter[frameType]++;
  }
  return 0;
}
// Dumps the frame-type counters to stdout as a two-row table. Note the
// columns are printed in the order 1,2,3,4,5,0 to match the header row.
void ActivityMonitor::PrintStatistics() {
  printf("\n");
  printf("kActiveNormalEncoded kPassiveNormalEncoded kPassiveDTXWB ");
  printf("kPassiveDTXNB kPassiveDTXSWB kFrameEmpty\n");
  printf("%19u%22u%14u%14u%14u%11u\n\n", _counter[1], _counter[2],
         _counter[3], _counter[4], _counter[5], _counter[0]);
}
// Zeroes all six frame-type counters.
void ActivityMonitor::ResetStatistics() {
  for (int i = 0; i < 6; i++) {
    _counter[i] = 0;
  }
}
// Copies all six frame-type counters into |getCounter|, which the caller
// must size for at least six entries.
void ActivityMonitor::GetStatistics(uint32_t* getCounter) {
  int index = 0;
  while (index < 6) {
    getCounter[index] = _counter[index];
    ++index;
  }
}
} // namespace webrtc

View File

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
// Snapshot of a VAD/DTX configuration; used to compare the requested
// settings against what the ACM reports back.
typedef struct {
  bool statusDTX;
  bool statusVAD;
  ACMVADMode vadMode;
} VADDTXstruct;
// ACMVADCallback that counts how many frames of each encoding type the
// sending ACM produces, so tests can verify VAD/DTX behavior.
class ActivityMonitor : public ACMVADCallback {
 public:
  ActivityMonitor();
  ~ActivityMonitor();
  // Called by the ACM for every frame; increments the matching counter.
  int32_t InFrameType(int16_t frameType);
  void PrintStatistics();
  void ResetStatistics();
  // Copies the six counters into |getCounter| (caller-allocated).
  void GetStatistics(uint32_t* getCounter);
 private:
  // Counting according to
  // enum WebRtcACMEncodingType {
  //   kNoEncoding,
  //   kActiveNormalEncoded,
  //   kPassiveNormalEncoded,
  //   kPassiveDTXNB,
  //   kPassiveDTXWB,
  //   kPassiveDTXSWB
  // };
  uint32_t _counter[6];
};
// ACMTest exercising VAD/DTX configurations across the compiled-in send
// codecs; requested settings are compared against what the ACM reports
// and against observed frame-type statistics.
class TestVADDTX : public ACMTest {
 public:
  TestVADDTX();
  ~TestVADDTX();
  void Perform();
 private:
  // Registration can be based on codec name only, codec name and sampling
  // frequency, or codec name, sampling frequency and rate.
  // NOTE(review): this default parameter is named rateKhz but the
  // definition names it rateKbps and compares it against CodecInst::rate —
  // confirm the intended unit.
  int16_t RegisterSendCodec(char side,
                            char* codecName,
                            int32_t samplingFreqHz = -1,
                            int32_t rateKhz = -1);
  void Run();
  void OpenOutFile(int16_t testNumber);
  // Runs the five standard VAD/DTX configurations.
  void runTestCases();
  // Runs the internal-DTX-replacement case (#6).
  void runTestInternalDTX(int expected_result);
  void SetVAD(bool statusDTX, bool statusVAD, int16_t vadMode);
  VADDTXstruct GetVAD();
  int16_t VerifyTest();
  scoped_ptr<AudioCodingModule> _acmA;
  scoped_ptr<AudioCodingModule> _acmB;
  Channel* _channelA2B;
  PCMFile _inFileA;
  PCMFile _outFileB;
  // Collects frame-type statistics from the sending ACM.
  ActivityMonitor _monitor;
  uint32_t _statCounter[6];
  // Requested settings and the settings read back from the ACM.
  VADDTXstruct _setStruct;
  VADDTXstruct _getStruct;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TESTVADDTX_H_

View File

@@ -0,0 +1,143 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include <string>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/APITest.h"
#include "webrtc/modules/audio_coding/main/test/EncodeDecodeTest.h"
#include "webrtc/modules/audio_coding/main/test/iSACTest.h"
#include "webrtc/modules/audio_coding/main/test/opus_test.h"
#include "webrtc/modules/audio_coding/main/test/PacketLossTest.h"
#include "webrtc/modules/audio_coding/main/test/TestAllCodecs.h"
#include "webrtc/modules/audio_coding/main/test/TestRedFec.h"
#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
#include "webrtc/modules/audio_coding/main/test/TestVADDTX.h"
#include "webrtc/modules/audio_coding/main/test/TwoWayCommunication.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
using webrtc::Trace;
// This parameter is used to describe how to run the tests. It is normally
// set to 0, and all tests are run in quiet mode.
#define ACM_TEST_MODE 0
// Runs the encode/decode pass over every supported codec.
TEST(AudioCodingModuleTest, TestAllCodecs) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_allcodecs_trace.txt").c_str());
  webrtc::TestAllCodecs(ACM_TEST_MODE).Perform();
  Trace::ReturnTrace();
}
// Basic encode/decode round trip; disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestEncodeDecode)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_encodedecode_trace.txt").c_str());
  webrtc::EncodeDecodeTest(ACM_TEST_MODE).Perform();
  Trace::ReturnTrace();
}
// RED/FEC redundancy test; disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestRedFec)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_fec_trace.txt").c_str());
  webrtc::TestRedFec().Perform();
  Trace::ReturnTrace();
}
// iSAC-specific test; disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestIsac)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_isac_trace.txt").c_str());
  webrtc::ISACTest(ACM_TEST_MODE).Perform();
  Trace::ReturnTrace();
}
// Bidirectional A<->B session including mid-session API misuse/recovery;
// disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TwoWayCommunication)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_twowaycom_trace.txt").c_str());
  webrtc::TwoWayCommunication(ACM_TEST_MODE).Perform();
  Trace::ReturnTrace();
}
// Stereo codec paths; disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestStereo)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_stereo_trace.txt").c_str());
  webrtc::TestStereo(ACM_TEST_MODE).Perform();
  Trace::ReturnTrace();
}
// VAD/DTX configuration matrix; disabled on Android.
TEST(AudioCodingModuleTest, DISABLED_ON_ANDROID(TestVADDTX)) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_vaddtx_trace.txt").c_str());
  webrtc::TestVADDTX().Perform();
  Trace::ReturnTrace();
}
// Opus-specific test.
TEST(AudioCodingModuleTest, TestOpus) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_opus_trace.txt").c_str());
  webrtc::OpusTest().Perform();
  Trace::ReturnTrace();
}
// Mono packet-loss test. (Constructor arguments mirror the other
// PacketLossTest cases below — presumably channels/loss parameters and a
// burst-length selector; confirm against PacketLossTest's declaration.)
TEST(AudioCodingModuleTest, TestPacketLoss) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_packetloss_trace.txt").c_str());
  webrtc::PacketLossTest(1, 10, 10, 1).Perform();
  Trace::ReturnTrace();
}
// Mono packet-loss test with bursty losses.
TEST(AudioCodingModuleTest, TestPacketLossBurst) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_packetloss_burst_trace.txt").c_str());
  webrtc::PacketLossTest(1, 10, 10, 2).Perform();
  Trace::ReturnTrace();
}
// Stereo variant of the packet-loss test. Writes its trace to a distinct
// file: the original reused "acm_packetloss_trace.txt" and silently
// overwrote the mono test's trace output.
TEST(AudioCodingModuleTest, TestPacketLossStereo) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_packetloss_stereo_trace.txt").c_str());
  webrtc::PacketLossTest(2, 10, 10, 1).Perform();
  Trace::ReturnTrace();
}
// Stereo bursty packet-loss test. Writes its trace to a distinct file:
// the original reused "acm_packetloss_burst_trace.txt" and silently
// overwrote the mono burst test's trace output.
TEST(AudioCodingModuleTest, TestPacketLossStereoBurst) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_packetloss_stereo_burst_trace.txt").c_str());
  webrtc::PacketLossTest(2, 10, 10, 2).Perform();
  Trace::ReturnTrace();
}
// The full API test is too long to run automatically on bots, but can be used
// for offline testing. User interaction is needed.
#ifdef ACM_TEST_FULL_API
// Full interactive API test; compiled only when ACM_TEST_FULL_API is
// defined (see the comment above the surrounding #ifdef).
TEST(AudioCodingModuleTest, TestAPI) {
  Trace::CreateTrace();
  Trace::SetTraceFile((webrtc::test::OutputPath() +
      "acm_apitest_trace.txt").c_str());
  webrtc::APITest().Perform();
  Trace::ReturnTrace();
}
#endif

View File

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "TimedTrace.h"
#include <math.h>
// Shared (static) state: a single elapsed-time clock and one trace file
// handle used by every TimedTrace instance.
double TimedTrace::_timeEllapsedSec = 0;
FILE* TimedTrace::_timedTraceFile = NULL;
// Nothing to do per instance; all state is static and initialized above.
TimedTrace::TimedTrace() {
}
// Closes the shared trace file if it is open. Because the handle is
// static, destroying any instance closes the file for all of them.
TimedTrace::~TimedTrace() {
  FILE* file = _timedTraceFile;
  _timedTraceFile = NULL;
  if (file != NULL) {
    fclose(file);
  }
}
// Lazily opens the shared trace file for writing. Returns 0 on success,
// -1 if the file could not be opened; a file opened earlier is reused.
int16_t TimedTrace::SetUp(char* fileName) {
  if (_timedTraceFile == NULL) {
    _timedTraceFile = fopen(fileName, "w");
  }
  return (_timedTraceFile == NULL) ? -1 : 0;
}
// Overrides the shared elapsed-time counter, in seconds.
void TimedTrace::SetTimeEllapsed(double timeEllapsedSec) {
  _timeEllapsedSec = timeEllapsedSec;
}
// Returns the shared elapsed time in seconds.
double TimedTrace::TimeEllapsed() {
  return _timeEllapsedSec;
}
// Advances the shared clock by one 10 ms tick.
void TimedTrace::Tick10Msec() {
  _timeEllapsedSec += 0.010;
}
// Writes |message| to the trace file, prefixed with the elapsed time both
// as raw seconds and as minutes:seconds. A no-op if SetUp() has not
// opened the file. (Removed a stale commented-out copy of the format
// string that duplicated the live one.)
void TimedTrace::TimedLogg(char* message) {
  unsigned int minutes = (uint32_t) floor(_timeEllapsedSec / 60.0);
  double seconds = _timeEllapsedSec - minutes * 60;
  if (_timedTraceFile != NULL) {
    fprintf(_timedTraceFile, "%8.2f, %3u:%05.2f: %s\n", _timeEllapsedSec,
            minutes, seconds, message);
  }
}

View File

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TIMED_TRACE_H
#define TIMED_TRACE_H
#include "typedefs.h"
#include <stdio.h>
#include <stdlib.h>
// Small helper that writes timestamped log lines to a single shared file.
// The elapsed-time clock and the file handle are static, so every
// instance shares the same clock and output. (The "Ellapsed"/"Logg"
// spellings are part of the existing API and kept as-is.)
class TimedTrace {
 public:
  TimedTrace();
  ~TimedTrace();
  // Overrides the shared elapsed-time counter (seconds).
  void SetTimeEllapsed(double myTime);
  // Returns the shared elapsed time in seconds.
  double TimeEllapsed();
  // Advances the shared clock by 10 ms.
  void Tick10Msec();
  // Opens the shared trace file; returns 0 on success, -1 on failure.
  int16_t SetUp(char* fileName);
  // Writes |message| prefixed with "seconds, min:sec".
  void TimedLogg(char* message);
 private:
  static double _timeEllapsedSec;
  static FILE* _timedTraceFile;
};
#endif

View File

@@ -0,0 +1,352 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "TwoWayCommunication.h"
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#ifdef WIN32
#include <Windows.h>
#endif
#include "gtest/gtest.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
#define MAX_FILE_NAME_LENGTH_BYTE 500
// Creates the four ACMs used by the test (A/B plus reference A/B).
// Channel and DTMF-detector pointers are NULL-initialized here because
// the destructor unconditionally deletes/inspects them; previously they
// stayed uninitialized until SetUp()/SetUpAutotest() ran, which is
// undefined behavior if the object is destroyed before Perform().
TwoWayCommunication::TwoWayCommunication(int testMode)
    : _acmA(AudioCodingModule::Create(1)),
      _acmB(AudioCodingModule::Create(2)),
      _acmRefA(AudioCodingModule::Create(3)),
      _acmRefB(AudioCodingModule::Create(4)),
      _testMode(testMode) {
  _channel_A2B = NULL;
  _channel_B2A = NULL;
  _channelRef_A2B = NULL;
  _channelRef_B2A = NULL;
#ifdef WEBRTC_DTMF_DETECTION
  _dtmfDetectorA = NULL;
  _dtmfDetectorB = NULL;
#endif
}
// Tears down the four channels and (when compiled in) the DTMF detectors,
// then closes every input/output file.
// NOTE(review): the channel pointers are only assigned in
// SetUp()/SetUpAutotest(); destroying an instance where neither ran
// deletes uninitialized pointers — confirm Perform() is always called
// before destruction.
TwoWayCommunication::~TwoWayCommunication() {
  delete _channel_A2B;
  delete _channel_B2A;
  delete _channelRef_A2B;
  delete _channelRef_B2A;
#ifdef WEBRTC_DTMF_DETECTION
  if (_dtmfDetectorA != NULL) {
    delete _dtmfDetectorA;
  }
  if (_dtmfDetectorB != NULL) {
    delete _dtmfDetectorB;
  }
#endif
  _inFileA.Close();
  _inFileB.Close();
  _outFileA.Close();
  _outFileB.Close();
  _outFileRefA.Close();
  _outFileRefB.Close();
}
// Interactively lists all supported codecs on stdout and reads the user's
// send-codec index for side A and side B from stdin. Used only in the
// manual (non-autotest) mode.
void TwoWayCommunication::ChooseCodec(uint8_t* codecID_A,
                                      uint8_t* codecID_B) {
  // A scratch ACM is created just to enumerate the codec table.
  scoped_ptr<AudioCodingModule> tmpACM(AudioCodingModule::Create(0));
  uint8_t noCodec = tmpACM->NumberOfCodecs();
  CodecInst codecInst;
  printf("List of Supported Codecs\n");
  printf("========================\n");
  for (uint8_t codecCntr = 0; codecCntr < noCodec; codecCntr++) {
    EXPECT_EQ(tmpACM->Codec(codecCntr, &codecInst), 0);
    printf("%d- %s\n", codecCntr, codecInst.plname);
  }
  printf("\nChoose a send codec for side A [0]: ");
  char myStr[15] = "";
  EXPECT_TRUE(fgets(myStr, 10, stdin) != NULL);
  *codecID_A = (uint8_t) atoi(myStr);
  printf("\nChoose a send codec for side B [0]: ");
  EXPECT_TRUE(fgets(myStr, 10, stdin) != NULL);
  *codecID_B = (uint8_t) atoi(myStr);
  printf("\n");
}
// Interactive setup: asks the user to pick send codecs and input files,
// registers the codecs on the main and reference ACM pairs, opens all
// input/output files and wires the four transport channels.
void TwoWayCommunication::SetUp() {
  uint8_t codecID_A;
  uint8_t codecID_B;
  ChooseCodec(&codecID_A, &codecID_B);
  CodecInst codecInst_A;
  CodecInst codecInst_B;
  CodecInst dummyCodec;
  EXPECT_EQ(0, _acmA->Codec(codecID_A, &codecInst_A));
  EXPECT_EQ(0, _acmB->Codec(codecID_B, &codecInst_B));
  // Index 6 is only used to populate |dummyCodec|.
  EXPECT_EQ(0, _acmA->Codec(6, &dummyCodec));
  //--- Set A codecs
  EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
  EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
#ifdef WEBRTC_DTMF_DETECTION
  _dtmfDetectorA = new(DTMFDetector);
  EXPECT_GT(_acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA),
            -1);
#endif
  //--- Set ref-A codecs
  EXPECT_EQ(0, _acmRefA->RegisterSendCodec(codecInst_A));
  EXPECT_EQ(0, _acmRefA->RegisterReceiveCodec(codecInst_B));
  //--- Set B codecs
  EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
  EXPECT_EQ(0, _acmB->RegisterReceiveCodec(codecInst_A));
#ifdef WEBRTC_DTMF_DETECTION
  _dtmfDetectorB = new(DTMFDetector);
  EXPECT_GT(_acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA),
            -1);
#endif
  //--- Set ref-B codecs
  EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
  EXPECT_EQ(0, _acmRefB->RegisterReceiveCodec(codecInst_A));
  uint16_t frequencyHz;
  //--- Input A
  std::string in_file_name = webrtc::test::ResourcePath(
      "audio_coding/testfile32kHz", "pcm");
  frequencyHz = 32000;
  printf("Enter input file at side A [%s]: ", in_file_name.c_str());
  PCMFile::ChooseFile(&in_file_name, 499, &frequencyHz);
  _inFileA.Open(in_file_name, frequencyHz, "rb");
  //--- Output A
  std::string out_file_a = webrtc::test::OutputPath() + "outA.pcm";
  printf("Output file at side A: %s\n", out_file_a.c_str());
  printf("Sampling frequency (in Hz) of the above file: %u\n", frequencyHz);
  _outFileA.Open(out_file_a, frequencyHz, "wb");
  std::string ref_file_name = webrtc::test::OutputPath() + "ref_outA.pcm";
  _outFileRefA.Open(ref_file_name, frequencyHz, "wb");
  //--- Input B
  in_file_name = webrtc::test::ResourcePath("audio_coding/testfile32kHz",
                                            "pcm");
  frequencyHz = 32000;
  printf("\n\nEnter input file at side B [%s]: ", in_file_name.c_str());
  PCMFile::ChooseFile(&in_file_name, 499, &frequencyHz);
  _inFileB.Open(in_file_name, frequencyHz, "rb");
  //--- Output B
  std::string out_file_b = webrtc::test::OutputPath() + "outB.pcm";
  printf("Output file at side B: %s\n", out_file_b.c_str());
  printf("Sampling frequency (in Hz) of the above file: %u\n", frequencyHz);
  _outFileB.Open(out_file_b, frequencyHz, "wb");
  ref_file_name = webrtc::test::OutputPath() + "ref_outB.pcm";
  _outFileRefB.Open(ref_file_name, frequencyHz, "wb");
  //--- Set A-to-B channel
  _channel_A2B = new Channel;
  _acmA->RegisterTransportCallback(_channel_A2B);
  _channel_A2B->RegisterReceiverACM(_acmB.get());
  //--- Do the same for the reference
  _channelRef_A2B = new Channel;
  _acmRefA->RegisterTransportCallback(_channelRef_A2B);
  _channelRef_A2B->RegisterReceiverACM(_acmRefB.get());
  //--- Set B-to-A channel
  _channel_B2A = new Channel;
  _acmB->RegisterTransportCallback(_channel_B2A);
  _channel_B2A->RegisterReceiverACM(_acmA.get());
  //--- Do the same for reference
  _channelRef_B2A = new Channel;
  _acmRefB->RegisterTransportCallback(_channelRef_B2A);
  _channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
  // The clicks will be more obvious when we
  // are in FAX mode.
  EXPECT_EQ(_acmB->SetPlayoutMode(fax), 0);
  EXPECT_EQ(_acmRefB->SetPlayoutMode(fax), 0);
}
// Non-interactive setup: hard-codes iSAC WB as side A's send codec and
// L16 8 kHz as side B's, opens the standard 32 kHz resource file as input
// for both sides, opens the output/reference files and wires the four
// transport channels.
void TwoWayCommunication::SetUpAutotest() {
  CodecInst codecInst_A;
  CodecInst codecInst_B;
  CodecInst dummyCodec;
  EXPECT_EQ(0, _acmA->Codec("ISAC", &codecInst_A, 16000, 1));
  EXPECT_EQ(0, _acmB->Codec("L16", &codecInst_B, 8000, 1));
  // Index 6 is only used to populate |dummyCodec|.
  EXPECT_EQ(0, _acmA->Codec(6, &dummyCodec));
  //--- Set A codecs
  EXPECT_EQ(0, _acmA->RegisterSendCodec(codecInst_A));
  EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
#ifdef WEBRTC_DTMF_DETECTION
  _dtmfDetectorA = new(DTMFDetector);
  EXPECT_EQ(0, _acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA));
#endif
  //--- Set ref-A codecs
  EXPECT_GT(_acmRefA->RegisterSendCodec(codecInst_A), -1);
  EXPECT_GT(_acmRefA->RegisterReceiveCodec(codecInst_B), -1);
  //--- Set B codecs
  EXPECT_GT(_acmB->RegisterSendCodec(codecInst_B), -1);
  EXPECT_GT(_acmB->RegisterReceiveCodec(codecInst_A), -1);
#ifdef WEBRTC_DTMF_DETECTION
  _dtmfDetectorB = new(DTMFDetector);
  EXPECT_EQ(0, _acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA));
#endif
  //--- Set ref-B codecs
  EXPECT_EQ(0, _acmRefB->RegisterSendCodec(codecInst_B));
  EXPECT_EQ(0, _acmRefB->RegisterReceiveCodec(codecInst_A));
  uint16_t frequencyHz;
  //--- Input A and B
  std::string in_file_name = webrtc::test::ResourcePath(
      "audio_coding/testfile32kHz", "pcm");
  frequencyHz = 16000;
  _inFileA.Open(in_file_name, frequencyHz, "rb");
  _inFileB.Open(in_file_name, frequencyHz, "rb");
  //--- Output A
  std::string output_file_a = webrtc::test::OutputPath() + "outAutotestA.pcm";
  frequencyHz = 16000;
  _outFileA.Open(output_file_a, frequencyHz, "wb");
  std::string output_ref_file_a = webrtc::test::OutputPath()
      + "ref_outAutotestA.pcm";
  _outFileRefA.Open(output_ref_file_a, frequencyHz, "wb");
  //--- Output B
  std::string output_file_b = webrtc::test::OutputPath() + "outAutotestB.pcm";
  frequencyHz = 16000;
  _outFileB.Open(output_file_b, frequencyHz, "wb");
  std::string output_ref_file_b = webrtc::test::OutputPath()
      + "ref_outAutotestB.pcm";
  _outFileRefB.Open(output_ref_file_b, frequencyHz, "wb");
  //--- Set A-to-B channel
  _channel_A2B = new Channel;
  _acmA->RegisterTransportCallback(_channel_A2B);
  _channel_A2B->RegisterReceiverACM(_acmB.get());
  //--- Do the same for the reference
  _channelRef_A2B = new Channel;
  _acmRefA->RegisterTransportCallback(_channelRef_A2B);
  _channelRef_A2B->RegisterReceiverACM(_acmRefB.get());
  //--- Set B-to-A channel
  _channel_B2A = new Channel;
  _acmB->RegisterTransportCallback(_channel_B2A);
  _channel_B2A->RegisterReceiverACM(_acmA.get());
  //--- Do the same for reference
  _channelRef_B2A = new Channel;
  _acmRefB->RegisterTransportCallback(_channelRef_B2A);
  _channelRef_B2A->RegisterReceiverACM(_acmRefA.get());
  // The clicks will be more obvious when we
  // are in FAX mode.
  EXPECT_EQ(0, _acmB->SetPlayoutMode(fax));
  EXPECT_EQ(0, _acmRefB->SetPlayoutMode(fax));
}
// Runs the full-duplex test: sides A and B exchange audio through the
// A2B/B2A channels while two reference ACMs process the same input in
// parallel. Periodically misuses the API (ResetEncoder / InitializeSender /
// ResetDecoder / InitializeReceiver) to verify the ACMs recover.
void TwoWayCommunication::Perform() {
  if (_testMode == 0) {
    SetUpAutotest();
  } else {
    SetUp();
  }
  unsigned int msecPassed = 0;
  unsigned int secPassed = 0;
  int32_t outFreqHzA = _outFileA.SamplingFrequency();
  int32_t outFreqHzB = _outFileB.SamplingFrequency();
  AudioFrame audioFrame;
  CodecInst codecInst_B;
  CodecInst dummy;
  // Remember B's send codec so it can be re-registered after
  // InitializeSender() wipes it later in the loop.
  EXPECT_EQ(0, _acmB->SendCodec(&codecInst_B));
  // In the following loop we test that the code can handle misuse of the
  // APIs. In the middle of a session with data flowing between two sides,
  // called A and B, APIs will be called, like ResetEncoder(), and the code
  // should continue to run, and be able to recover.
  bool expect_error_add = false;
  bool expect_error_process = false;
  while (!_inFileA.EndOfFile() && !_inFileB.EndOfFile()) {
    msecPassed += 10;
    EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
    EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
    EXPECT_EQ(0, _acmRefA->Add10MsData(audioFrame));
    EXPECT_GT(_inFileB.Read10MsData(audioFrame), 0);
    // Expect call to pass except for the time when no send codec is
    // registered.
    if (!expect_error_add) {
      EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
    } else {
      EXPECT_EQ(-1, _acmB->Add10MsData(audioFrame));
    }
    // Expect to pass except for the time when there either is no send codec
    // registered, or no receive codec.
    if (!expect_error_process) {
      EXPECT_GT(_acmB->Process(), -1);
    } else {
      EXPECT_EQ(_acmB->Process(), -1);
    }
    EXPECT_EQ(0, _acmRefB->Add10MsData(audioFrame));
    EXPECT_GT(_acmA->Process(), -1);
    EXPECT_GT(_acmRefA->Process(), -1);
    EXPECT_GT(_acmRefB->Process(), -1);
    // Play out 10 ms on both sides and on the reference ACMs; write the
    // decoded audio to the corresponding output files.
    EXPECT_EQ(0, _acmA->PlayoutData10Ms(outFreqHzA, &audioFrame));
    _outFileA.Write10MsData(audioFrame);
    EXPECT_EQ(0, _acmRefA->PlayoutData10Ms(outFreqHzA, &audioFrame));
    _outFileRefA.Write10MsData(audioFrame);
    EXPECT_EQ(0, _acmB->PlayoutData10Ms(outFreqHzB, &audioFrame));
    _outFileB.Write10MsData(audioFrame);
    EXPECT_EQ(0, _acmRefB->PlayoutData10Ms(outFreqHzB, &audioFrame));
    _outFileRefB.Write10MsData(audioFrame);
    // Update time counters each time a second of data has passed.
    if (msecPassed >= 1000) {
      msecPassed = 0;
      secPassed++;
    }
    // Call ResetEncoder for ACM on side A, and InitializeSender for ACM on
    // side B; B is now expected to fail Add10MsData()/Process() until its
    // send codec is re-registered below.
    if (((secPassed % 5) == 4) && (msecPassed == 0)) {
      EXPECT_EQ(0, _acmA->ResetEncoder());
      EXPECT_EQ(0, _acmB->InitializeSender());
      expect_error_add = true;
      expect_error_process = true;
    }
    // Re-register send codec on side B.
    if (((secPassed % 5) == 4) && (msecPassed >= 990)) {
      EXPECT_EQ(0, _acmB->RegisterSendCodec(codecInst_B));
      EXPECT_EQ(0, _acmB->SendCodec(&dummy));
      expect_error_add = false;
      expect_error_process = false;
    }
    // Reset decoder on side B, and initialize receiver on side A.
    if (((secPassed % 7) == 6) && (msecPassed == 0)) {
      EXPECT_EQ(0, _acmB->ResetDecoder());
      EXPECT_EQ(0, _acmA->InitializeReceiver());
    }
    // Re-register codec on side A.
    if (((secPassed % 7) == 6) && (msecPassed >= 990)) {
      EXPECT_EQ(0, _acmA->RegisterReceiveCodec(codecInst_B));
    }
  }
}
} // namespace webrtc

View File

@@ -0,0 +1,60 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
// Two-way (full-duplex) ACM test. Audio flows A->B and B->A through Channel
// objects, with a parallel pair of "reference" ACMs processing the same
// input so outputs can be compared against the main pair.
class TwoWayCommunication : public ACMTest {
 public:
  // |testMode| == 0 selects the non-interactive autotest setup in Perform().
  explicit TwoWayCommunication(int testMode);
  ~TwoWayCommunication();

  // Runs the whole test (setup, audio pumping, API-misuse recovery checks).
  void Perform();
 private:
  // Lets the caller pick the send codecs for sides A and B.
  void ChooseCodec(uint8_t* codecID_A, uint8_t* codecID_B);
  void SetUp();
  void SetUpAutotest();

  scoped_ptr<AudioCodingModule> _acmA;     // Side A under test.
  scoped_ptr<AudioCodingModule> _acmB;     // Side B under test.
  scoped_ptr<AudioCodingModule> _acmRefA;  // Reference for side A.
  scoped_ptr<AudioCodingModule> _acmRefB;  // Reference for side B.

  // Transport hookups; one per direction and one per reference direction.
  Channel* _channel_A2B;
  Channel* _channel_B2A;
  Channel* _channelRef_A2B;
  Channel* _channelRef_B2A;

  PCMFile _inFileA;
  PCMFile _inFileB;
  PCMFile _outFileA;
  PCMFile _outFileB;
  PCMFile _outFileRefA;
  PCMFile _outFileRefB;

  int _testMode;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_TWOWAYCOMMUNICATION_H_

View File

@@ -0,0 +1,271 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <assert.h>
#include <math.h>
#include <iostream>
#include "gflags/gflags.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
DEFINE_string(codec, "isac", "Codec Name");
DEFINE_int32(sample_rate_hz, 16000, "Sampling rate in Hertz.");
DEFINE_int32(num_channels, 1, "Number of Channels.");
DEFINE_string(input_file, "", "Input file, PCM16 32 kHz, optional.");
DEFINE_int32(delay, 0, "Delay in millisecond.");
DEFINE_int32(init_delay, 0, "Initial delay in millisecond.");
DEFINE_bool(dtx, false, "Enable DTX at the sender side.");
DEFINE_bool(packet_loss, false, "Apply packet loss, c.f. Channel{.cc, .h}.");
DEFINE_bool(fec, false, "Use Forward Error Correction (FEC).");
namespace webrtc {
namespace {
// Send-codec configuration for one delay-test run.
struct CodecSettings {
  char name[50];       // Codec name as known to the ACM codec database.
  int sample_rate_hz;  // Sampling rate in Hz.
  int num_channels;    // 1 = mono, 2 = stereo.
};
// Sender-side ACM feature toggles.
struct AcmSettings {
  bool dtx;  // Enable VAD/DTX at the sender.
  bool fec;  // Enable RED (applied via SetREDStatus below).
};
// Complete configuration for a single delay-test iteration.
struct TestSettings {
  CodecSettings codec;
  AcmSettings acm;
  bool packet_loss;  // Apply packet loss, c.f. Channel{.cc, .h}.
};
} // namespace
// Measures end-to-end audio delay through a pair of ACMs connected by a
// Channel. Command-line flags select codec, sampling rate, channels,
// packet loss and (initial/minimum) playout delay.
class DelayTest {
 public:
  DelayTest()
      : acm_a_(AudioCodingModule::Create(0)),
        acm_b_(AudioCodingModule::Create(1)),
        channel_a2b_(new Channel),
        test_cntr_(0),
        encoding_sample_rate_hz_(8000) {}

  ~DelayTest() {
    if (channel_a2b_ != NULL) {
      delete channel_a2b_;
      channel_a2b_ = NULL;
    }
    in_file_a_.Close();
  }

  // Opens the input file, initializes both receivers, applies the delay
  // flags, and registers every usable receive codec on side B.
  void Initialize() {
    test_cntr_ = 0;
    std::string file_name = webrtc::test::ResourcePath(
        "audio_coding/testfile32kHz", "pcm");
    if (FLAGS_input_file.size() > 0)
      file_name = FLAGS_input_file;
    in_file_a_.Open(file_name, 32000, "rb");
    ASSERT_EQ(0, acm_a_->InitializeReceiver()) <<
        "Couldn't initialize receiver.\n";
    ASSERT_EQ(0, acm_b_->InitializeReceiver()) <<
        "Couldn't initialize receiver.\n";
    if (FLAGS_init_delay > 0) {
      ASSERT_EQ(0, acm_b_->SetInitialPlayoutDelay(FLAGS_init_delay)) <<
          "Failed to set initial delay.\n";
    }
    if (FLAGS_delay > 0) {
      ASSERT_EQ(0, acm_b_->SetMinimumPlayoutDelay(FLAGS_delay)) <<
          "Failed to set minimum delay.\n";
    }
    int num_encoders = acm_a_->NumberOfCodecs();
    CodecInst my_codec_param;
    for (int n = 0; n < num_encoders; n++) {
      EXPECT_EQ(0, acm_b_->Codec(n, &my_codec_param)) <<
          "Failed to get codec.";
      // Opus is registered as mono; other multi-channel entries, 48 kHz CN
      // and telephone-event are skipped.
      if (STR_CASE_CMP(my_codec_param.plname, "opus") == 0)
        my_codec_param.channels = 1;
      else if (my_codec_param.channels > 1)
        continue;
      if (STR_CASE_CMP(my_codec_param.plname, "CN") == 0 &&
          my_codec_param.plfreq == 48000)
        continue;
      if (STR_CASE_CMP(my_codec_param.plname, "telephone-event") == 0)
        continue;
      ASSERT_EQ(0, acm_b_->RegisterReceiveCodec(my_codec_param)) <<
          "Couldn't register receive codec.\n";
    }
    // Create and connect the channel.
    ASSERT_EQ(0, acm_a_->RegisterTransportCallback(channel_a2b_)) <<
        "Couldn't register Transport callback.\n";
    channel_a2b_->RegisterReceiverACM(acm_b_.get());
  }

  // Runs each of the |num_tests| configurations for |duration_sec| seconds.
  void Perform(const TestSettings* config, size_t num_tests, int duration_sec,
               const char* output_prefix) {
    for (size_t n = 0; n < num_tests; ++n) {
      ApplyConfig(config[n]);
      Run(duration_sec, output_prefix);
    }
  }

 private:
  // Prints and applies one test configuration (codec, ACM, channel).
  void ApplyConfig(const TestSettings& config) {
    printf("====================================\n");
    printf("Test %d \n"
           "Codec: %s, %d kHz, %d channel(s)\n"
           "ACM: DTX %s, FEC %s\n"
           "Channel: %s\n",
           ++test_cntr_, config.codec.name, config.codec.sample_rate_hz,
           config.codec.num_channels, config.acm.dtx ? "on" : "off",
           config.acm.fec ? "on" : "off",
           config.packet_loss ? "with packet-loss" : "no packet-loss");
    SendCodec(config.codec);
    ConfigAcm(config.acm);
    ConfigChannel(config.packet_loss);
  }

  // Looks up and registers the configured send codec on side A, and records
  // its sampling rate for the timestamp-based delay computation.
  void SendCodec(const CodecSettings& config) {
    CodecInst my_codec_param;
    ASSERT_EQ(0, AudioCodingModule::Codec(
        config.name, &my_codec_param, config.sample_rate_hz,
        config.num_channels)) << "Specified codec is not supported.\n";
    encoding_sample_rate_hz_ = my_codec_param.plfreq;
    ASSERT_EQ(0, acm_a_->RegisterSendCodec(my_codec_param)) <<
        "Failed to register send-codec.\n";
  }

  void ConfigAcm(const AcmSettings& config) {
    ASSERT_EQ(0, acm_a_->SetVAD(config.dtx, config.dtx, VADAggr)) <<
        "Failed to set VAD.\n";
    ASSERT_EQ(0, acm_a_->SetREDStatus(config.fec)) <<
        "Failed to set RED.\n";
  }

  void ConfigChannel(bool packet_loss) {
    channel_a2b_->SetFECTestWithPacketLoss(packet_loss);
  }

  // NOTE(review): |output_id| is currently unused; the output file name is
  // built from the command-line flags instead.
  void OpenOutFile(const char* output_id) {
    std::stringstream file_stream;
    file_stream << "delay_test_" << FLAGS_codec << "_" << FLAGS_sample_rate_hz
        << "Hz" << "_" << FLAGS_init_delay << "ms_" << FLAGS_delay << "ms.pcm";
    std::cout << "Output file: " << file_stream.str() << std::endl << std::endl;
    std::string file_name = webrtc::test::OutputPath() + file_stream.str();
    out_file_b_.Open(file_name.c_str(), 32000, "wb");
  }

  // Pumps audio A->B for |duration_sec| seconds (100 frames per second),
  // rewinding the input file at EOF, and logs jitter-buffer statistics.
  void Run(int duration_sec, const char* output_prefix) {
    OpenOutFile(output_prefix);
    AudioFrame audio_frame;
    uint32_t out_freq_hz_b = out_file_b_.SamplingFrequency();
    int num_frames = 0;
    int in_file_frames = 0;
    uint32_t playout_ts;
    uint32_t received_ts;
    double average_delay = 0;
    double inst_delay_sec = 0;
    while (num_frames < (duration_sec * 100)) {
      if (in_file_a_.EndOfFile()) {
        in_file_a_.Rewind();
      }
      // Print delay information every 64 frames (whenever the six low bits
      // of num_frames are all set).
      if ((num_frames & 0x3F) == 0x3F) {
        ACMNetworkStatistics statistics;
        acm_b_->NetworkStatistics(&statistics);
        fprintf(stdout, "delay: min=%3d max=%3d mean=%3d median=%3d"
            " ts-based average = %6.3f, "
            "curr buff-lev = %4u opt buff-lev = %4u \n",
            statistics.minWaitingTimeMs, statistics.maxWaitingTimeMs,
            statistics.meanWaitingTimeMs, statistics.medianWaitingTimeMs,
            average_delay, statistics.currentBufferSize,
            statistics.preferredBufferSize);
        fflush (stdout);
      }
      in_file_a_.Read10MsData(audio_frame);
      ASSERT_EQ(0, acm_a_->Add10MsData(audio_frame));
      ASSERT_LE(0, acm_a_->Process());
      ASSERT_EQ(0, acm_b_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));
      out_file_b_.Write10MsData(
          audio_frame.data_,
          audio_frame.samples_per_channel_ * audio_frame.num_channels_);
      // Timestamp-based instantaneous delay, folded into |average_delay|
      // with an exponential filter after a short warm-up.
      acm_b_->PlayoutTimestamp(&playout_ts);
      received_ts = channel_a2b_->LastInTimestamp();
      inst_delay_sec = static_cast<uint32_t>(received_ts - playout_ts)
          / static_cast<double>(encoding_sample_rate_hz_);
      if (num_frames > 10)
        average_delay = 0.95 * average_delay + 0.05 * inst_delay_sec;
      ++num_frames;
      ++in_file_frames;
    }
    out_file_b_.Close();
  }

  scoped_ptr<AudioCodingModule> acm_a_;  // Sender side.
  scoped_ptr<AudioCodingModule> acm_b_;  // Receiver side.
  Channel* channel_a2b_;                 // Owned; deleted in the destructor.
  PCMFile in_file_a_;
  PCMFile out_file_b_;
  int test_cntr_;
  int encoding_sample_rate_hz_;
};
} // namespace webrtc
// Entry point: parses and validates the flags, then runs one delay test for
// 240 seconds, writing decoded output under the "delay_test" prefix.
// Returns 0 on success, 1 on invalid flag values.
int main(int argc, char* argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  webrtc::TestSettings test_setting;
  // Guard the fixed-size codec-name buffer: an over-long --codec value would
  // otherwise overflow CodecSettings::name via the unchecked strcpy().
  if (FLAGS_codec.size() >= sizeof(test_setting.codec.name)) {
    std::cout << "Codec name too long.\n";
    return 1;
  }
  strcpy(test_setting.codec.name, FLAGS_codec.c_str());
  if (FLAGS_sample_rate_hz != 8000 &&
      FLAGS_sample_rate_hz != 16000 &&
      FLAGS_sample_rate_hz != 32000 &&
      FLAGS_sample_rate_hz != 48000) {
    std::cout << "Invalid sampling rate.\n";
    return 1;
  }
  test_setting.codec.sample_rate_hz = FLAGS_sample_rate_hz;
  if (FLAGS_num_channels < 1 || FLAGS_num_channels > 2) {
    std::cout << "Only mono and stereo are supported.\n";
    return 1;
  }
  test_setting.codec.num_channels = FLAGS_num_channels;
  test_setting.acm.dtx = FLAGS_dtx;
  test_setting.acm.fec = FLAGS_fec;
  test_setting.packet_loss = FLAGS_packet_loss;
  webrtc::DelayTest delay_test;
  delay_test.Initialize();
  delay_test.Perform(&test_setting, 1, 240, "delay_test");
  return 0;
}

View File

@@ -0,0 +1,539 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/typedefs.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
// Compares a "dual-stream" ACM (primary + secondary codec carried in RED)
// against two reference ACMs that each send one codec alone. The fixture
// itself is the transport callback: SendData() stores every payload so
// Validate() can compare the two paths bit-exactly.
class DualStreamTest : public AudioPacketizationCallback,
    public ::testing::Test {
 protected:
  DualStreamTest();
  ~DualStreamTest();

  void RunTest(int frame_size_primary_samples,
               int num_channels_primary,
               int sampling_rate,
               bool start_in_sync,
               int num_channels_input);

  void ApiTest();

  // AudioPacketizationCallback implementation; receives payloads from all
  // three ACMs and files them under the dual-stream/reference bookkeeping.
  int32_t SendData(FrameType frameType, uint8_t payload_type,
                   uint32_t timestamp, const uint8_t* payload_data,
                   uint16_t payload_size,
                   const RTPFragmentationHeader* fragmentation);

  void Perform(bool start_in_sync, int num_channels_input);

  void InitializeSender(int frame_size_primary_samples,
                        int num_channels_primary, int sampling_rate);

  void PopulateCodecInstances(int frame_size_primary_ms,
                              int num_channels_primary, int sampling_rate);

  void Validate(bool start_in_sync, int tolerance);
  bool EqualTimestamp(int stream, int position);
  int EqualPayloadLength(int stream, int position);
  bool EqualPayloadData(int stream, int position);

  // At most this many payloads per stream are held pending comparison.
  static const int kMaxNumStoredPayloads = 2;

  enum {
    kPrimary = 0,
    kSecondary,
    kMaxNumStreams
  };

  scoped_ptr<AudioCodingModule> acm_dual_stream_;
  scoped_ptr<AudioCodingModule> acm_ref_primary_;
  scoped_ptr<AudioCodingModule> acm_ref_secondary_;

  CodecInst primary_encoder_;
  CodecInst secondary_encoder_;
  CodecInst red_encoder_;

  // All arrays below are indexed [stream][slot]; the *_is_stored_ flags mark
  // which slots currently hold a payload awaiting comparison.
  int payload_ref_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];
  int payload_dual_is_stored_[kMaxNumStreams][kMaxNumStoredPayloads];

  uint32_t timestamp_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
  uint32_t timestamp_dual_[kMaxNumStreams][kMaxNumStoredPayloads];

  int payload_len_ref_[kMaxNumStreams][kMaxNumStoredPayloads];
  int payload_len_dual_[kMaxNumStreams][kMaxNumStoredPayloads];

  uint8_t payload_data_ref_[kMaxNumStreams][MAX_PAYLOAD_SIZE_BYTE
      * kMaxNumStoredPayloads];
  uint8_t payload_data_dual_[kMaxNumStreams][MAX_PAYLOAD_SIZE_BYTE
      * kMaxNumStoredPayloads];

  int num_received_payloads_dual_[kMaxNumStreams];
  int num_received_payloads_ref_[kMaxNumStreams];
  int num_compared_payloads_[kMaxNumStreams];

  // Used by SendData() to verify that consecutive dual-stream payload
  // timestamps advance by exactly one pacsize.
  uint32_t last_timestamp_[kMaxNumStreams];
  bool received_payload_[kMaxNumStreams];
};
// Value-initialize every bookkeeping member, not just a subset: the original
// initializer list zeroed timestamp_ref_ but left timestamp_dual_, the
// payload_len_*_ / payload_data_*_ arrays and the CodecInst members
// uninitialized, relying solely on the *_is_stored_ guards to avoid reading
// indeterminate values. Initializers follow member declaration order.
DualStreamTest::DualStreamTest()
    : acm_dual_stream_(AudioCodingModule::Create(0)),
      acm_ref_primary_(AudioCodingModule::Create(1)),
      acm_ref_secondary_(AudioCodingModule::Create(2)),
      primary_encoder_(),
      secondary_encoder_(),
      red_encoder_(),
      payload_ref_is_stored_(),
      payload_dual_is_stored_(),
      timestamp_ref_(),
      timestamp_dual_(),
      payload_len_ref_(),
      payload_len_dual_(),
      payload_data_ref_(),
      payload_data_dual_(),
      num_received_payloads_dual_(),
      num_received_payloads_ref_(),
      num_compared_payloads_(),
      last_timestamp_(),
      received_payload_() {}
// The ACMs are scoped_ptr members; nothing extra to release here.
DualStreamTest::~DualStreamTest() {}
// Looks up the primary (L16), secondary (iSAC at 32 kbps / 30 ms) and RED
// codec instances in the ACM codec database for the requested configuration,
// and asserts that all three were found.
void DualStreamTest::PopulateCodecInstances(int frame_size_primary_ms,
                                            int num_channels_primary,
                                            int sampling_rate) {
  CodecInst my_codec;
  // Invalid values. To check later on if the codecs are found in the
  // database.
  primary_encoder_.pltype = -1;
  secondary_encoder_.pltype = -1;
  red_encoder_.pltype = -1;
  for (int n = 0; n < AudioCodingModule::NumberOfCodecs(); n++) {
    AudioCodingModule::Codec(n, &my_codec);
    if (strcmp(my_codec.plname, "ISAC") == 0
        && my_codec.plfreq == sampling_rate) {
      my_codec.rate = 32000;
      my_codec.pacsize = 30 * sampling_rate / 1000;
      memcpy(&secondary_encoder_, &my_codec, sizeof(my_codec));
    } else if (strcmp(my_codec.plname, "L16") == 0
        && my_codec.channels == num_channels_primary
        && my_codec.plfreq == sampling_rate) {
      my_codec.pacsize = frame_size_primary_ms * sampling_rate / 1000;
      memcpy(&primary_encoder_, &my_codec, sizeof(my_codec));
    } else if (strcmp(my_codec.plname, "red") == 0) {
      memcpy(&red_encoder_, &my_codec, sizeof(my_codec));
    }
  }
  // All three payload types must have been found above.
  ASSERT_GE(primary_encoder_.pltype, 0);
  ASSERT_GE(secondary_encoder_.pltype, 0);
  ASSERT_GE(red_encoder_.pltype, 0);
}
// Initializes all three senders, registers the primary codec on the
// reference-primary and dual-stream ACMs, the secondary codec on the
// reference-secondary ACM and as the dual-stream's secondary, and installs
// this fixture as the transport callback everywhere.
// NOTE(review): despite its name, |frame_size_primary_samples| is forwarded
// to PopulateCodecInstances() as a frame size in milliseconds (callers pass
// 20/40) — confirm and consider renaming.
void DualStreamTest::InitializeSender(int frame_size_primary_samples,
                                      int num_channels_primary,
                                      int sampling_rate) {
  ASSERT_TRUE(acm_dual_stream_.get() != NULL);
  ASSERT_TRUE(acm_ref_primary_.get() != NULL);
  ASSERT_TRUE(acm_ref_secondary_.get() != NULL);

  ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
  ASSERT_EQ(0, acm_ref_primary_->InitializeSender());
  ASSERT_EQ(0, acm_ref_secondary_->InitializeSender());

  PopulateCodecInstances(frame_size_primary_samples, num_channels_primary,
                         sampling_rate);

  ASSERT_EQ(0, acm_ref_primary_->RegisterSendCodec(primary_encoder_));
  ASSERT_EQ(0, acm_ref_secondary_->RegisterSendCodec(secondary_encoder_));
  ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
  ASSERT_EQ(0,
            acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));

  ASSERT_EQ(0, acm_ref_primary_->RegisterTransportCallback(this));
  ASSERT_EQ(0, acm_ref_secondary_->RegisterTransportCallback(this));
  ASSERT_EQ(0, acm_dual_stream_->RegisterTransportCallback(this));
}
// Feeds up to kNumFramesToProcess 10 ms frames through the dual-stream ACM
// and the two reference ACMs and validates that the generated payloads
// match. When |start_in_sync| is false, one frame is first encoded with the
// secondary codec unregistered so the two paths start out of step.
void DualStreamTest::Perform(bool start_in_sync, int num_channels_input) {
  PCMFile pcm_file;
  std::string file_name = test::ResourcePath(
      (num_channels_input == 1) ?
          "audio_coding/testfile32kHz" : "audio_coding/teststereo32kHz",
      "pcm");
  pcm_file.Open(file_name, 32000, "rb");
  pcm_file.ReadStereo(num_channels_input == 2);
  AudioFrame audio_frame;

  // Stereo input with a stereo primary but mono secondary is not bit-exact;
  // allow a small payload-length tolerance in that case.
  int tolerance = 0;
  if (num_channels_input == 2 && primary_encoder_.channels == 2
      && secondary_encoder_.channels == 1) {
    tolerance = 12;
  }

  if (!start_in_sync) {
    pcm_file.Read10MsData(audio_frame);
    // Unregister secondary codec and feed only the primary for one frame.
    acm_dual_stream_->UnregisterSecondarySendCodec();
    EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
    EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
    ASSERT_EQ(0,
              acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
  }

  const int kNumFramesToProcess = 100;
  int frame_cntr = 0;
  while (!pcm_file.EndOfFile() && frame_cntr < kNumFramesToProcess) {
    pcm_file.Read10MsData(audio_frame);
    frame_cntr++;
    EXPECT_EQ(0, acm_dual_stream_->Add10MsData(audio_frame));
    EXPECT_EQ(0, acm_ref_primary_->Add10MsData(audio_frame));
    EXPECT_EQ(0, acm_ref_secondary_->Add10MsData(audio_frame));
    EXPECT_GE(acm_dual_stream_->Process(), 0);
    EXPECT_GE(acm_ref_primary_->Process(), 0);
    EXPECT_GE(acm_ref_secondary_->Process(), 0);
    if (start_in_sync || frame_cntr > 7) {
      // If we haven't started in sync the first few audio frames might
      // slightly differ due to the difference in the state of the resamplers
      // of dual-ACM and reference-ACM.
      Validate(start_in_sync, tolerance);
    } else {
      // SendData stores the payloads, if we are not comparing we have to free
      // the space by resetting these flags.
      memset(payload_ref_is_stored_, 0, sizeof(payload_ref_is_stored_));
      memset(payload_dual_is_stored_, 0, sizeof(payload_dual_is_stored_));
    }
  }
  pcm_file.Close();

  // Make sure that number of received payloads match. In case of secondary
  // encoder, the dual-stream might deliver one lesser payload. The reason is
  // that some secondary payloads are stored to be sent with a payload
  // generated later and the input file may end before the "next" payload.
  EXPECT_EQ(num_received_payloads_ref_[kPrimary],
            num_received_payloads_dual_[kPrimary]);
  EXPECT_TRUE(
      num_received_payloads_ref_[kSecondary]
          == num_received_payloads_dual_[kSecondary]
      || num_received_payloads_ref_[kSecondary]
          == (num_received_payloads_dual_[kSecondary] + 1));

  // Make sure all received payloads are compared.
  if (start_in_sync) {
    EXPECT_EQ(num_received_payloads_dual_[kPrimary],
              num_compared_payloads_[kPrimary]);
    EXPECT_EQ(num_received_payloads_dual_[kSecondary],
              num_compared_payloads_[kSecondary]);
  } else {
    // In asynchronous test we don't compare couple of first frames, so we
    // should account for them in our counting.
    EXPECT_GE(num_compared_payloads_[kPrimary],
              num_received_payloads_dual_[kPrimary] - 4);
    EXPECT_GE(num_compared_payloads_[kSecondary],
              num_received_payloads_dual_[kSecondary] - 4);
  }
}
// True when the dual-stream ACM and the reference ACM stamped the payload in
// the given slot with the same RTP timestamp.
bool DualStreamTest::EqualTimestamp(int stream_index, int position) {
  return timestamp_dual_[stream_index][position] ==
      timestamp_ref_[stream_index][position];
}
// Absolute difference, in bytes, between the payload lengths produced by the
// dual-stream ACM and the reference ACM for the given stored payload.
int DualStreamTest::EqualPayloadLength(int stream_index, int position) {
  const int len_dual = payload_len_dual_[stream_index][position];
  const int len_ref = payload_len_ref_[stream_index][position];
  return (len_dual >= len_ref) ? (len_dual - len_ref) : (len_ref - len_dual);
}
// Byte-wise comparison of the stored dual-stream and reference payloads in
// the given slot. Precondition (asserted): both payloads have equal length.
bool DualStreamTest::EqualPayloadData(int stream_index, int position) {
  assert(
      payload_len_dual_[stream_index][position]
          == payload_len_ref_[stream_index][position]);
  const int offset = position * MAX_PAYLOAD_SIZE_BYTE;
  const uint8_t* dual = &payload_data_dual_[stream_index][offset];
  const uint8_t* ref = &payload_data_ref_[stream_index][offset];
  const int length = payload_len_dual_[stream_index][position];
  for (int n = 0; n < length; n++) {
    if (dual[n] != ref[n])
      return false;
  }
  return true;
}
// Compares every slot that holds a payload from BOTH paths. Primary payloads
// must match exactly; secondary payloads may differ in length by up to
// |tolerance| bytes. Compared slots are cleared for reuse.
void DualStreamTest::Validate(bool start_in_sync, int tolerance) {
  for (int stream_index = 0; stream_index < kMaxNumStreams; stream_index++) {
    int my_tolerance = stream_index == kPrimary ? 0 : tolerance;
    for (int position = 0; position < kMaxNumStoredPayloads; position++) {
      if (payload_ref_is_stored_[stream_index][position] == 1
          && payload_dual_is_stored_[stream_index][position] == 1) {
        // Check timestamps only if codecs started in sync or it is primary.
        if (start_in_sync || stream_index == 0)
          EXPECT_TRUE(EqualTimestamp(stream_index, position));
        EXPECT_LE(EqualPayloadLength(stream_index, position), my_tolerance);
        if (my_tolerance == 0)
          EXPECT_TRUE(EqualPayloadData(stream_index, position));
        num_compared_payloads_[stream_index]++;
        // Release the slot.
        payload_ref_is_stored_[stream_index][position] = 0;
        payload_dual_is_stored_[stream_index][position] = 0;
      }
    }
  }
}
// Transport callback shared by all three ACMs. RED payloads come from the
// dual-stream ACM and are split into their primary/secondary fragments;
// plain payloads come from the reference ACMs. Each payload is stored in a
// free slot (at most kMaxNumStoredPayloads pending per stream) until
// Validate() consumes it. Returns 0 on success, -1 on bookkeeping errors
// (unknown payload type, missing/unexpected fragmentation, no free slot, or
// a timestamp that did not advance by one pacsize).
int32_t DualStreamTest::SendData(FrameType frameType, uint8_t payload_type,
                                 uint32_t timestamp,
                                 const uint8_t* payload_data,
                                 uint16_t payload_size,
                                 const RTPFragmentationHeader* fragmentation) {
  int position;
  int stream_index;
  if (payload_type == red_encoder_.pltype) {
    // Dual-stream path: payloads arrive wrapped in RED.
    if (fragmentation == NULL) {
      assert(false);
      return -1;
    }
    // As the oldest payloads are in the higher indices of fragmentation,
    // to be able to check the increment of timestamps are correct we loop
    // backward.
    for (int n = fragmentation->fragmentationVectorSize - 1; n >= 0; --n) {
      if (fragmentation->fragmentationPlType[n] == primary_encoder_.pltype) {
        // Received primary payload from dual stream.
        stream_index = kPrimary;
      } else if (fragmentation->fragmentationPlType[n]
          == secondary_encoder_.pltype) {
        // Received secondary payload from dual stream.
        stream_index = kSecondary;
      } else {
        assert(false);
        return -1;
      }
      num_received_payloads_dual_[stream_index]++;
      // Pick the first free storage slot.
      if (payload_dual_is_stored_[stream_index][0] == 0) {
        position = 0;
      } else if (payload_dual_is_stored_[stream_index][1] == 0) {
        position = 1;
      } else {
        assert(false);
        return -1;
      }
      timestamp_dual_[stream_index][position] = timestamp
          - fragmentation->fragmentationTimeDiff[n];
      payload_len_dual_[stream_index][position] = fragmentation
          ->fragmentationLength[n];
      memcpy(
          &payload_data_dual_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
          &payload_data[fragmentation->fragmentationOffset[n]],
          fragmentation->fragmentationLength[n]);
      payload_dual_is_stored_[stream_index][position] = 1;
      // Check if timestamps are incremented correctly.
      if (received_payload_[stream_index]) {
        int t = timestamp_dual_[stream_index][position]
            - last_timestamp_[stream_index];
        if ((stream_index == kPrimary) && (t != primary_encoder_.pacsize)) {
          assert(false);
          return -1;
        }
        if ((stream_index == kSecondary) && (t != secondary_encoder_.pacsize)) {
          assert(false);
          return -1;
        }
      } else {
        received_payload_[stream_index] = true;
      }
      last_timestamp_[stream_index] = timestamp_dual_[stream_index][position];
    }
  } else {
    // Reference path: a plain (non-RED) payload, no fragmentation expected.
    if (fragmentation != NULL) {
      assert(false);
      return -1;
    }
    if (payload_type == primary_encoder_.pltype) {
      stream_index = kPrimary;
    } else if (payload_type == secondary_encoder_.pltype) {
      stream_index = kSecondary;
    } else {
      assert(false);
      return -1;
    }
    num_received_payloads_ref_[stream_index]++;
    // Pick the first free storage slot.
    if (payload_ref_is_stored_[stream_index][0] == 0) {
      position = 0;
    } else if (payload_ref_is_stored_[stream_index][1] == 0) {
      position = 1;
    } else {
      assert(false);
      return -1;
    }
    timestamp_ref_[stream_index][position] = timestamp;
    payload_len_ref_[stream_index][position] = payload_size;
    memcpy(&payload_data_ref_[stream_index][position * MAX_PAYLOAD_SIZE_BYTE],
           payload_data, payload_size);
    payload_ref_is_stored_[stream_index][position] = 1;
  }
  return 0;
}
// Bit-exactness tests where the primary and secondary streams start in sync.
// Naming: input channels / primary codec channels / band (WB = 16 kHz,
// SWB = 32 kHz) / primary frame size.

// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimaryWb20Ms)) {
  InitializeSender(20, 1, 16000);
  Perform(true, 1);
}

// Mono input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInput_StereoPrimaryWb20Ms)) {
  InitializeSender(20, 2, 16000);
  Perform(true, 1);
}

// Mono input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimarySwb20Ms)) {
  InitializeSender(20, 1, 32000);
  Perform(true, 1);
}

// Mono input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInputStereoPrimarySwb20Ms)) {
  InitializeSender(20, 2, 32000);
  Perform(true, 1);
}

// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInputMonoPrimaryWb40Ms)) {
  InitializeSender(40, 1, 16000);
  Perform(true, 1);
}

// Mono input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncMonoInputStereoPrimaryWb40Ms)) {
  InitializeSender(40, 2, 16000);
  Perform(true, 1);
}

// Stereo input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimaryWb20Ms)) {
  InitializeSender(20, 1, 16000);
  Perform(true, 2);
}

// Stereo input, stereo primary WB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimaryWb20Ms)) {
  InitializeSender(20, 2, 16000);
  Perform(true, 2);
}

// Stereo input, mono primary SWB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimarySwb20Ms)) {
  InitializeSender(20, 1, 32000);
  Perform(true, 2);
}

// Stereo input, stereo primary SWB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimarySwb20Ms)) {
  InitializeSender(20, 2, 32000);
  Perform(true, 2);
}

// Stereo input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputMonoPrimaryWb40Ms)) {
  InitializeSender(40, 1, 16000);
  Perform(true, 2);
}

// Stereo input, stereo primary WB 40 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactSyncStereoInputStereoPrimaryWb40Ms)) {
  InitializeSender(40, 2, 16000);
  Perform(true, 2);
}
// Asynchronous test, ACM is fed with data then secondary coder is registered.
// Mono input, mono primary WB 20 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactAsyncMonoInputMonoPrimaryWb20Ms)) {
  InitializeSender(20, 1, 16000);
  Perform(false, 1);
}

// Mono input, mono primary WB 40 ms frame.
TEST_F(DualStreamTest,
       DISABLED_ON_ANDROID(BitExactAsyncMonoInputMonoPrimaryWb40Ms)) {
  InitializeSender(40, 1, 16000);
  Perform(false, 1);
}
// Exercises the secondary-codec API surface: registration ordering
// constraints and the interaction between a secondary codec and VAD/DTX.
TEST_F(DualStreamTest, DISABLED_ON_ANDROID(Api)) {
  PopulateCodecInstances(20, 1, 16000);
  CodecInst my_codec;
  ASSERT_EQ(0, acm_dual_stream_->InitializeSender());
  ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
  // Not allowed to register secondary codec if primary is not registered yet.
  ASSERT_EQ(-1,
            acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
  ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
  ASSERT_EQ(0, acm_dual_stream_->RegisterSendCodec(primary_encoder_));
  ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADNormal));
  // Make sure VAD is activated.
  bool vad_status;
  bool dtx_status;
  ACMVADMode vad_mode;
  EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
  EXPECT_TRUE(vad_status);
  EXPECT_TRUE(dtx_status);
  EXPECT_EQ(VADNormal, vad_mode);
  ASSERT_EQ(0,
            acm_dual_stream_->RegisterSecondarySendCodec(secondary_encoder_));
  ASSERT_EQ(0, acm_dual_stream_->SecondarySendCodec(&my_codec));
  ASSERT_EQ(0, memcmp(&my_codec, &secondary_encoder_, sizeof(my_codec)));
  // Test if VAD got disabled after registering secondary codec.
  EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
  EXPECT_FALSE(vad_status);
  EXPECT_FALSE(dtx_status);
  // Activating VAD should fail.
  ASSERT_EQ(-1, acm_dual_stream_->SetVAD(true, true, VADNormal));
  // Unregister secondary encoder and it should be possible to activate VAD.
  acm_dual_stream_->UnregisterSecondarySendCodec();
  // Querying the secondary codec should fail now.
  ASSERT_EQ(-1, acm_dual_stream_->SecondarySendCodec(&my_codec));
  ASSERT_EQ(0, acm_dual_stream_->SetVAD(true, true, VADVeryAggr));
  // Make sure VAD is activated.
  EXPECT_EQ(0, acm_dual_stream_->VAD(&vad_status, &dtx_status, &vad_mode));
  EXPECT_TRUE(vad_status);
  EXPECT_TRUE(dtx_status);
  EXPECT_EQ(VADVeryAggr, vad_mode);
}
} // namespace webrtc

View File

@@ -0,0 +1,395 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/iSACTest.h"
#include <ctype.h>
#include <stdio.h>
#include <string.h>
#if _WIN32
#include <windows.h>
#elif WEBRTC_LINUX
#include <time.h>
#else
#include <sys/time.h>
#include <time.h>
#endif
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/tick_util.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Resets |isacConfig| to neutral values: 0 (or -1 for encodingMode) means
// "leave the corresponding iSAC setting untouched" when the struct is later
// applied through SetISAConfig().
void SetISACConfigDefault(ACMTestISACConfig& isacConfig) {
  isacConfig.encodingMode = -1;
  isacConfig.initRateBitPerSec = 0;
  isacConfig.initFrameSizeInMsec = 0;
  isacConfig.currentRateBitPerSec = 0;
  isacConfig.currentFrameSizeMsec = 0;
  isacConfig.maxRateBitPerSec = 0;
  isacConfig.maxPayloadSizeByte = 0;
  isacConfig.enforceFrameSize = false;
}
// Applies |isacConfig| to |acm|'s currently registered iSAC send codec.
// Zero-valued fields mean "leave this setting unchanged"; a negative
// currentRateBitPerSec switches iSAC to adaptive (channel-dependent) mode.
// Always returns 0; failures are reported through the EXPECT_EQ checks.
// NOTE(review): |testMode| is unused, and the name looks like a typo for
// SetISACConfig, but renaming would break existing callers.
int16_t SetISAConfig(ACMTestISACConfig& isacConfig, AudioCodingModule* acm,
                     int testMode) {
  if ((isacConfig.currentRateBitPerSec != 0)
      || (isacConfig.currentFrameSizeMsec != 0)) {
    CodecInst sendCodec;
    EXPECT_EQ(0, acm->SendCodec(&sendCodec));
    if (isacConfig.currentRateBitPerSec < 0) {
      // Register iSAC in adaptive (channel-dependent) mode.
      sendCodec.rate = -1;
      EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
    } else {
      if (isacConfig.currentRateBitPerSec != 0) {
        sendCodec.rate = isacConfig.currentRateBitPerSec;
      }
      if (isacConfig.currentFrameSizeMsec != 0) {
        // Frame size in ms converted to samples at the codec's plfreq.
        sendCodec.pacsize = isacConfig.currentFrameSizeMsec
            * (sendCodec.plfreq / 1000);
      }
      EXPECT_EQ(0, acm->RegisterSendCodec(sendCodec));
    }
  }
  if (isacConfig.maxRateBitPerSec > 0) {
    // Set max rate.
    EXPECT_EQ(0, acm->SetISACMaxRate(isacConfig.maxRateBitPerSec));
  }
  if (isacConfig.maxPayloadSizeByte > 0) {
    // Set max payload size.
    EXPECT_EQ(0, acm->SetISACMaxPayloadSize(isacConfig.maxPayloadSizeByte));
  }
  if ((isacConfig.initFrameSizeInMsec != 0)
      || (isacConfig.initRateBitPerSec != 0)) {
    EXPECT_EQ(0, acm->ConfigISACBandwidthEstimator(
        static_cast<uint8_t>(isacConfig.initFrameSizeInMsec),
        static_cast<uint16_t>(isacConfig.initRateBitPerSec),
        isacConfig.enforceFrameSize));
  }
  return 0;
}
// Creates the two ACM instances (IDs 1 and 2) acting as the two endpoints
// of the simulated call. |testMode| == 0 runs fully automated; non-zero
// modes are interactive/verbose (see Perform()).
ISACTest::ISACTest(int testMode)
    : _acmA(AudioCodingModule::Create(1)),
      _acmB(AudioCodingModule::Create(2)),
      _testMode(testMode) {}
// Members are scoped_ptr / value types; nothing to release manually.
ISACTest::~ISACTest() {}
// Finds the wideband (16 kHz) and super-wideband (32 kHz) iSAC entries in
// the codec database, wires both ACMs together with transport channels in
// each direction, and primes the connection by running the whole input
// file through once. Uses EXPECT_* so failures are reported but setup
// continues.
void ISACTest::Setup() {
  int codecCntr;
  CodecInst codecParam;
  // Scan the codec database for the two iSAC variants.
  for (codecCntr = 0; codecCntr < AudioCodingModule::NumberOfCodecs();
      codecCntr++) {
    EXPECT_EQ(0, AudioCodingModule::Codec(codecCntr, &codecParam));
    if (!STR_CASE_CMP(codecParam.plname, "ISAC")
        && codecParam.plfreq == 16000) {
      // CodecInst is a plain struct: use assignment instead of memcpy.
      _paramISAC16kHz = codecParam;
      _idISAC16kHz = codecCntr;
    }
    if (!STR_CASE_CMP(codecParam.plname, "ISAC")
        && codecParam.plfreq == 32000) {
      _paramISAC32kHz = codecParam;
      _idISAC32kHz = codecCntr;
    }
  }
  // Register both iSAC-wb & iSAC-swb in both sides as receiver codecs.
  EXPECT_EQ(0, _acmA->RegisterReceiveCodec(_paramISAC16kHz));
  EXPECT_EQ(0, _acmA->RegisterReceiveCodec(_paramISAC32kHz));
  EXPECT_EQ(0, _acmB->RegisterReceiveCodec(_paramISAC16kHz));
  EXPECT_EQ(0, _acmB->RegisterReceiveCodec(_paramISAC32kHz));
  //--- Set A-to-B channel
  _channel_A2B.reset(new Channel);
  EXPECT_EQ(0, _acmA->RegisterTransportCallback(_channel_A2B.get()));
  _channel_A2B->RegisterReceiverACM(_acmB.get());
  //--- Set B-to-A channel
  _channel_B2A.reset(new Channel);
  EXPECT_EQ(0, _acmB->RegisterTransportCallback(_channel_B2A.get()));
  _channel_B2A->RegisterReceiverACM(_acmA.get());
  file_name_swb_ = webrtc::test::ResourcePath("audio_coding/testfile32kHz",
                                              "pcm");
  EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
  EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
  // Prime the pipeline: stream the full file once, discarding output files
  // afterwards, and verify both sides report a receive codec.
  _inFileA.Open(file_name_swb_, 32000, "rb");
  std::string fileNameA = webrtc::test::OutputPath() + "testisac_a.pcm";
  std::string fileNameB = webrtc::test::OutputPath() + "testisac_b.pcm";
  _outFileA.Open(fileNameA, 32000, "wb");
  _outFileB.Open(fileNameB, 32000, "wb");
  while (!_inFileA.EndOfFile()) {
    Run10ms();
  }
  CodecInst receiveCodec;
  EXPECT_EQ(0, _acmA->ReceiveCodec(&receiveCodec));
  EXPECT_EQ(0, _acmB->ReceiveCodec(&receiveCodec));
  _inFileA.Close();
  _outFileA.Close();
  _outFileB.Close();
}
// Runs the full iSAC suite: adaptive rate, bandwidth-estimator
// initialization, fixed rates, payload-size capping, max-rate capping and
// sampling-rate switching. In interactive modes (_testMode != 0) the caps
// are read from stdin.
void ISACTest::Perform() {
  Setup();
  int16_t testNr = 0;
  ACMTestISACConfig wbISACConfig;
  ACMTestISACConfig swbISACConfig;
  // Test: both sides in adaptive (channel-dependent) rate mode.
  SetISACConfigDefault(wbISACConfig);
  SetISACConfigDefault(swbISACConfig);
  wbISACConfig.currentRateBitPerSec = -1;
  swbISACConfig.currentRateBitPerSec = -1;
  testNr++;
  EncodeDecode(testNr, wbISACConfig, swbISACConfig);
  if (_testMode != 0) {
    // Adaptive mode with explicit bandwidth-estimator initial values.
    SetISACConfigDefault(wbISACConfig);
    SetISACConfigDefault(swbISACConfig);
    wbISACConfig.currentRateBitPerSec = -1;
    swbISACConfig.currentRateBitPerSec = -1;
    wbISACConfig.initRateBitPerSec = 13000;
    wbISACConfig.initFrameSizeInMsec = 60;
    swbISACConfig.initRateBitPerSec = 20000;
    swbISACConfig.initFrameSizeInMsec = 30;
    testNr++;
    EncodeDecode(testNr, wbISACConfig, swbISACConfig);
    // Fixed send rates on both sides.
    SetISACConfigDefault(wbISACConfig);
    SetISACConfigDefault(swbISACConfig);
    wbISACConfig.currentRateBitPerSec = 20000;
    swbISACConfig.currentRateBitPerSec = 48000;
    testNr++;
    EncodeDecode(testNr, wbISACConfig, swbISACConfig);
    // Fixed rates with a 60 ms wideband frame size.
    wbISACConfig.currentRateBitPerSec = 16000;
    swbISACConfig.currentRateBitPerSec = 30000;
    wbISACConfig.currentFrameSizeMsec = 60;
    testNr++;
    EncodeDecode(testNr, wbISACConfig, swbISACConfig);
  }
  // Test: default configuration, nothing explicitly requested.
  SetISACConfigDefault(wbISACConfig);
  SetISACConfigDefault(swbISACConfig);
  testNr++;
  EncodeDecode(testNr, wbISACConfig, swbISACConfig);
  // Test: cap the payload size — 200 bytes in automated modes, otherwise
  // values entered by the user.
  int user_input;
  if ((_testMode == 0) || (_testMode == 1)) {
    swbISACConfig.maxPayloadSizeByte = static_cast<uint16_t>(200);
    wbISACConfig.maxPayloadSizeByte = static_cast<uint16_t>(200);
  } else {
    printf("Enter the max payload-size for side A: ");
    CHECK_ERROR(scanf("%d", &user_input));
    swbISACConfig.maxPayloadSizeByte = (uint16_t) user_input;
    printf("Enter the max payload-size for side B: ");
    CHECK_ERROR(scanf("%d", &user_input));
    wbISACConfig.maxPayloadSizeByte = (uint16_t) user_input;
  }
  testNr++;
  EncodeDecode(testNr, wbISACConfig, swbISACConfig);
  _acmA->ResetEncoder();
  _acmB->ResetEncoder();
  // Test: cap the instantaneous rate — 48 kbps in automated modes,
  // otherwise values entered by the user.
  SetISACConfigDefault(wbISACConfig);
  SetISACConfigDefault(swbISACConfig);
  if ((_testMode == 0) || (_testMode == 1)) {
    swbISACConfig.maxRateBitPerSec = static_cast<uint32_t>(48000);
    wbISACConfig.maxRateBitPerSec = static_cast<uint32_t>(48000);
  } else {
    printf("Enter the max rate for side A: ");
    CHECK_ERROR(scanf("%d", &user_input));
    swbISACConfig.maxRateBitPerSec = (uint32_t) user_input;
    printf("Enter the max rate for side B: ");
    CHECK_ERROR(scanf("%d", &user_input));
    wbISACConfig.maxRateBitPerSec = (uint32_t) user_input;
  }
  testNr++;
  EncodeDecode(testNr, wbISACConfig, swbISACConfig);
  // Test: toggle between wideband and super-wideband; only a few switches
  // in automated mode to keep the run short.
  testNr++;
  if (_testMode == 0) {
    SwitchingSamplingRate(testNr, 4);
  } else {
    SwitchingSamplingRate(testNr, 80);
  }
}
void ISACTest::Run10ms() {
AudioFrame audioFrame;
EXPECT_GT(_inFileA.Read10MsData(audioFrame), 0);
EXPECT_EQ(0, _acmA->Add10MsData(audioFrame));
EXPECT_EQ(0, _acmB->Add10MsData(audioFrame));
EXPECT_GT(_acmA->Process(), -1);
EXPECT_GT(_acmB->Process(), -1);
EXPECT_EQ(0, _acmA->PlayoutData10Ms(32000, &audioFrame));
_outFileA.Write10MsData(audioFrame);
EXPECT_EQ(0, _acmB->PlayoutData10Ms(32000, &audioFrame));
_outFileB.Write10MsData(audioFrame);
}
void ISACTest::EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
ACMTestISACConfig& swbISACConfig) {
// Files in Side A and B
_inFileA.Open(file_name_swb_, 32000, "rb", true);
_inFileB.Open(file_name_swb_, 32000, "rb", true);
std::string file_name_out;
std::stringstream file_stream_a;
std::stringstream file_stream_b;
file_stream_a << webrtc::test::OutputPath();
file_stream_b << webrtc::test::OutputPath();
file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
file_name_out = file_stream_a.str();
_outFileA.Open(file_name_out, 32000, "wb");
file_name_out = file_stream_b.str();
_outFileB.Open(file_name_out, 32000, "wb");
EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC16kHz));
EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC32kHz));
EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
// Side A is sending super-wideband, and side B is sending wideband.
SetISAConfig(swbISACConfig, _acmA.get(), _testMode);
SetISAConfig(wbISACConfig, _acmB.get(), _testMode);
bool adaptiveMode = false;
if ((swbISACConfig.currentRateBitPerSec == -1)
|| (wbISACConfig.currentRateBitPerSec == -1)) {
adaptiveMode = true;
}
_myTimer.Reset();
_channel_A2B->ResetStats();
_channel_B2A->ResetStats();
char currentTime[500];
CodecInst sendCodec;
EventWrapper* myEvent = EventWrapper::Create();
EXPECT_TRUE(myEvent->StartTimer(true, 10));
while (!(_inFileA.EndOfFile() || _inFileA.Rewinded())) {
Run10ms();
_myTimer.Tick10ms();
_myTimer.CurrentTimeHMS(currentTime);
if ((adaptiveMode) && (_testMode != 0)) {
myEvent->Wait(5000);
EXPECT_EQ(0, _acmA->SendCodec(&sendCodec));
EXPECT_EQ(0, _acmB->SendCodec(&sendCodec));
}
}
if (_testMode != 0) {
printf("\n\nSide A statistics\n\n");
_channel_A2B->PrintStats(_paramISAC32kHz);
printf("\n\nSide B statistics\n\n");
_channel_B2A->PrintStats(_paramISAC16kHz);
}
_channel_A2B->ResetStats();
_channel_B2A->ResetStats();
_outFileA.Close();
_outFileB.Close();
_inFileA.Close();
_inFileB.Close();
}
// Re-registers each side's send codec every time its input file reaches
// EOF, until each side has switched |maxSampRateChange| times
// (2 * maxSampRateChange switches total).
void ISACTest::SwitchingSamplingRate(int testNr, int maxSampRateChange) {
  // Files in Side A
  _inFileA.Open(file_name_swb_, 32000, "rb");
  _inFileB.Open(file_name_swb_, 32000, "rb");
  std::string file_name_out;
  std::stringstream file_stream_a;
  std::stringstream file_stream_b;
  file_stream_a << webrtc::test::OutputPath();
  file_stream_b << webrtc::test::OutputPath();
  file_stream_a << "out_iSACTest_A_" << testNr << ".pcm";
  file_stream_b << "out_iSACTest_B_" << testNr << ".pcm";
  file_name_out = file_stream_a.str();
  _outFileA.Open(file_name_out, 32000, "wb");
  file_name_out = file_stream_b.str();
  _outFileB.Open(file_name_out, 32000, "wb");
  // Start with side A sending super-wideband and side B sending wideband.
  // Toggle sending wideband/super-wideband in this test.
  EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
  EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
  int numSendCodecChanged = 0;
  _myTimer.Reset();
  char currentTime[50];
  while (numSendCodecChanged < (maxSampRateChange << 1)) {
    Run10ms();
    _myTimer.Tick10ms();
    _myTimer.CurrentTimeHMS(currentTime);
    if (_testMode == 2)
      printf("\r%s", currentTime);
    // NOTE(review): the files are always (re)opened at 32000 Hz, so
    // SamplingFrequency() seemingly never returns 16000 and only the
    // else-branches below appear reachable — confirm whether the toggle
    // back to super-wideband ever happens.
    if (_inFileA.EndOfFile()) {
      if (_inFileA.SamplingFrequency() == 16000) {
        // Switch side A to send super-wideband.
        _inFileA.Close();
        _inFileA.Open(file_name_swb_, 32000, "rb");
        EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC32kHz));
      } else {
        // Switch side A to send wideband.
        _inFileA.Close();
        _inFileA.Open(file_name_swb_, 32000, "rb");
        EXPECT_EQ(0, _acmA->RegisterSendCodec(_paramISAC16kHz));
      }
      numSendCodecChanged++;
    }
    if (_inFileB.EndOfFile()) {
      if (_inFileB.SamplingFrequency() == 16000) {
        // Switch side B to send super-wideband.
        _inFileB.Close();
        _inFileB.Open(file_name_swb_, 32000, "rb");
        EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC32kHz));
      } else {
        // Switch side B to send wideband.
        _inFileB.Close();
        _inFileB.Open(file_name_swb_, 32000, "rb");
        EXPECT_EQ(0, _acmB->RegisterSendCodec(_paramISAC16kHz));
      }
      numSendCodecChanged++;
    }
  }
  _outFileA.Close();
  _outFileB.Close();
  _inFileA.Close();
  _inFileB.Close();
}
} // namespace webrtc

View File

@@ -0,0 +1,81 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_
#include <string.h>
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#define MAX_FILE_NAME_LENGTH_BYTE 500
#define NO_OF_CLIENTS 15
namespace webrtc {
// Per-side iSAC configuration for ISACTest. A value of 0 (or -1 for
// encodingMode) leaves the corresponding ACM setting untouched; see
// SetISAConfig() in iSACTest.cc for how each field is applied.
struct ACMTestISACConfig {
  int32_t currentRateBitPerSec;  // Send rate; negative requests adaptive mode.
  int16_t currentFrameSizeMsec;  // Send frame size in milliseconds.
  uint32_t maxRateBitPerSec;  // Passed to SetISACMaxRate() when > 0.
  int16_t maxPayloadSizeByte;  // Passed to SetISACMaxPayloadSize() when > 0.
  int16_t encodingMode;  // Defaults to -1; not read by SetISAConfig().
  uint32_t initRateBitPerSec;  // Bandwidth-estimator initial rate.
  int16_t initFrameSizeInMsec;  // Bandwidth-estimator initial frame size.
  bool enforceFrameSize;  // Forwarded to ConfigISACBandwidthEstimator().
};
// Two-way iSAC call simulation between two ACM instances ("A" and "B")
// connected through in-process Channel objects. Exercises wideband
// (16 kHz) and super-wideband (32 kHz) operation, rate/payload capping
// and send-codec switching.
class ISACTest : public ACMTest {
 public:
  explicit ISACTest(int testMode);
  ~ISACTest();
  // Runs all sub-tests; entry point required by ACMTest.
  void Perform();
 private:
  // One-time wiring of codecs, channels and files.
  void Setup();
  // Moves one 10 ms frame of audio through both directions.
  void Run10ms();
  // One encode/decode pass with the given per-side configurations.
  void EncodeDecode(int testNr, ACMTestISACConfig& wbISACConfig,
                    ACMTestISACConfig& swbISACConfig);
  // Re-registers send codecs as input files wrap, up to the given count.
  void SwitchingSamplingRate(int testNr, int maxSampRateChange);
  scoped_ptr<AudioCodingModule> _acmA;
  scoped_ptr<AudioCodingModule> _acmB;
  scoped_ptr<Channel> _channel_A2B;  // Transport A -> B.
  scoped_ptr<Channel> _channel_B2A;  // Transport B -> A.
  PCMFile _inFileA;
  PCMFile _inFileB;
  PCMFile _outFileA;
  PCMFile _outFileB;
  uint8_t _idISAC16kHz;  // Codec-database index of wideband iSAC.
  uint8_t _idISAC32kHz;  // Codec-database index of super-wideband iSAC.
  CodecInst _paramISAC16kHz;
  CodecInst _paramISAC32kHz;
  std::string file_name_swb_;  // 32 kHz input resource file.
  ACMTestTimer _myTimer;
  int _testMode;  // 0: automated; non-zero: interactive/verbose.
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_ISACTEST_H_

View File

@@ -0,0 +1,176 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include <assert.h>
#include <math.h>
#include <iostream>
#include "gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
namespace {
double FrameRms(AudioFrame& frame) {
int samples = frame.num_channels_ * frame.samples_per_channel_;
double rms = 0;
for (int n = 0; n < samples; ++n)
rms += frame.data_[n] * frame.data_[n];
rms /= samples;
rms = sqrt(rms);
return rms;
}
}
// Checks that ACM honors SetInitialPlayoutDelay(): a constant-amplitude
// signal sent through the codec must take at least |initial_delay_ms| (and
// no more than 100 ms extra) before it appears in the decoded output.
class InitialPlayoutDelayTest : public ::testing::Test {
 protected:
  InitialPlayoutDelayTest()
      : acm_a_(AudioCodingModule::Create(0)),
        acm_b_(AudioCodingModule::Create(1)) {}

  // channel_a2b_ is owned by scoped_ptr (matching the other ACM tests), so
  // no manual delete is required here.
  ~InitialPlayoutDelayTest() {}

  void SetUp() {
    ASSERT_TRUE(acm_a_.get() != NULL);
    ASSERT_TRUE(acm_b_.get() != NULL);
    EXPECT_EQ(0, acm_b_->InitializeReceiver());
    EXPECT_EQ(0, acm_a_->InitializeReceiver());
    // Register all L16 codecs in receiver.
    CodecInst codec;
    const int kFsHz[3] = { 8000, 16000, 32000 };
    const int kChannels[2] = { 1, 2 };
    for (int n = 0; n < 3; ++n) {
      for (int k = 0; k < 2; ++k) {
        AudioCodingModule::Codec("L16", &codec, kFsHz[n], kChannels[k]);
        acm_b_->RegisterReceiveCodec(codec);
      }
    }
    // Create and connect the channel
    channel_a2b_.reset(new Channel);
    acm_a_->RegisterTransportCallback(channel_a2b_.get());
    channel_a2b_->RegisterReceiverACM(acm_b_.get());
  }

  void NbMono() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 8000, 1);
    codec.pacsize = codec.plfreq * 30 / 1000;  // 30 ms packets.
    Run(codec, 1000);
  }

  void WbMono() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 16000, 1);
    codec.pacsize = codec.plfreq * 30 / 1000;  // 30 ms packets.
    Run(codec, 1000);
  }

  void SwbMono() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 32000, 1);
    codec.pacsize = codec.plfreq * 10 / 1000;  // 10 ms packets.
    Run(codec, 400);  // Memory constraints limit the buffer at <500 ms.
  }

  void NbStereo() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 8000, 2);
    codec.pacsize = codec.plfreq * 30 / 1000;  // 30 ms packets.
    Run(codec, 1000);
  }

  void WbStereo() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 16000, 2);
    codec.pacsize = codec.plfreq * 30 / 1000;  // 30 ms packets.
    Run(codec, 1000);
  }

  void SwbStereo() {
    CodecInst codec;
    AudioCodingModule::Codec("L16", &codec, 32000, 2);
    codec.pacsize = codec.plfreq * 10 / 1000;  // 10 ms packets.
    Run(codec, 400);  // Memory constraints limit the buffer at <500 ms.
  }

 private:
  // Sends a constant signal of amplitude |kAmp| until it is heard on the
  // receive side (RMS >= kAmp / 2), then checks that the elapsed number of
  // 10 ms frames matches |initial_delay_ms| within 100 ms.
  void Run(CodecInst codec, int initial_delay_ms) {
    AudioFrame in_audio_frame;
    AudioFrame out_audio_frame;
    int num_frames = 0;
    const int kAmp = 10000;
    in_audio_frame.sample_rate_hz_ = codec.plfreq;
    in_audio_frame.num_channels_ = codec.channels;
    in_audio_frame.samples_per_channel_ = codec.plfreq / 100;  // 10 ms.
    int samples = in_audio_frame.num_channels_ *
        in_audio_frame.samples_per_channel_;
    for (int n = 0; n < samples; ++n) {
      in_audio_frame.data_[n] = kAmp;
    }
    uint32_t timestamp = 0;
    double rms = 0;
    ASSERT_EQ(0, acm_a_->RegisterSendCodec(codec));
    acm_b_->SetInitialPlayoutDelay(initial_delay_ms);
    while (rms < kAmp / 2) {
      in_audio_frame.timestamp_ = timestamp;
      timestamp += in_audio_frame.samples_per_channel_;
      ASSERT_EQ(0, acm_a_->Add10MsData(in_audio_frame));
      ASSERT_LE(0, acm_a_->Process());
      ASSERT_EQ(0, acm_b_->PlayoutData10Ms(codec.plfreq, &out_audio_frame));
      rms = FrameRms(out_audio_frame);
      ++num_frames;
    }
    ASSERT_GE(num_frames * 10, initial_delay_ms);
    ASSERT_LE(num_frames * 10, initial_delay_ms + 100);
  }

  scoped_ptr<AudioCodingModule> acm_a_;
  scoped_ptr<AudioCodingModule> acm_b_;
  scoped_ptr<Channel> channel_a2b_;
};
// Instantiate the delay check for every L16 rate / channel combination.
TEST_F(InitialPlayoutDelayTest, NbMono) { NbMono(); }
TEST_F(InitialPlayoutDelayTest, WbMono) { WbMono(); }
TEST_F(InitialPlayoutDelayTest, SwbMono) { SwbMono(); }
TEST_F(InitialPlayoutDelayTest, NbStereo) { NbStereo(); }
TEST_F(InitialPlayoutDelayTest, WbStereo) { WbStereo(); }
TEST_F(InitialPlayoutDelayTest, SwbStereo) { SwbStereo(); }
} // namespace webrtc

View File

@@ -0,0 +1,315 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <stdio.h>
#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/test/testsupport/fileutils.h"
// Codec under test.
DEFINE_string(codec, "opus", "Codec Name");
DEFINE_int32(codec_sample_rate_hz, 48000, "Sampling rate in Hertz.");
DEFINE_int32(codec_channels, 1, "Number of channels of the codec.");
// PCM input/output. A bundled 32 kHz resource file is used when --input is
// empty (see InsertPacketWithTiming::SetUp()).
DEFINE_string(input, "", "Input PCM file at 16 kHz.");
DEFINE_bool(input_stereo, false, "Input is stereo.");
DEFINE_int32(input_fs_hz, 32000, "Input sample rate Hz.");
DEFINE_string(output, "insert_rtp_with_timing_out.pcm", "OutputFile");
DEFINE_int32(output_fs_hz, 32000, "Output sample rate Hz");
// Timing files: plain text, one numeric value per line.
DEFINE_string(seq_num, "seq_num", "Sequence number file.");
DEFINE_string(send_ts, "send_timestamp", "Send timestamp file.");
DEFINE_string(receive_ts, "last_rec_timestamp", "Receive timestamp file");
// Delay logging: optimal vs. current jitter-buffer delay per tick.
DEFINE_string(delay, "", "Log for delay.");
// Other setups
DEFINE_int32(init_delay, 0, "Initial delay.");
DEFINE_bool(verbose, false, "Verbosity.");
DEFINE_double(loss_rate, 0, "Rate of packet loss < 1");
// Bit flags reported through TickOneMillisecond()'s |action| out-param.
const int32_t kAudioPlayedOut = 0x00000001;
const int32_t kPacketPushedIn = 0x00000001 << 1;
// Audio is pulled from the receiving ACM every 10 ms.
const int kPlayoutPeriodMs = 10;
namespace webrtc {
// Replays a recorded packet timeline (sequence numbers, send timestamps and
// receive timestamps read from text files) through a sending and a
// receiving ACM driven by simulated clocks, writing decoded audio to a PCM
// file. Used to study jitter-buffer behavior under realistic packet timing.
class InsertPacketWithTiming {
 public:
  InsertPacketWithTiming()
      : sender_clock_(new SimulatedClock(0)),
        receiver_clock_(new SimulatedClock(0)),
        send_acm_(AudioCodingModule::Create(0, sender_clock_)),
        receive_acm_(AudioCodingModule::Create(0, receiver_clock_)),
        channel_(new Channel),
        seq_num_fid_(fopen(FLAGS_seq_num.c_str(), "rt")),
        send_ts_fid_(fopen(FLAGS_send_ts.c_str(), "rt")),
        receive_ts_fid_(fopen(FLAGS_receive_ts.c_str(), "rt")),
        pcm_out_fid_(fopen(FLAGS_output.c_str(), "wb")),
        samples_in_1ms_(48),
        num_10ms_in_codec_frame_(2),  // Typical 20 ms frames.
        time_to_insert_packet_ms_(3),  // An arbitrary offset on pushing packet.
        next_receive_ts_(0),
        time_to_playout_audio_ms_(kPlayoutPeriodMs),
        loss_threshold_(0),
        playout_timing_fid_(fopen("playout_timing.txt", "wt")) {}

  // Validates the opened files, registers the codec on both sides, opens
  // the input PCM and applies the optional init-delay / loss-rate flags.
  void SetUp() {
    ASSERT_TRUE(sender_clock_ != NULL);
    ASSERT_TRUE(receiver_clock_ != NULL);
    ASSERT_TRUE(send_acm_.get() != NULL);
    ASSERT_TRUE(receive_acm_.get() != NULL);
    ASSERT_TRUE(channel_ != NULL);
    ASSERT_TRUE(seq_num_fid_ != NULL);
    ASSERT_TRUE(send_ts_fid_ != NULL);
    ASSERT_TRUE(receive_ts_fid_ != NULL);
    ASSERT_TRUE(playout_timing_fid_ != NULL);
    next_receive_ts_ = ReceiveTimestamp();
    CodecInst codec;
    ASSERT_EQ(0, AudioCodingModule::Codec(FLAGS_codec.c_str(), &codec,
                                          FLAGS_codec_sample_rate_hz,
                                          FLAGS_codec_channels));
    ASSERT_EQ(0, receive_acm_->InitializeReceiver());
    ASSERT_EQ(0, send_acm_->RegisterSendCodec(codec));
    ASSERT_EQ(0, receive_acm_->RegisterReceiveCodec(codec));
    // Set codec-dependent parameters.
    samples_in_1ms_ = codec.plfreq / 1000;
    num_10ms_in_codec_frame_ = codec.pacsize / (codec.plfreq / 100);
    channel_->RegisterReceiverACM(receive_acm_.get());
    send_acm_->RegisterTransportCallback(channel_);
    if (FLAGS_input.size() == 0) {
      std::string file_name = test::ResourcePath("audio_coding/testfile32kHz",
                                                 "pcm");
      pcm_in_fid_.Open(file_name, 32000, "r", true);  // auto-rewind
      std::cout << "Input file " << file_name << " 32 kHz mono." << std::endl;
    } else {
      pcm_in_fid_.Open(FLAGS_input, static_cast<uint16_t>(FLAGS_input_fs_hz),
                       "r", true);  // auto-rewind
      std::cout << "Input file " << FLAGS_input << "at " << FLAGS_input_fs_hz
          << " Hz in " << ((FLAGS_input_stereo) ? "stereo." : "mono.")
          << std::endl;
      pcm_in_fid_.ReadStereo(FLAGS_input_stereo);
    }
    ASSERT_TRUE(pcm_out_fid_ != NULL);
    std::cout << "Output file " << FLAGS_output << " at " << FLAGS_output_fs_hz
        << " Hz." << std::endl;
    // Other setups
    if (FLAGS_init_delay > 0)
      EXPECT_EQ(0, receive_acm_->SetInitialPlayoutDelay(FLAGS_init_delay));
    if (FLAGS_loss_rate > 0)
      loss_threshold_ = RAND_MAX * FLAGS_loss_rate;
    else
      loss_threshold_ = 0;
  }

  // Advances both simulated clocks by 1 ms; pulls 10 ms of audio when the
  // playout countdown expires and inserts the next recorded packet when
  // its receive time arrives. |action| reports what happened as a bitmask
  // of kAudioPlayedOut / kPacketPushedIn.
  void TickOneMillisecond(uint32_t* action) {
    // One millisecond passed.
    time_to_insert_packet_ms_--;
    time_to_playout_audio_ms_--;
    sender_clock_->AdvanceTimeMilliseconds(1);
    receiver_clock_->AdvanceTimeMilliseconds(1);
    // Reset action.
    *action = 0;
    // Is it time to pull audio?
    if (time_to_playout_audio_ms_ == 0) {
      time_to_playout_audio_ms_ = kPlayoutPeriodMs;
      receive_acm_->PlayoutData10Ms(static_cast<int>(FLAGS_output_fs_hz),
                                    &frame_);
      fwrite(frame_.data_, sizeof(frame_.data_[0]),
             frame_.samples_per_channel_ * frame_.num_channels_, pcm_out_fid_);
      *action |= kAudioPlayedOut;
    }
    // Is it time to push in next packet?
    // (Half-millisecond tolerance: inter-packet intervals are fractional.)
    if (time_to_insert_packet_ms_ <= .5) {
      *action |= kPacketPushedIn;
      // Update time-to-insert packet.
      uint32_t t = next_receive_ts_;
      next_receive_ts_ = ReceiveTimestamp();
      time_to_insert_packet_ms_ += static_cast<float>(next_receive_ts_ - t) /
          samples_in_1ms_;
      // Push in just enough audio.
      for (int n = 0; n < num_10ms_in_codec_frame_; n++) {
        pcm_in_fid_.Read10MsData(frame_);
        EXPECT_EQ(0, send_acm_->Add10MsData(frame_));
      }
      // Set the parameters for the packet to be pushed in receiver ACM right
      // now.
      uint32_t ts = SendTimestamp();
      int seq_num = SequenceNumber();
      bool lost = false;
      channel_->set_send_timestamp(ts);
      channel_->set_sequence_number(seq_num);
      if (loss_threshold_ > 0 && rand() < loss_threshold_) {
        channel_->set_num_packets_to_drop(1);
        lost = true;
      }
      // Process audio in send ACM, this should result in generation of a
      // packet.
      EXPECT_GT(send_acm_->Process(), 0);
      if (FLAGS_verbose) {
        if (!lost) {
          std::cout << "\nInserting packet number " << seq_num
              << " timestamp " << ts << std::endl;
        } else {
          std::cout << "\nLost packet number " << seq_num
              << " timestamp " << ts << std::endl;
        }
      }
    }
  }

  // Closes all files and frees the channel; the clocks are freed in the
  // destructor instead.
  void TearDown() {
    delete channel_;
    fclose(seq_num_fid_);
    fclose(send_ts_fid_);
    fclose(receive_ts_fid_);
    fclose(pcm_out_fid_);
    pcm_in_fid_.Close();
  }

  // NOTE(review): the member comment further down says the clock pointers
  // are deleted by the associated ACM, yet they are also deleted here —
  // confirm ACM ownership to rule out a double delete.
  ~InsertPacketWithTiming() {
    delete sender_clock_;
    delete receiver_clock_;
  }

  // Are there more info to simulate.
  bool HasPackets() {
    if (feof(seq_num_fid_) || feof(send_ts_fid_) || feof(receive_ts_fid_))
      return false;
    return true;
  }

  // Jitter buffer delay.
  void Delay(int* optimal_delay, int* current_delay) {
    ACMNetworkStatistics statistics;
    receive_acm_->NetworkStatistics(&statistics);
    *optimal_delay = statistics.preferredBufferSize;
    *current_delay = statistics.currentBufferSize;
  }

 private:
  // Reads the next send timestamp from the timing file.
  uint32_t SendTimestamp() {
    uint32_t t;
    EXPECT_EQ(1, fscanf(send_ts_fid_, "%u\n", &t));
    return t;
  }

  // Reads the next receive timestamp from the timing file.
  uint32_t ReceiveTimestamp() {
    uint32_t t;
    EXPECT_EQ(1, fscanf(receive_ts_fid_, "%u\n", &t));
    return t;
  }

  // Reads the next RTP sequence number from the timing file.
  int SequenceNumber() {
    int n;
    EXPECT_EQ(1, fscanf(seq_num_fid_, "%d\n", &n));
    return n;
  }

  // This class just creates these pointers, not deleting them. They are deleted
  // by the associated ACM.
  SimulatedClock* sender_clock_;
  SimulatedClock* receiver_clock_;
  scoped_ptr<AudioCodingModule> send_acm_;
  scoped_ptr<AudioCodingModule> receive_acm_;
  Channel* channel_;  // Owned; freed in TearDown().
  FILE* seq_num_fid_;  // Input (text), one sequence number per line.
  FILE* send_ts_fid_;  // Input (text), one send timestamp per line.
  FILE* receive_ts_fid_;  // Input (text), one receive timestamp per line.
  FILE* pcm_out_fid_;  // Output PCM16.
  PCMFile pcm_in_fid_;  // Input PCM16.
  int samples_in_1ms_;
  // TODO(turajs): this can be computed from the send timestamp, but there is
  // some complication to account for lost and reordered packets.
  int num_10ms_in_codec_frame_;
  float time_to_insert_packet_ms_;  // Countdown; may be fractional.
  uint32_t next_receive_ts_;
  uint32_t time_to_playout_audio_ms_;  // Countdown to next 10 ms playout.
  AudioFrame frame_;
  double loss_threshold_;  // rand() values below this drop the packet.
  // Output (text), sequence number, playout timestamp, time (ms) of playout,
  // per line.
  FILE* playout_timing_fid_;
};
} // webrtc
// Drives the simulation: parse flags, set up the test, advance the clock
// one millisecond at a time, and optionally log jitter-buffer delays.
int main(int argc, char* argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  webrtc::InsertPacketWithTiming test;
  test.SetUp();

  // Open the optional delay log (--delay).
  FILE* delay_fid = NULL;
  if (FLAGS_delay.size() > 0) {
    delay_fid = fopen(FLAGS_delay.c_str(), "wt");
    if (delay_fid == NULL) {
      std::cout << "Cannot open the file to log delay values." << std::endl;
      exit(1);
    }
  }

  uint32_t actions = 0;
  int optimal_delay_ms = 0;
  int current_delay_ms = 0;
  while (test.HasPackets()) {
    test.TickOneMillisecond(&actions);
    if (actions == 0)
      continue;  // Nothing played out or inserted this millisecond.
    test.Delay(&optimal_delay_ms, &current_delay_ms);
    if (delay_fid != NULL)
      fprintf(delay_fid, "%3d %3d\n", optimal_delay_ms, current_delay_ms);
  }

  std::cout << std::endl;
  test.TearDown();
  if (delay_fid != NULL)
    fclose(delay_fid);
}

View File

@@ -0,0 +1,387 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/audio_coding/main/test/opus_test.h"
#include <assert.h>
#include <string>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/engine_configurations.h"
#include "webrtc/modules/audio_coding/codecs/opus/interface/opus_interface.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module_typedefs.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_codec_database.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_opus.h"
#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/test/testsupport/fileutils.h"
namespace webrtc {
// Creates the receiving ACM and zeroes all per-run state. The raw Opus
// encoder/decoder handles must be NULL-initialized here: the destructor
// frees any non-NULL handle, so leaving them uninitialized would make the
// destructor read indeterminate pointers whenever Perform() never ran
// (e.g. when WEBRTC_CODEC_OPUS is not compiled in).
OpusTest::OpusTest()
    : acm_receiver_(AudioCodingModule::Create(0)),
      channel_a2b_(NULL),
      counter_(0),
      payload_type_(255),
      rtp_timestamp_(0),
      opus_mono_encoder_(NULL),
      opus_stereo_encoder_(NULL),
      opus_mono_decoder_(NULL),
      opus_stereo_decoder_(NULL) {}
// Releases the transport channel and the stand-alone Opus encoder/decoder
// handles created by Perform(). Each handle is NULL-checked because
// Perform() may never have run (e.g. Opus disabled at compile time).
OpusTest::~OpusTest() {
  if (channel_a2b_ != NULL) {
    delete channel_a2b_;
    channel_a2b_ = NULL;
  }
  if (opus_mono_encoder_ != NULL) {
    WebRtcOpus_EncoderFree(opus_mono_encoder_);
    opus_mono_encoder_ = NULL;
  }
  if (opus_stereo_encoder_ != NULL) {
    WebRtcOpus_EncoderFree(opus_stereo_encoder_);
    opus_stereo_encoder_ = NULL;
  }
  if (opus_mono_decoder_ != NULL) {
    WebRtcOpus_DecoderFree(opus_mono_decoder_);
    opus_mono_decoder_ = NULL;
  }
  if (opus_stereo_decoder_ != NULL) {
    WebRtcOpus_DecoderFree(opus_stereo_decoder_);
    opus_stereo_decoder_ = NULL;
  }
}
// Exercises Opus through the ACM receive path — stereo then mono, over a
// range of frame sizes and packet-loss rates — while also encoding/decoding
// with stand-alone Opus codecs for reference output. No-op when Opus
// support is compiled out.
void OpusTest::Perform() {
#ifndef WEBRTC_CODEC_OPUS
  // Opus isn't defined, exit.
  return;
#else
  uint16_t frequency_hz;
  int audio_channels;
  int16_t test_cntr = 0;
  // Open both mono and stereo test files in 32 kHz.
  const std::string file_name_stereo =
      webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm");
  const std::string file_name_mono =
      webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm");
  frequency_hz = 32000;
  in_file_stereo_.Open(file_name_stereo, frequency_hz, "rb");
  in_file_stereo_.ReadStereo(true);
  in_file_mono_.Open(file_name_mono, frequency_hz, "rb");
  in_file_mono_.ReadStereo(false);
  // Create Opus encoders for mono and stereo.
  ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_mono_encoder_, 1), -1);
  ASSERT_GT(WebRtcOpus_EncoderCreate(&opus_stereo_encoder_, 2), -1);
  // Create Opus decoders for mono and stereo for stand-alone testing of Opus.
  ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_mono_decoder_, 1), -1);
  ASSERT_GT(WebRtcOpus_DecoderCreate(&opus_stereo_decoder_, 2), -1);
  ASSERT_GT(WebRtcOpus_DecoderInitNew(opus_mono_decoder_), -1);
  ASSERT_GT(WebRtcOpus_DecoderInitNew(opus_stereo_decoder_), -1);
  ASSERT_TRUE(acm_receiver_.get() != NULL);
  EXPECT_EQ(0, acm_receiver_->InitializeReceiver());
  // Register Opus stereo as receiving codec.
  CodecInst opus_codec_param;
  int codec_id = acm_receiver_->Codec("opus", 48000, 2);
  EXPECT_EQ(0, acm_receiver_->Codec(codec_id, &opus_codec_param));
  payload_type_ = opus_codec_param.pltype;
  EXPECT_EQ(0, acm_receiver_->RegisterReceiveCodec(opus_codec_param));
  // Create and connect the channel.
  channel_a2b_ = new TestPackStereo;
  channel_a2b_->RegisterReceiverACM(acm_receiver_.get());
  //
  // Test Stereo.
  //
  channel_a2b_->set_codec_mode(kStereo);
  audio_channels = 2;
  test_cntr++;
  OpenOutFile(test_cntr);
  // Run Opus with 2.5 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 120);
  // Run Opus with 5 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 240);
  // Run Opus with 10 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 480);
  // Run Opus with 20 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 960);
  // Run Opus with 40 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 1920);
  // Run Opus with 60 ms frame size.
  Run(channel_a2b_, audio_channels, 64000, 2880);
  out_file_.Close();
  out_file_standalone_.Close();
  //
  // Test Opus stereo with packet-losses.
  //
  test_cntr++;
  OpenOutFile(test_cntr);
  // Run Opus with 20 ms frame size, 1% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 1);
  // Run Opus with 20 ms frame size, 5% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 5);
  // Run Opus with 20 ms frame size, 10% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 10);
  out_file_.Close();
  out_file_standalone_.Close();
  //
  // Test Mono.
  //
  channel_a2b_->set_codec_mode(kMono);
  audio_channels = 1;
  test_cntr++;
  OpenOutFile(test_cntr);
  // Register Opus mono as receiving codec.
  opus_codec_param.channels = 1;
  EXPECT_EQ(0, acm_receiver_->RegisterReceiveCodec(opus_codec_param));
  // Run Opus with 2.5 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 120);
  // Run Opus with 5 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 240);
  // Run Opus with 10 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 480);
  // Run Opus with 20 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 960);
  // Run Opus with 40 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 1920);
  // Run Opus with 60 ms frame size.
  Run(channel_a2b_, audio_channels, 32000, 2880);
  out_file_.Close();
  out_file_standalone_.Close();
  //
  // Test Opus mono with packet-losses.
  //
  // NOTE(review): these mono loss runs use 64000 bps — the same rate as
  // the stereo loss runs, not the 32000 bps of the mono runs above.
  // Confirm whether that is intentional.
  test_cntr++;
  OpenOutFile(test_cntr);
  // Run Opus with 20 ms frame size, 1% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 1);
  // Run Opus with 20 ms frame size, 5% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 5);
  // Run Opus with 20 ms frame size, 10% packet loss.
  Run(channel_a2b_, audio_channels, 64000, 960, 10);
  // Close the files.
  in_file_stereo_.Close();
  in_file_mono_.Close();
  out_file_.Close();
  out_file_standalone_.Close();
#endif
}
// Runs one Opus encode/decode pass: reads 10 ms chunks from the mono or
// stereo input file, resamples them to 48 kHz, encodes packets of
// |frame_length| (48 kHz) samples at |bitrate| bps, and decodes every packet
// both with a stand-alone Opus decoder (PLC for lost packets) and through the
// ACM receive side via |channel|.  |percent_loss| controls how often a packet
// is treated as lost.  Output of both decode paths is written to file.
void OpusTest::Run(TestPackStereo* channel, int channels, int bitrate,
                   int frame_length, int percent_loss) {
  AudioFrame audio_frame;
  int32_t out_freq_hz_b = out_file_.SamplingFrequency();
  const int kBufferSizeSamples = 480 * 12 * 2;  // Can hold 120 ms stereo audio.
  int16_t audio[kBufferSizeSamples];
  int16_t out_audio[kBufferSizeSamples];
  int16_t audio_type;
  // |written_samples| / |read_samples| index the 48 kHz staging buffer
  // |audio|; |decoded_samples| indexes the stand-alone output |out_audio|.
  int written_samples = 0;
  int read_samples = 0;
  int decoded_samples = 0;
  bool first_packet = true;
  uint32_t start_time_stamp = 0;
  channel->reset_payload_size();
  counter_ = 0;
  // Set encoder rate.
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_mono_encoder_, bitrate));
  EXPECT_EQ(0, WebRtcOpus_SetBitRate(opus_stereo_encoder_, bitrate));
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) || defined(WEBRTC_ARCH_ARM)
  // If we are on Android, iOS and/or ARM, use a lower complexity setting as
  // default.
  const int kOpusComplexity5 = 5;
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_mono_encoder_, kOpusComplexity5));
  EXPECT_EQ(0, WebRtcOpus_SetComplexity(opus_stereo_encoder_,
                                        kOpusComplexity5));
#endif
  // Make sure the runtime is less than 60 seconds to pass Android test.
  for (size_t audio_length = 0; audio_length < 10000; audio_length += 10) {
    bool lost_packet = false;
    // Get 10 msec of audio.
    if (channels == 1) {
      if (in_file_mono_.EndOfFile()) {
        break;
      }
      in_file_mono_.Read10MsData(audio_frame);
    } else {
      if (in_file_stereo_.EndOfFile()) {
        break;
      }
      in_file_stereo_.Read10MsData(audio_frame);
    }
    // If input audio is sampled at 32 kHz, resampling to 48 kHz is required.
    EXPECT_EQ(480,
              resampler_.Resample10Msec(audio_frame.data_,
                                        audio_frame.sample_rate_hz_,
                                        48000,
                                        channels,
                                        kBufferSizeSamples - written_samples,
                                        &audio[written_samples]));
    written_samples += 480 * channels;
    // Sometimes we need to loop over the audio vector to produce the right
    // number of packets.
    int loop_encode = (written_samples - read_samples) /
        (channels * frame_length);
    if (loop_encode > 0) {
      const int kMaxBytes = 1000;  // Maximum number of bytes for one packet.
      int16_t bitstream_len_byte;
      uint8_t bitstream[kMaxBytes];
      for (int i = 0; i < loop_encode; i++) {
        // Encode with the mono or the stereo encoder instance.
        if (channels == 1) {
          bitstream_len_byte = WebRtcOpus_Encode(
              opus_mono_encoder_, &audio[read_samples],
              frame_length, kMaxBytes, bitstream);
          ASSERT_GT(bitstream_len_byte, -1);
        } else {
          bitstream_len_byte = WebRtcOpus_Encode(
              opus_stereo_encoder_, &audio[read_samples],
              frame_length, kMaxBytes, bitstream);
          ASSERT_GT(bitstream_len_byte, -1);
        }
        // Simulate packet loss by setting |packet_loss_| to "true" in
        // |percent_loss| percent of the loops.
        // TODO(tlegrand): Move handling of loss simulation to TestPackStereo.
        if (percent_loss > 0) {
          if (counter_ == floor((100 / percent_loss) + 0.5)) {
            counter_ = 0;
            lost_packet = true;
            channel->set_lost_packet(true);
          } else {
            lost_packet = false;
            channel->set_lost_packet(false);
          }
          counter_++;
        }
        // Run stand-alone Opus decoder, or decode PLC.
        if (channels == 1) {
          if (!lost_packet) {
            decoded_samples += WebRtcOpus_DecodeNew(
                opus_mono_decoder_, bitstream, bitstream_len_byte,
                &out_audio[decoded_samples * channels], &audio_type);
          } else {
            decoded_samples += WebRtcOpus_DecodePlc(
                opus_mono_decoder_, &out_audio[decoded_samples * channels], 1);
          }
        } else {
          if (!lost_packet) {
            decoded_samples += WebRtcOpus_DecodeNew(
                opus_stereo_decoder_, bitstream, bitstream_len_byte,
                &out_audio[decoded_samples * channels], &audio_type);
          } else {
            decoded_samples += WebRtcOpus_DecodePlc(
                opus_stereo_decoder_, &out_audio[decoded_samples * channels],
                1);
          }
        }
        // Send data to the channel. "channel" will handle the loss simulation.
        channel->SendData(kAudioFrameSpeech, payload_type_, rtp_timestamp_,
                          bitstream, bitstream_len_byte, NULL);
        // Remember the RTP timestamp of the first sent packet; it is compared
        // against |audio_frame.timestamp_| below to know when ACM playout has
        // advanced past the first packet.
        if (first_packet) {
          first_packet = false;
          start_time_stamp = rtp_timestamp_;
        }
        rtp_timestamp_ += frame_length;
        read_samples += frame_length * channels;
      }
      // Rewind the staging buffer once everything written has been consumed.
      if (read_samples == written_samples) {
        read_samples = 0;
        written_samples = 0;
      }
    }
    // Run received side of ACM.
    ASSERT_EQ(0, acm_receiver_->PlayoutData10Ms(out_freq_hz_b, &audio_frame));
    // Write output speech to file.
    out_file_.Write10MsData(
        audio_frame.data_,
        audio_frame.samples_per_channel_ * audio_frame.num_channels_);
    // Write stand-alone speech to file.
    out_file_standalone_.Write10MsData(out_audio, decoded_samples * channels);
    if (audio_frame.timestamp_ > start_time_stamp) {
      // Number of channels should be the same for both stand-alone and
      // ACM-decoding.
      EXPECT_EQ(audio_frame.num_channels_, channels);
    }
    decoded_samples = 0;
  }
  // Rewind input files so a subsequent Run() starts from the beginning.
  if (in_file_mono_.EndOfFile()) {
    in_file_mono_.Rewind();
  }
  if (in_file_stereo_.EndOfFile()) {
    in_file_stereo_.Rewind();
  }
  // Reset in case we ended with a lost packet.
  channel->set_lost_packet(false);
}
// Opens the two 48 kHz output files for the given test number under the test
// output directory: one for ACM-decoded speech and one for speech decoded by
// the stand-alone Opus decoder.
void OpusTest::OpenOutFile(int test_number) {
  std::string file_name;
  std::stringstream file_stream;
  file_stream << webrtc::test::OutputPath() << "opustest_out_"
              << test_number << ".pcm";
  file_name = file_stream.str();
  out_file_.Open(file_name, 48000, "wb");
  // Clear the stream before composing the second file name.  (The original
  // code also re-assigned |file_name| from the freshly-cleared stream here,
  // which was a dead store and has been removed.)
  file_stream.str("");
  file_stream << webrtc::test::OutputPath() << "opusstandalone_out_"
              << test_number << ".pcm";
  file_name = file_stream.str();
  out_file_standalone_.Open(file_name, 48000, "wb");
}
} // namespace webrtc

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_
#include <math.h>
#include "webrtc/modules/audio_coding/main/acm2/acm_opus.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_resampler.h"
#include "webrtc/modules/audio_coding/main/test/ACMTest.h"
#include "webrtc/modules/audio_coding/main/test/Channel.h"
#include "webrtc/modules/audio_coding/main/test/PCMFile.h"
#include "webrtc/modules/audio_coding/main/test/TestStereo.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
namespace webrtc {
// ACM test exercising the Opus codec: encodes mono and stereo audio at
// various frame sizes and bitrates, decodes through both a stand-alone Opus
// decoder and the ACM receive path, and writes both outputs to file.
class OpusTest : public ACMTest {
 public:
  OpusTest();
  ~OpusTest();
  // Runs all configurations (stereo and mono, with and without packet loss).
  void Perform();
 private:
  // Runs one encode/decode configuration through |channel|; |frame_length|
  // is in 48 kHz samples and |percent_loss| packets are simulated as lost.
  void Run(TestPackStereo* channel, int channels, int bitrate, int frame_length,
           int percent_loss = 0);
  // Opens the ACM and stand-alone output files for |test_number|.
  void OpenOutFile(int test_number);
  scoped_ptr<AudioCodingModule> acm_receiver_;  // Receive-side ACM.
  TestPackStereo* channel_a2b_;     // Channel passed to Run() by Perform().
  PCMFile in_file_stereo_;
  PCMFile in_file_mono_;
  PCMFile out_file_;                // ACM-decoded output speech.
  PCMFile out_file_standalone_;     // Stand-alone-decoded output speech.
  int counter_;                     // Loop counter for packet-loss simulation.
  uint8_t payload_type_;
  int rtp_timestamp_;
  acm2::ACMResampler resampler_;    // Resamples input to 48 kHz in Run().
  WebRtcOpusEncInst* opus_mono_encoder_;
  WebRtcOpusEncInst* opus_stereo_encoder_;
  WebRtcOpusDecInst* opus_mono_decoder_;
  WebRtcOpusDecInst* opus_stereo_decoder_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_OPUS_TEST_H_

View File

@@ -0,0 +1,223 @@
/*
* Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "gtest/gtest.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/codecs/pcm16b/include/pcm16b.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/test/utility.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"
#include "webrtc/system_wrappers/interface/sleep.h"
#include "webrtc/test/testsupport/fileutils.h"
#include "webrtc/test/testsupport/gtest_disable.h"
namespace webrtc {
// Tests the ACM target-delay API: pre-encoded L16 packets are pushed through
// the receive side with and without simulated inter-arrival jitter, and the
// optimal/least-required delays reported by the module are checked against
// the configured minimum/maximum playout delays.
class TargetDelayTest : public ::testing::Test {
 protected:
  TargetDelayTest() : acm_(AudioCodingModule::Create(0)) {}

  ~TargetDelayTest() {}

  void SetUp() {
    EXPECT_TRUE(acm_.get() != NULL);

    CodecInst codec;
    ASSERT_EQ(0, AudioCodingModule::Codec("L16", &codec, kSampleRateHz, 1));
    ASSERT_EQ(0, acm_->InitializeReceiver());
    ASSERT_EQ(0, acm_->RegisterReceiveCodec(codec));

    rtp_info_.header.payloadType = codec.pltype;
    rtp_info_.header.timestamp = 0;
    rtp_info_.header.ssrc = 0x12345678;
    rtp_info_.header.markerBit = false;
    rtp_info_.header.sequenceNumber = 0;
    rtp_info_.type.Audio.channel = 1;
    rtp_info_.type.Audio.isCNG = false;
    rtp_info_.frameType = kAudioFrameSpeech;

    // Encode one frame of random samples; the same payload is re-sent for
    // every packet pushed during a test.
    int16_t audio[kFrameSizeSamples];
    const int kRange = 0x7FF;  // 2047, easy for masking.
    for (int n = 0; n < kFrameSizeSamples; ++n)
      audio[n] = (rand() & kRange) - kRange / 2;
    WebRtcPcm16b_Encode(audio, kFrameSizeSamples, payload_);
  }

  // Delays outside the accepted range must be rejected.
  void OutOfRangeInput() {
    EXPECT_EQ(-1, SetMinimumDelay(-1));
    EXPECT_EQ(-1, SetMinimumDelay(10001));
  }

  // Without a configured target delay, jitter should grow the optimal delay,
  // and the least-required delay should track the jittery optimal delay.
  void NoTargetDelayBufferSizeChanges() {
    for (int n = 0; n < 30; ++n)  // Run enough iterations.
      Run(true);
    int clean_optimal_delay = GetCurrentOptimalDelayMs();
    Run(false);  // Run with jitter.
    int jittery_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_GT(jittery_optimal_delay, clean_optimal_delay);
    int required_delay = RequiredDelay();
    EXPECT_GT(required_delay, 0);
    EXPECT_NEAR(required_delay, jittery_optimal_delay, 1);
  }

  // With a minimum target delay larger than the jitter, the optimal delay
  // should stay pinned at the target, jitter or not.
  void WithTargetDelayBufferNotChanging() {
    // A target delay that is one packet larger than jitter.
    const int kTargetDelayMs = (kInterarrivalJitterPacket + 1) *
        kNum10msPerFrame * 10;
    ASSERT_EQ(0, SetMinimumDelay(kTargetDelayMs));
    for (int n = 0; n < 30; ++n)  // Run enough iterations to fill the buffer.
      Run(true);
    int clean_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_EQ(kTargetDelayMs, clean_optimal_delay);
    Run(false);  // Run with jitter.
    int jittery_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_EQ(jittery_optimal_delay, clean_optimal_delay);
  }

  // The least-required delay must reflect only the actual channel jitter,
  // independently of a (large) configured target delay.
  void RequiredDelayAtCorrectRange() {
    for (int n = 0; n < 30; ++n)  // Run clean and store delay.
      Run(true);
    int clean_optimal_delay = GetCurrentOptimalDelayMs();

    // A relatively large delay.
    const int kTargetDelayMs = (kInterarrivalJitterPacket + 10) *
        kNum10msPerFrame * 10;
    ASSERT_EQ(0, SetMinimumDelay(kTargetDelayMs));
    for (int n = 0; n < 300; ++n)  // Run enough iterations to fill the buffer.
      Run(true);
    Run(false);  // Run with jitter.
    int jittery_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_EQ(kTargetDelayMs, jittery_optimal_delay);

    int required_delay = RequiredDelay();

    // Checking |required_delay| is in correct range.
    EXPECT_GT(required_delay, 0);
    EXPECT_GT(jittery_optimal_delay, required_delay);
    EXPECT_GT(required_delay, clean_optimal_delay);

    // A tighter check for the value of |required_delay|.
    // The jitter forces a delay of
    // |kInterarrivalJitterPacket * kNum10msPerFrame * 10| milliseconds. So we
    // expect |required_delay| be close to that.
    EXPECT_NEAR(kInterarrivalJitterPacket * kNum10msPerFrame * 10,
                required_delay, 1);
  }

  // The optimal delay should respect both the configured minimum and, once
  // set, the configured maximum.
  void TargetDelayBufferMinMax() {
    const int kTargetMinDelayMs = kNum10msPerFrame * 10;
    ASSERT_EQ(0, SetMinimumDelay(kTargetMinDelayMs));
    for (int m = 0; m < 30; ++m)  // Run enough iterations to fill the buffer.
      Run(true);
    int clean_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_EQ(kTargetMinDelayMs, clean_optimal_delay);

    const int kTargetMaxDelayMs = 2 * (kNum10msPerFrame * 10);
    ASSERT_EQ(0, SetMaximumDelay(kTargetMaxDelayMs));
    for (int n = 0; n < 30; ++n)  // Run enough iterations to fill the buffer.
      Run(false);
    int capped_optimal_delay = GetCurrentOptimalDelayMs();
    EXPECT_EQ(kTargetMaxDelayMs, capped_optimal_delay);
  }

 private:
  static const int kSampleRateHz = 16000;
  static const int kNum10msPerFrame = 2;
  static const int kFrameSizeSamples = 320;  // 20 ms @ 16 kHz.
  // payload-len = frame-samples * 2 bytes/sample.
  static const int kPayloadLenBytes = 320 * 2;
  // Inter-arrival time in number of packets in a jittery channel. One is no
  // jitter.
  static const int kInterarrivalJitterPacket = 2;

  // Feeds one pre-encoded packet to the ACM, advancing the RTP timestamp and
  // sequence number.
  void Push() {
    rtp_info_.header.timestamp += kFrameSizeSamples;
    rtp_info_.header.sequenceNumber++;
    // kPayloadLenBytes == kFrameSizeSamples * 2; use the named constant to
    // stay consistent with the |payload_| declaration.
    ASSERT_EQ(0, acm_->IncomingPacket(payload_, kPayloadLenBytes, rtp_info_));
  }

  // Pull audio equivalent to the amount of audio in one RTP packet.
  void Pull() {
    AudioFrame frame;
    for (int k = 0; k < kNum10msPerFrame; ++k) {  // Pull one frame.
      ASSERT_EQ(0, acm_->PlayoutData10Ms(-1, &frame));
      // Had to use ASSERT_TRUE, ASSERT_EQ generated error.
      ASSERT_TRUE(kSampleRateHz == frame.sample_rate_hz_);
      ASSERT_EQ(1, frame.num_channels_);
      ASSERT_TRUE(kSampleRateHz / 100 == frame.samples_per_channel_);
    }
  }

  // Pushes/pulls packets in lockstep; when |clean| is false, extra packets
  // are pushed with several pulls in between to emulate inter-arrival jitter.
  void Run(bool clean) {
    for (int n = 0; n < 10; ++n) {
      for (int m = 0; m < 5; ++m) {
        Push();
        Pull();
      }
      if (!clean) {
        for (int m = 0; m < 10; ++m) {  // Long enough to trigger delay change.
          Push();
          // |i| used here; the original reused |n| and shadowed the outer
          // loop variable.
          for (int i = 0; i < kInterarrivalJitterPacket; ++i)
            Pull();
        }
      }
    }
  }

  int SetMinimumDelay(int delay_ms) {
    return acm_->SetMinimumPlayoutDelay(delay_ms);
  }

  int SetMaximumDelay(int delay_ms) {
    return acm_->SetMaximumPlayoutDelay(delay_ms);
  }

  // Reads the jitter buffer's currently preferred (optimal) size in ms.
  int GetCurrentOptimalDelayMs() {
    ACMNetworkStatistics stats;
    acm_->NetworkStatistics(&stats);
    return stats.preferredBufferSize;
  }

  int RequiredDelay() {
    return acm_->LeastRequiredDelayMs();
  }

  scoped_ptr<AudioCodingModule> acm_;
  WebRtcRTPHeader rtp_info_;
  uint8_t payload_[kPayloadLenBytes];  // One pre-encoded L16 frame.
};
// Each case delegates to the fixture method of the same name.  All cases are
// wrapped in DISABLED_ON_ANDROID, so they do not run on Android builds.
TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(OutOfRangeInput)) {
  OutOfRangeInput();
}

TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(NoTargetDelayBufferSizeChanges)) {
  NoTargetDelayBufferSizeChanges();
}

TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(WithTargetDelayBufferNotChanging)) {
  WithTargetDelayBufferNotChanging();
}

TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(RequiredDelayAtCorrectRange)) {
  RequiredDelayAtCorrectRange();
}

TEST_F(TargetDelayTest, DISABLED_ON_ANDROID(TargetDelayBufferMinMax)) {
  TargetDelayBufferMinMax();
}
} // namespace webrtc

View File

@@ -0,0 +1,333 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "utility.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/common.h"
#include "webrtc/common_types.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
#include "webrtc/modules/audio_coding/main/acm2/acm_common_defs.h"
#define NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE 13
namespace webrtc {
// Starts the timer at 0:00:00.000.  (Redundant |return;| removed.)
ACMTestTimer::ACMTestTimer()
    : _msec(0),
      _sec(0),
      _min(0),
      _hour(0) {
}
// Nothing to release; all members are plain integers.
ACMTestTimer::~ACMTestTimer() {
}
// Rewinds the timer back to 0:00:00.000.
void ACMTestTimer::Reset() {
  _msec = 0;
  _sec = 0;
  _min = 0;
  _hour = 0;
}
// Advances the timer by 10 milliseconds.
void ACMTestTimer::Tick10ms() {
  _msec += 10;
  Adjust();
}
// Advances the timer by 1 millisecond.
void ACMTestTimer::Tick1ms() {
  _msec++;
  Adjust();
}
// Advances the timer by 100 milliseconds.
void ACMTestTimer::Tick100ms() {
  _msec += 100;
  Adjust();
}
// Advances the timer by 1 second.
void ACMTestTimer::Tick1sec() {
  _sec++;
  Adjust();
}
// Formats the current time as "HHHH:MM:SS.mmm" into |currTime|.
// NOTE(review): assumes the caller's buffer is large enough for the
// formatted string (at least ~16 bytes for typical hour values); sprintf
// cannot check this — confirm call sites.
void ACMTestTimer::CurrentTimeHMS(char* currTime) {
  sprintf(currTime, "%4lu:%02u:%06.3f", _hour, _min,
          (double) _sec + (double) _msec / 1000.);
}
// Returns the current time split into hours, minutes, seconds and
// milliseconds through the output references.
void ACMTestTimer::CurrentTime(unsigned long& h, unsigned char& m,
                               unsigned char& s, unsigned short& ms) {
  h = _hour;
  m = _min;
  s = _sec;
  ms = _msec;
}
// Normalizes the time fields after a tick: carries milliseconds over into
// seconds, seconds into minutes, and minutes into hours.
void ACMTestTimer::Adjust() {
  _sec += _msec / 1000;
  _msec %= 1000;
  _min += _sec / 60;
  _sec %= 60;
  _hour += _min / 60;
  _min %= 60;
}
// Prints the list of supported codecs and reads a codec index from stdin,
// looping until a valid index is entered.  The chosen codec's settings are
// written to |codecInst|.  Returns 0 on success.
int16_t ChooseCodec(CodecInst& codecInst) {
  PrintCodecs();
  uint8_t noCodec = AudioCodingModule::NumberOfCodecs();
  int8_t codecID;
  bool outOfRange = false;
  char myStr[15] = "";
  do {
    // Bug fix: |outOfRange| was never cleared inside the loop, so a single
    // invalid entry caused the prompt to loop forever even after valid
    // input.
    outOfRange = false;
    printf("\nChoose a codec [0]: ");
    // Use the full buffer; the original passed a hard-coded 10 although
    // |myStr| holds 15 bytes.
    EXPECT_TRUE(fgets(myStr, sizeof(myStr), stdin) != NULL);
    codecID = atoi(myStr);
    if ((codecID < 0) || (codecID >= noCodec)) {
      printf("\nOut of range.\n");
      outOfRange = true;
    }
  } while (outOfRange);
  CHECK_ERROR(AudioCodingModule::Codec((uint8_t) codecID, &codecInst));
  return 0;
}
void PrintCodecs() {
uint8_t noCodec = AudioCodingModule::NumberOfCodecs();
CodecInst codecInst;
printf("No Name [Hz] [bps]\n");
for (uint8_t codecCntr = 0; codecCntr < noCodec; codecCntr++) {
AudioCodingModule::Codec(codecCntr, &codecInst);
printf("%2d- %-18s %5d %6d\n", codecCntr, codecInst.plname,
codecInst.plfreq, codecInst.rate);
}
}
// Allocates a zero-initialized buffer of |len| doubles.  On allocation
// failure the buffer length is recorded as zero.
CircularBuffer::CircularBuffer(uint32_t len)
    : _buff(NULL),
      _idx(0),
      _buffIsFull(false),
      _calcAvg(false),
      _calcVar(false),
      _sum(0),
      _sumSqr(0) {
  _buff = new double[len];
  if (_buff != NULL) {
    for (uint32_t i = 0; i < len; ++i) {
      _buff[i] = 0;
    }
    _buffLen = len;
  } else {
    _buffLen = 0;
  }
}
// Releases the sample buffer.  (delete[] on NULL is a no-op, so the
// original NULL guard was redundant and has been removed.)
CircularBuffer::~CircularBuffer() {
  delete[] _buff;
  _buff = NULL;
}
// Inserts |newVal|, overwriting the oldest entry once the buffer has
// wrapped, and incrementally maintains the running sum and sum of squares
// when those statistics are enabled.
void CircularBuffer::Update(const double newVal) {
  assert(_buffLen > 0);
  // Value being evicted from the write position (zero until first wrap).
  const double evicted = _buff[_idx];
  _buff[_idx] = newVal;
  // Advance the write index, wrapping to the start when the end is reached.
  if (++_idx >= _buffLen) {
    _buffIsFull = true;
    _idx = 0;
  }
  if (_calcAvg) {
    // Keep the running sum consistent with the buffer contents.
    _sum += (newVal - evicted);
  }
  if (_calcVar) {
    // (a^2 - b^2) == (a - b) * (a + b): update the sum of squares.
    _sumSqr += (newVal - evicted) * (newVal + evicted);
  }
}
// Enables or disables running-sum maintenance for ArithMean().  When newly
// enabled, the sum is recomputed over the samples currently buffered.
void CircularBuffer::SetArithMean(bool enable) {
  assert(_buffLen > 0);
  if (enable && !_calcAvg) {
    const uint32_t count = _buffIsFull ? _buffLen : _idx;
    _sum = 0;
    for (uint32_t i = 0; i < count; ++i) {
      _sum += _buff[i];
    }
  }
  _calcAvg = enable;
}
// Enables or disables running sum-of-squares maintenance for Variance().
// When newly enabled, the sum of squares is recomputed over the samples
// currently buffered.
void CircularBuffer::SetVariance(bool enable) {
  assert(_buffLen > 0);
  if (enable && !_calcVar) {
    uint32_t lim;
    if (_buffIsFull) {
      lim = _buffLen;
    } else {
      lim = _idx;
    }
    _sumSqr = 0;
    for (uint32_t n = 0; n < lim; n++) {
      _sumSqr += _buff[n] * _buff[n];
    }
  }
  // Bug fix: this previously assigned |_calcAvg|, so variance tracking was
  // never switched on and Update() never maintained |_sumSqr|.
  _calcVar = enable;
}
// Writes the arithmetic mean of the buffered samples into |mean|.
// Returns 0 on success, or -1 if no samples have been inserted yet.
int16_t CircularBuffer::ArithMean(double& mean) {
  assert(_buffLen > 0);
  const uint32_t count = _buffIsFull ? _buffLen : _idx;
  if (count == 0) {
    return -1;
  }
  mean = _sum / static_cast<double>(count);
  return 0;
}
// Writes the mean of the squared buffered samples into |var|.  Returns 0 on
// success, or -1 if no samples have been inserted yet.
// NOTE(review): this is E[x^2] over the window, not the central variance
// E[x^2] - E[x]^2 — confirm callers expect the mean square.
int16_t CircularBuffer::Variance(double& var) {
  assert(_buffLen > 0);
  if (_buffIsFull) {
    var = _sumSqr / (double) _buffLen;
    return 0;
  } else {
    if (_idx > 0) {
      var = _sumSqr / (double) _idx;
      return 0;
    } else {
      return -1;
    }
  }
}
bool FixedPayloadTypeCodec(const char* payloadName) {
char fixPayloadTypeCodecs[NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE][32] = { "PCMU",
"PCMA", "GSM", "G723", "DVI4", "LPC", "PCMA", "G722", "QCELP", "CN",
"MPA", "G728", "G729" };
for (int n = 0; n < NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE; n++) {
if (!STR_CASE_CMP(payloadName, fixPayloadTypeCodecs[n])) {
return true;
}
}
return false;
}
// Starts with every per-digit tone counter cleared.
DTMFDetector::DTMFDetector() {
  for (int digit = 0; digit < 1000; ++digit) {
    _toneCntr[digit] = 0;
  }
}
// Nothing to release; counters are plain members.
DTMFDetector::~DTMFDetector() {
}
// Callback for each in-band DTMF event: logs the digit to stdout and bumps
// its counter.  The tone-ended flag is unused.
int32_t DTMFDetector::IncomingDtmf(const uint8_t digitDtmf,
                                   const bool /* toneEnded */) {
  fprintf(stdout, "%d-", digitDtmf);
  ++_toneCntr[digitDtmf];
  return 0;
}
// Prints every digit detected at least once together with its accumulated
// duration (each counted event represents 10 ms).  (Redundant |return;|
// removed.)
void DTMFDetector::PrintDetectedDigits() {
  for (int16_t n = 0; n < 1000; n++) {
    if (_toneCntr[n] > 0) {
      fprintf(stdout, "%d %u msec, \n", n, _toneCntr[n] * 10);
    }
  }
  fprintf(stdout, "\n");
}
// Clears all frame-type counters.
void VADCallback::Reset() {
  for (int type = 0; type < 6; ++type) {
    _numFrameTypes[type] = 0;
  }
}
// Constructs the callback with all frame-type counters cleared.  Delegates
// to Reset() instead of duplicating the zeroing loop.
VADCallback::VADCallback() {
  Reset();
}
// Prints the tally for each frame type.  Format-specifier fix: the counters
// are uint32_t, so %u is used instead of %d (mismatched specifiers are
// undefined behavior per the C standard).
void VADCallback::PrintFrameTypes() {
  fprintf(stdout, "No encoding.................. %u\n", _numFrameTypes[0]);
  fprintf(stdout, "Active normal encoded........ %u\n", _numFrameTypes[1]);
  fprintf(stdout, "Passive normal encoded....... %u\n", _numFrameTypes[2]);
  fprintf(stdout, "Passive DTX wideband......... %u\n", _numFrameTypes[3]);
  fprintf(stdout, "Passive DTX narrowband....... %u\n", _numFrameTypes[4]);
  fprintf(stdout, "Passive DTX super-wideband... %u\n", _numFrameTypes[5]);
}
// Tallies one frame of the given type.  Always returns 0.
// NOTE(review): |frameType| indexes the 6-entry counter array without a
// range check — confirm callers only pass values 0..5.
int32_t VADCallback::InFrameType(int16_t frameType) {
  _numFrameTypes[frameType]++;
  return 0;
}
} // namespace webrtc

View File

@@ -0,0 +1,152 @@
/*
* Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_
#define WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
namespace webrtc {
//-----------------------------
// Fails the current gtest (non-fatally) if the API call |f| returns a
// negative value.
#define CHECK_ERROR(f) \
  do { \
    EXPECT_GE(f, 0) << "Error Calling API"; \
  } while(0)
//-----------------------------
// Expects the API call |f| to fail (negative return); a non-negative return
// is flagged as a test failure.
#define CHECK_PROTECTED(f) \
  do { \
    if (f >= 0) { \
      ADD_FAILURE() << "Error Calling API"; \
    } else { \
      printf("An expected error is caught.\n"); \
    } \
  } while(0)
//----------------------------
// _MT variant of CHECK_ERROR: reports to stderr instead of using gtest
// expectation macros.
#define CHECK_ERROR_MT(f) \
  do { \
    if (f < 0) { \
      fprintf(stderr, "Error Calling API in file %s at line %d \n", \
              __FILE__, __LINE__); \
    } \
  } while(0)
//----------------------------
// _MT variant of CHECK_PROTECTED: reports to stderr instead of using gtest
// expectation macros.
#define CHECK_PROTECTED_MT(f) \
  do { \
    if (f >= 0) { \
      fprintf(stderr, "Error Calling API in file %s at line %d \n", \
              __FILE__, __LINE__); \
    } else { \
      printf("An expected error is caught.\n"); \
    } \
  } while(0)
// Deletes |p| and nulls the pointer; no-op when |p| is already NULL.
#define DELETE_POINTER(p) \
  do { \
    if (p != NULL) { \
      delete p; \
      p = NULL; \
    } \
  } while(0)
// Simple test timer: accumulates fixed-size ticks and exposes the elapsed
// time split into hours/minutes/seconds/milliseconds.
class ACMTestTimer {
 public:
  ACMTestTimer();
  ~ACMTestTimer();
  // Sets the elapsed time back to zero.
  void Reset();
  // Advance the timer by the indicated amount.
  void Tick10ms();
  void Tick1ms();
  void Tick100ms();
  void Tick1sec();
  // Formats the current time ("%4lu:%02u:%06.3f") into |currTime|.
  void CurrentTimeHMS(char* currTime);
  // Returns the current time split into its components.
  void CurrentTime(unsigned long& h, unsigned char& m, unsigned char& s,
                   unsigned short& ms);
 private:
  // Carries millisecond/second/minute overflow into the larger units.
  void Adjust();
  unsigned short _msec;
  unsigned char _sec;
  unsigned char _min;
  unsigned long _hour;
};
// Fixed-size circular buffer of doubles that can incrementally maintain a
// running sum (for the arithmetic mean) and a running sum of squares.
class CircularBuffer {
 public:
  CircularBuffer(uint32_t len);
  ~CircularBuffer();
  // Enables/disables maintenance of the running sum used by ArithMean().
  void SetArithMean(bool enable);
  // Enables/disables maintenance of the sum of squares used by Variance().
  void SetVariance(bool enable);
  // Inserts |newVal|, overwriting the oldest value once the buffer is full.
  void Update(const double newVal);
  // NOTE(review): declared but not defined in utility.cc — confirm whether
  // this is dead code or implemented elsewhere.
  void IsBufferFull();
  // Each returns 0 and writes the result, or -1 if the buffer is empty.
  int16_t Variance(double& var);
  int16_t ArithMean(double& mean);
 protected:
  double* _buff;
  uint32_t _idx;        // Next write position.
  uint32_t _buffLen;
  bool _buffIsFull;     // True once the buffer has wrapped around.
  bool _calcAvg;        // Running sum maintained when true.
  bool _calcVar;        // Running sum of squares maintained when true.
  double _sum;          // Running sum of buffered values.
  double _sumSqr;       // Running sum of squared buffered values.
};
int16_t ChooseCodec(CodecInst& codecInst);
void PrintCodecs();
bool FixedPayloadTypeCodec(const char* payloadName);
// Counts incoming in-band DTMF events, bucketed per digit value, and can
// print a summary of what was detected.
class DTMFDetector : public AudioCodingFeedback {
 public:
  DTMFDetector();
  ~DTMFDetector();
  // used for inband DTMF detection
  int32_t IncomingDtmf(const uint8_t digitDtmf, const bool toneEnded);
  // Prints each detected digit with its accumulated duration.
  void PrintDetectedDigits();
 private:
  uint32_t _toneCntr[1000];  // Per-digit event counters (10 ms per event).
};
// Tallies the frame types reported through the ACM VAD callback and can
// print the totals.
class VADCallback : public ACMVADCallback {
 public:
  VADCallback();
  ~VADCallback() {
  }
  // Counts one frame of |frameType| (indexes the 6-entry counter array —
  // assumes values 0..5, TODO confirm).
  int32_t InFrameType(int16_t frameType);
  void PrintFrameTypes();
  void Reset();
 private:
  uint32_t _numFrameTypes[6];
};
void UseLegacyAcm(webrtc::Config* config);
void UseNewAcm(webrtc::Config* config);
} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_TEST_UTILITY_H_