Reformatting files in the audio coding module.
This CL formats the remaining files in the audio coding module. No other changes are made, except for fixing a few long lines and TODOs without an owner.
BUG=issue1024
Review URL: https://webrtc-codereview.appspot.com/928012
git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@3042 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/modules/audio_coding/main/source/acm_g729.cc b/modules/audio_coding/main/source/acm_g729.cc
index d668ae7..f9bc781 100644
--- a/modules/audio_coding/main/source/acm_g729.cc
+++ b/modules/audio_coding/main/source/acm_g729.cc
@@ -16,25 +16,10 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_G729
- // NOTE! G.729 is not included in the open-source package. The following
- // interface file is needed:
- //
- // /modules/audio_coding/codecs/g729/main/interface/g729_interface.h
- //
- // The API in the header file should match the one below.
- //
- // int16_t WebRtcG729_CreateEnc(G729_encinst_t_** inst);
- // int16_t WebRtcG729_CreateDec(G729_decinst_t_** inst);
- // int16_t WebRtcG729_FreeEnc(G729_encinst_t_* inst);
- // int16_t WebRtcG729_FreeDec(G729_decinst_t_* inst);
- // int16_t WebRtcG729_Encode(G729_encinst_t_* encInst, int16_t* input,
- // int16_t len, int16_t* output);
- // int16_t WebRtcG729_EncoderInit(G729_encinst_t_* encInst, int16_t mode);
- // int16_t WebRtcG729_Decode(G729_decinst_t_* decInst);
- // int16_t WebRtcG729_DecodeBwe(G729_decinst_t_* decInst, int16_t* input);
- // int16_t WebRtcG729_DecodePlc(G729_decinst_t_* decInst);
- // int16_t WebRtcG729_DecoderInit(G729_decinst_t_* decInst);
- #include "g729_interface.h"
+// NOTE! G.729 is not included in the open-source package. Modify this file
+// or your codec API so that the function calls and names match those of the
+// G.729 API file you use.
+#include "g729_interface.h"
#endif
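For reference, a minimal sketch of the declarations a matching g729_interface.h would need is given below. The function signatures follow the comment block removed in the hunk above; the include and the opaque-handle typedefs are assumptions about how the proprietary header defines them.

// Hypothetical g729_interface.h sketch (the real header is not part of the
// open-source package). Signatures as listed in the removed comment block;
// the opaque-handle typedefs are assumptions.
#include <stdint.h>

typedef struct G729_encinst_t_ G729_encinst_t_;
typedef struct G729_decinst_t_ G729_decinst_t_;

int16_t WebRtcG729_CreateEnc(G729_encinst_t_** inst);
int16_t WebRtcG729_CreateDec(G729_decinst_t_** inst);
int16_t WebRtcG729_FreeEnc(G729_encinst_t_* inst);
int16_t WebRtcG729_FreeDec(G729_decinst_t_* inst);
int16_t WebRtcG729_EncoderInit(G729_encinst_t_* encInst, int16_t mode);
int16_t WebRtcG729_Encode(G729_encinst_t_* encInst, int16_t* input,
                          int16_t len, int16_t* output);
int16_t WebRtcG729_DecoderInit(G729_decinst_t_* decInst);
int16_t WebRtcG729_Decode(G729_decinst_t_* decInst);
int16_t WebRtcG729_DecodeBwe(G729_decinst_t_* decInst, int16_t* input);
int16_t WebRtcG729_DecodePlc(G729_decinst_t_* decInst);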
namespace webrtc {
@@ -47,469 +32,329 @@
return;
}
-
-ACMG729::~ACMG729()
-{
- return;
+ACMG729::~ACMG729() {
+ return;
}
-
-WebRtc_Word16
-ACMG729::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- return -1;
+WebRtc_Word16 ACMG729::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729::EnableDTX()
-{
- return -1;
+WebRtc_Word16 ACMG729::EnableDTX() {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729::DisableDTX()
-{
- return -1;
+WebRtc_Word16 ACMG729::DisableDTX() {
+ return -1;
}
-WebRtc_Word32
-ACMG729::ReplaceInternalDTXSafe(
- const bool /*replaceInternalDTX*/)
-{
- return -1;
+WebRtc_Word32 ACMG729::ReplaceInternalDTXSafe(
+ const bool /*replaceInternalDTX*/) {
+ return -1;
}
-WebRtc_Word32
-ACMG729::IsInternalDTXReplacedSafe(
- bool* /* internalDTXReplaced */)
-{
- return -1;
+WebRtc_Word32 ACMG729::IsInternalDTXReplacedSafe(
+ bool* /* internalDTXReplaced */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return -1;
+WebRtc_Word16 ACMG729::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMG729::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMG729::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word32
-ACMG729::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
- return -1;
+WebRtc_Word32 ACMG729::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
}
-
-ACMGenericCodec*
-ACMG729::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMG729::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMG729::InternalCreateEncoder()
-{
- return -1;
+WebRtc_Word16 ACMG729::InternalCreateEncoder() {
+ return -1;
}
-
-void
-ACMG729::DestructEncoderSafe()
-{
- return;
+void ACMG729::DestructEncoderSafe() {
+ return;
}
-
-WebRtc_Word16
-ACMG729::InternalCreateDecoder()
-{
- return -1;
+WebRtc_Word16 ACMG729::InternalCreateDecoder() {
+ return -1;
}
-
-void
-ACMG729::DestructDecoderSafe()
-{
- return;
+void ACMG729::DestructDecoderSafe() {
+ return;
}
-
-void
-ACMG729::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
+void ACMG729::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
}
#else //===================== Actual Implementation =======================
-
-ACMG729::ACMG729(
- WebRtc_Word16 codecID):
-_encoderInstPtr(NULL),
-_decoderInstPtr(NULL)
-{
- _codecID = codecID;
- _hasInternalDTX = true;
- return;
+ACMG729::ACMG729(WebRtc_Word16 codecID)
+ : _encoderInstPtr(NULL),
+ _decoderInstPtr(NULL) {
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ return;
}
-
-ACMG729::~ACMG729()
-{
- if(_encoderInstPtr != NULL)
- {
- // Delete encoder memory
- WebRtcG729_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
- if(_decoderInstPtr != NULL)
- {
- // Delete decoder memory
- WebRtcG729_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- return;
+ACMG729::~ACMG729() {
+ if (_encoderInstPtr != NULL) {
+ // Delete encoder memory
+ WebRtcG729_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if (_decoderInstPtr != NULL) {
+ // Delete decoder memory
+ WebRtcG729_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
}
+WebRtc_Word16 ACMG729::InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) {
+ // Initialize before entering the loop
+ WebRtc_Word16 noEncodedSamples = 0;
+ WebRtc_Word16 tmpLenByte = 0;
+ WebRtc_Word16 vadDecision = 0;
+ *bitStreamLenByte = 0;
+ while (noEncodedSamples < _frameLenSmpl) {
+ // Call G.729 encoder with pointer to encoder memory, input
+    // audio, number of samples and bitstream
+ tmpLenByte = WebRtcG729_Encode(
+ _encoderInstPtr, &_inAudio[_inAudioIxRead], 80,
+ (WebRtc_Word16*) (&(bitStream[*bitStreamLenByte])));
-WebRtc_Word16
-ACMG729::InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte)
-{
- // Initialize before entering the loop
- WebRtc_Word16 noEncodedSamples = 0;
- WebRtc_Word16 tmpLenByte = 0;
- WebRtc_Word16 vadDecision = 0;
- *bitStreamLenByte = 0;
- while(noEncodedSamples < _frameLenSmpl)
- {
- // Call G.729 encoder with pointer to encoder memory, input
- // audio, number of samples and bitsream
- tmpLenByte = WebRtcG729_Encode(_encoderInstPtr,
- &_inAudio[_inAudioIxRead], 80,
- (WebRtc_Word16*)(&(bitStream[*bitStreamLenByte])));
+    // Increment the read index. This tells the caller how far we have
+    // gone forward in reading the audio buffer.
+ _inAudioIxRead += 80;
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
- _inAudioIxRead += 80;
-
- // sanity check
- if(tmpLenByte < 0)
- {
- // error has happened
- *bitStreamLenByte = 0;
- return -1;
- }
-
- // increment number of written bytes
- *bitStreamLenByte += tmpLenByte;
- switch(tmpLenByte)
- {
- case 0:
- {
- if(0 == noEncodedSamples)
- {
- // this is the first 10 ms in this packet and there is
- // no data generated, perhaps DTX is enabled and the
- // codec is not generating any bit-stream for this 10 ms.
- // we do not continue encoding this frame.
- return 0;
- }
- break;
- }
- case 2:
- {
- // check if G.729 internal DTX is enabled
- if(_hasInternalDTX && _dtxEnabled)
- {
- vadDecision = 0;
- for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
- {
- _vadLabel[n] = vadDecision;
- }
- }
- // we got a SID and have to send out this packet no matter
- // how much audio we have encoded
- return *bitStreamLenByte;
- }
- case 10:
- {
- vadDecision = 1;
- // this is a valid length just continue encoding
- break;
- }
- default:
- {
- return -1;
- }
- }
-
- // update number of encoded samples
- noEncodedSamples += 80;
+ // sanity check
+ if (tmpLenByte < 0) {
+ // error has happened
+ *bitStreamLenByte = 0;
+ return -1;
}
- // update VAD decision vector
- if(_hasInternalDTX && !vadDecision && _dtxEnabled)
- {
- for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
- {
+ // increment number of written bytes
+ *bitStreamLenByte += tmpLenByte;
+ switch (tmpLenByte) {
+ case 0: {
+ if (0 == noEncodedSamples) {
+ // this is the first 10 ms in this packet and there is
+ // no data generated, perhaps DTX is enabled and the
+ // codec is not generating any bit-stream for this 10 ms.
+ // we do not continue encoding this frame.
+ return 0;
+ }
+ break;
+ }
+ case 2: {
+ // check if G.729 internal DTX is enabled
+ if (_hasInternalDTX && _dtxEnabled) {
+ vadDecision = 0;
+ for (WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
_vadLabel[n] = vadDecision;
+ }
}
- }
-
- // done encoding, return number of encoded bytes
- return *bitStreamLenByte;
-}
-
-
-WebRtc_Word16
-ACMG729::EnableDTX()
-{
- if(_dtxEnabled)
- {
- // DTX already enabled, do nothing
- return 0;
- }
- else if(_encoderExist)
- {
- // Re-init the G.729 encoder to turn on DTX
- if(WebRtcG729_EncoderInit(_encoderInstPtr, 1) < 0)
- {
- return -1;
- }
- _dtxEnabled = true;
- return 0;
- }
- else
- {
+ // we got a SID and have to send out this packet no matter
+ // how much audio we have encoded
+ return *bitStreamLenByte;
+ }
+ case 10: {
+ vadDecision = 1;
+ // this is a valid length just continue encoding
+ break;
+ }
+ default: {
return -1;
+ }
}
+
+ // update number of encoded samples
+ noEncodedSamples += 80;
+ }
+
+ // update VAD decision vector
+ if (_hasInternalDTX && !vadDecision && _dtxEnabled) {
+ for (WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
+ _vadLabel[n] = vadDecision;
+ }
+ }
+
+ // done encoding, return number of encoded bytes
+ return *bitStreamLenByte;
}
-
-WebRtc_Word16
-ACMG729::DisableDTX()
-{
- if(!_dtxEnabled)
- {
- // DTX already dissabled, do nothing
- return 0;
- }
- else if(_encoderExist)
- {
- // Re-init the G.729 decoder to turn off DTX
- if(WebRtcG729_EncoderInit(_encoderInstPtr, 0) < 0)
- {
- return -1;
- }
- _dtxEnabled = false;
- return 0;
- }
- else
- {
- // encoder doesn't exists, therefore disabling is harmless
- return 0;
- }
-}
-
-
-WebRtc_Word32
-ACMG729::ReplaceInternalDTXSafe(
- const bool replaceInternalDTX)
-{
- // This function is used to dissable the G.729 built in DTX and use an
- // external instead.
-
- if(replaceInternalDTX == _hasInternalDTX)
- {
- // Make sure we keep the DTX/VAD setting if possible
- bool oldEnableDTX = _dtxEnabled;
- bool oldEnableVAD = _vadEnabled;
- ACMVADMode oldMode = _vadMode;
- if (replaceInternalDTX)
- {
- // Disable internal DTX before enabling external DTX
- DisableDTX();
- }
- else
- {
- // Disable external DTX before enabling internal
- ACMGenericCodec::DisableDTX();
- }
- _hasInternalDTX = !replaceInternalDTX;
- WebRtc_Word16 status = SetVADSafe(oldEnableDTX, oldEnableVAD, oldMode);
- // Check if VAD status has changed from inactive to active, or if error was
- // reported
- if (status == 1) {
- _vadEnabled = true;
- return status;
- } else if (status < 0) {
- _hasInternalDTX = replaceInternalDTX;
- return -1;
- }
- }
+WebRtc_Word16 ACMG729::EnableDTX() {
+ if (_dtxEnabled) {
+ // DTX already enabled, do nothing
return 0;
-}
-
-
-WebRtc_Word32
-ACMG729::IsInternalDTXReplacedSafe(
- bool* internalDTXReplaced)
-{
- // Get status of wether DTX is replaced or not
- *internalDTXReplaced = !_hasInternalDTX;
+ } else if (_encoderExist) {
+ // Re-init the G.729 encoder to turn on DTX
+ if (WebRtcG729_EncoderInit(_encoderInstPtr, 1) < 0) {
+ return -1;
+ }
+ _dtxEnabled = true;
return 0;
+ } else {
+ return -1;
+ }
}
-
-WebRtc_Word16
-ACMG729::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- // This function is not used. G.729 decoder is called from inside NetEQ
+WebRtc_Word16 ACMG729::DisableDTX() {
+ if (!_dtxEnabled) {
+    // DTX already disabled, do nothing
return 0;
-}
-
-
-WebRtc_Word16
-ACMG729::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- // Init G.729 encoder
- return WebRtcG729_EncoderInit(_encoderInstPtr,
- ((codecParams->enableDTX)? 1:0));
-}
-
-
-WebRtc_Word16
-ACMG729::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- // Init G.729 decoder
- return WebRtcG729_DecoderInit(_decoderInstPtr);
-}
-
-
-WebRtc_Word32
-ACMG729::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- // Todo:
- // log error
- return -1;
+ } else if (_encoderExist) {
+    // Re-init the G.729 encoder to turn off DTX
+ if (WebRtcG729_EncoderInit(_encoderInstPtr, 0) < 0) {
+ return -1;
}
-
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_G729_FUNCTION."
- // Then call NetEQ to add the codec to it's
- // database.
- SET_CODEC_PAR((codecDef), kDecoderG729, codecInst.pltype,
- _decoderInstPtr, 8000);
- SET_G729_FUNCTIONS((codecDef));
+ _dtxEnabled = false;
return 0;
+ } else {
+    // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
}
+WebRtc_Word32 ACMG729::ReplaceInternalDTXSafe(const bool replaceInternalDTX) {
+  // This function is used to disable the G.729 built-in DTX and use an
+  // external DTX instead.
-ACMGenericCodec*
-ACMG729::CreateInstance(void)
-{
- // Function not used
- return NULL;
-}
-
-
-WebRtc_Word16
-ACMG729::InternalCreateEncoder()
-{
- // Create encoder memory
- return WebRtcG729_CreateEnc(&_encoderInstPtr);
-}
-
-
-void
-ACMG729::DestructEncoderSafe()
-{
- // Free encoder memory
- _encoderExist = false;
- _encoderInitialized = false;
- if(_encoderInstPtr != NULL)
- {
- WebRtcG729_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
+ if (replaceInternalDTX == _hasInternalDTX) {
+ // Make sure we keep the DTX/VAD setting if possible
+ bool oldEnableDTX = _dtxEnabled;
+ bool oldEnableVAD = _vadEnabled;
+ ACMVADMode oldMode = _vadMode;
+ if (replaceInternalDTX) {
+ // Disable internal DTX before enabling external DTX
+ DisableDTX();
+ } else {
+ // Disable external DTX before enabling internal
+ ACMGenericCodec::DisableDTX();
}
-}
-
-
-WebRtc_Word16
-ACMG729::InternalCreateDecoder()
-{
- // Create decoder memory
- return WebRtcG729_CreateDec(&_decoderInstPtr);
-}
-
-
-void
-ACMG729::DestructDecoderSafe()
-{
- // Free decoder memory
- _decoderExist = false;
- _decoderInitialized = false;
- if(_decoderInstPtr != NULL)
- {
- WebRtcG729_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
+ _hasInternalDTX = !replaceInternalDTX;
+ WebRtc_Word16 status = SetVADSafe(oldEnableDTX, oldEnableVAD, oldMode);
+ // Check if VAD status has changed from inactive to active, or if error was
+ // reported
+ if (status == 1) {
+ _vadEnabled = true;
+ return status;
+ } else if (status < 0) {
+ _hasInternalDTX = replaceInternalDTX;
+ return -1;
}
+ }
+ return 0;
}
+WebRtc_Word32 ACMG729::IsInternalDTXReplacedSafe(bool* internalDTXReplaced) {
+  // Get status of whether DTX is replaced or not
+ *internalDTXReplaced = !_hasInternalDTX;
+ return 0;
+}
-void
-ACMG729::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- WebRtcG729_FreeEnc((G729_encinst_t_*)ptrInst);
- }
- return;
+WebRtc_Word16 ACMG729::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ // This function is not used. G.729 decoder is called from inside NetEQ
+ return 0;
+}
+
+WebRtc_Word16 ACMG729::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+ // Init G.729 encoder
+ return WebRtcG729_EncoderInit(_encoderInstPtr,
+ ((codecParams->enableDTX) ? 1 : 0));
+}
+
+WebRtc_Word16 ACMG729::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ // Init G.729 decoder
+ return WebRtcG729_DecoderInit(_decoderInstPtr);
+}
+
+WebRtc_Word32 ACMG729::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ // Todo:
+ // log error
+ return -1;
+ }
+
+ // Fill up the structure by calling
+  // "SET_CODEC_PAR" & "SET_G729_FUNCTIONS".
+  // Then call NetEQ to add the codec to its
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderG729, codecInst.pltype, _decoderInstPtr,
+ 8000);
+ SET_G729_FUNCTIONS((codecDef));
+ return 0;
+}
+
+ACMGenericCodec* ACMG729::CreateInstance(void) {
+ // Function not used
+ return NULL;
+}
+
+WebRtc_Word16 ACMG729::InternalCreateEncoder() {
+ // Create encoder memory
+ return WebRtcG729_CreateEnc(&_encoderInstPtr);
+}
+
+void ACMG729::DestructEncoderSafe() {
+ // Free encoder memory
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if (_encoderInstPtr != NULL) {
+ WebRtcG729_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+WebRtc_Word16 ACMG729::InternalCreateDecoder() {
+ // Create decoder memory
+ return WebRtcG729_CreateDec(&_decoderInstPtr);
+}
+
+void ACMG729::DestructDecoderSafe() {
+ // Free decoder memory
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if (_decoderInstPtr != NULL) {
+ WebRtcG729_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+void ACMG729::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ WebRtcG729_FreeEnc((G729_encinst_t_*) ptrInst);
+ }
+ return;
}
#endif
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_g729.h b/modules/audio_coding/main/source/acm_g729.h
index 474dabd..5d8d4a8 100644
--- a/modules/audio_coding/main/source/acm_g729.h
+++ b/modules/audio_coding/main/source/acm_g729.h
@@ -19,62 +19,53 @@
namespace webrtc {
-class ACMG729 : public ACMGenericCodec
-{
-public:
- ACMG729(WebRtc_Word16 codecID);
- ~ACMG729();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMG729 : public ACMGenericCodec {
+ public:
+ ACMG729(WebRtc_Word16 codecID);
+ ~ACMG729();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- WebRtc_Word16 EnableDTX();
+ WebRtc_Word16 EnableDTX();
- WebRtc_Word16 DisableDTX();
+ WebRtc_Word16 DisableDTX();
- WebRtc_Word32 ReplaceInternalDTXSafe(
- const bool replaceInternalDTX);
+ WebRtc_Word32 ReplaceInternalDTXSafe(const bool replaceInternalDTX);
- WebRtc_Word32 IsInternalDTXReplacedSafe(
- bool* internalDTXReplaced);
+ WebRtc_Word32 IsInternalDTXReplacedSafe(bool* internalDTXReplaced);
- G729_encinst_t_* _encoderInstPtr;
- G729_decinst_t_* _decoderInstPtr;
+ G729_encinst_t_* _encoderInstPtr;
+ G729_decinst_t_* _decoderInstPtr;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_G729_H_
diff --git a/modules/audio_coding/main/source/acm_g7291.cc b/modules/audio_coding/main/source/acm_g7291.cc
index cc8783f..0726dec 100644
--- a/modules/audio_coding/main/source/acm_g7291.cc
+++ b/modules/audio_coding/main/source/acm_g7291.cc
@@ -16,24 +16,9 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_G729_1
- // NOTE! G.729.1 is not included in the open-source package. The following
- // interface file is needed:
- //
- // /modules/audio_coding/codecs/g7291/main/interface/g7291_interface.h
- //
- // The API in the header file should match the one below.
- //
- // int16_t WebRtcG7291_Create(G729_1_inst_t_** inst);
- // int16_t WebRtcG7291_Free(G729_1_inst_t_* inst);
- // int16_t WebRtcG7291_Encode(G729_1_inst_t_* encInst, int16_t* input,
- // int16_t* output, int16_t myRate,
- // int16_t nrFrames);
- // int16_t WebRtcG7291_EncoderInit(G729_1_inst_t_* encInst, int16_t myRate,
- // int16_t flag8kHz, int16_t flagG729mode);
- // int16_t WebRtcG7291_Decode(G729_1_inst_t_* decInst);
- // int16_t WebRtcG7291_DecodeBwe(G729_1_inst_t_* decInst, int16_t* input);
- // int16_t WebRtcG7291_DecodePlc(G729_1_inst_t_* decInst);
- // int16_t WebRtcG7291_DecoderInit(G729_1_inst_t_* decInst);
+// NOTE! G.729.1 is not included in the open-source package. Modify this file
+// or your codec API so that the function calls and names match those of the
+// G.729.1 API file you use.
#include "g7291_interface.h"
#endif
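Similarly, a minimal sketch of the g7291_interface.h declarations this wrapper relies on follows, based on the comment block removed in the hunk above; the include and the opaque-handle typedef are assumptions.

// Hypothetical g7291_interface.h sketch (the real header is not part of the
// open-source package). Signatures as listed in the removed comment block.
#include <stdint.h>

typedef struct G729_1_inst_t_ G729_1_inst_t_;

int16_t WebRtcG7291_Create(G729_1_inst_t_** inst);
int16_t WebRtcG7291_Free(G729_1_inst_t_* inst);
int16_t WebRtcG7291_EncoderInit(G729_1_inst_t_* encInst, int16_t myRate,
                                int16_t flag8kHz, int16_t flagG729mode);
int16_t WebRtcG7291_Encode(G729_1_inst_t_* encInst, int16_t* input,
                           int16_t* output, int16_t myRate,
                           int16_t nrFrames);
int16_t WebRtcG7291_DecoderInit(G729_1_inst_t_* decInst);
int16_t WebRtcG7291_Decode(G729_1_inst_t_* decInst);
int16_t WebRtcG7291_DecodeBwe(G729_1_inst_t_* decInst, int16_t* input);
int16_t WebRtcG7291_DecodePlc(G729_1_inst_t_* decInst);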
@@ -41,7 +26,7 @@
#ifndef WEBRTC_CODEC_G729_1
-ACMG729_1::ACMG729_1( WebRtc_Word16 /* codecID */)
+ACMG729_1::ACMG729_1(WebRtc_Word16 /* codecID */)
: _encoderInstPtr(NULL),
_decoderInstPtr(NULL),
_myRate(32000),
@@ -50,105 +35,63 @@
return;
}
-
-ACMG729_1::~ACMG729_1()
-{
- return;
+ACMG729_1::~ACMG729_1() {
+ return;
}
-
-WebRtc_Word16
-ACMG729_1::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- return -1;
+WebRtc_Word16 ACMG729_1::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729_1::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return -1;
+WebRtc_Word16 ACMG729_1::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729_1::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMG729_1::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMG729_1::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMG729_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word32
-ACMG729_1::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
- return -1;
+WebRtc_Word32 ACMG729_1::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
}
-
-ACMGenericCodec*
-ACMG729_1::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMG729_1::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMG729_1::InternalCreateEncoder()
-{
- return -1;
+WebRtc_Word16 ACMG729_1::InternalCreateEncoder() {
+ return -1;
}
-
-void
-ACMG729_1::DestructEncoderSafe()
-{
- return;
+void ACMG729_1::DestructEncoderSafe() {
+ return;
}
-
-WebRtc_Word16
-ACMG729_1::InternalCreateDecoder()
-{
- return -1;
+WebRtc_Word16 ACMG729_1::InternalCreateDecoder() {
+ return -1;
}
-
-void
-ACMG729_1::DestructDecoderSafe()
-{
- return;
+void ACMG729_1::DestructDecoderSafe() {
+ return;
}
-
-void
-ACMG729_1::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
+void ACMG729_1::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
}
-WebRtc_Word16
-ACMG729_1::SetBitRateSafe(
- const WebRtc_Word32 /*rate*/ )
-{
+WebRtc_Word16 ACMG729_1::SetBitRateSafe(const WebRtc_Word32 /*rate*/) {
return -1;
}
@@ -168,304 +111,233 @@
return;
}
-ACMG729_1::~ACMG729_1()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcG7291_Free(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
- if(_decoderInstPtr != NULL)
- {
- WebRtcG7291_Free(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- return;
+ACMG729_1::~ACMG729_1() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcG7291_Free(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if (_decoderInstPtr != NULL) {
+ WebRtcG7291_Free(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
}
+WebRtc_Word16 ACMG729_1::InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) {
-WebRtc_Word16
-ACMG729_1::InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte)
-{
-
- // Initialize before entering the loop
- WebRtc_Word16 noEncodedSamples = 0;
- *bitStreamLenByte = 0;
+ // Initialize before entering the loop
+ WebRtc_Word16 noEncodedSamples = 0;
+ *bitStreamLenByte = 0;
WebRtc_Word16 byteLengthFrame = 0;
- // Derive number of 20ms frames per encoded packet.
+ // Derive number of 20ms frames per encoded packet.
// [1,2,3] <=> [20,40,60]ms <=> [320,640,960] samples
- WebRtc_Word16 n20msFrames = (_frameLenSmpl / 320);
- // Byte length for the frame. +1 is for rate information.
- byteLengthFrame = _myRate/(8*50) * n20msFrames + (1 - _flagG729mode);
+ WebRtc_Word16 n20msFrames = (_frameLenSmpl / 320);
+ // Byte length for the frame. +1 is for rate information.
+ byteLengthFrame = _myRate / (8 * 50) * n20msFrames + (1 - _flagG729mode);
- // The following might be revised if we have G729.1 Annex C (support for DTX);
- do
- {
- *bitStreamLenByte = WebRtcG7291_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
- (WebRtc_Word16*)bitStream, _myRate, n20msFrames);
+ // The following might be revised if we have G729.1 Annex C (support for DTX);
+ do {
+ *bitStreamLenByte = WebRtcG7291_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead],
+ (WebRtc_Word16*) bitStream, _myRate,
+ n20msFrames);
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
+    // Increment the read index. This tells the caller how far we have
+    // gone forward in reading the audio buffer.
_inAudioIxRead += 160;
- // sanity check
- if(*bitStreamLenByte < 0)
- {
+ // sanity check
+ if (*bitStreamLenByte < 0) {
// error has happened
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalEncode: Encode error for G729_1");
- *bitStreamLenByte = 0;
- return -1;
- }
-
- noEncodedSamples += 160;
- } while(*bitStreamLenByte == 0);
-
-
- // This criteria will change if we have Annex C.
- if(*bitStreamLenByte != byteLengthFrame)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalEncode: Encode error for G729_1");
- *bitStreamLenByte = 0;
- return -1;
- }
-
-
- if(noEncodedSamples != _frameLenSmpl)
- {
- *bitStreamLenByte = 0;
- return -1;
- }
-
- return *bitStreamLenByte;
-}
-
-
-WebRtc_Word16
-ACMG729_1::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return 0;
-}
-
-
-WebRtc_Word16
-ACMG729_1::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- //set the bit rate and initialize
- _myRate = codecParams->codecInstant.rate;
- return SetBitRateSafe( (WebRtc_UWord32)_myRate);
-}
-
-
-WebRtc_Word16
-ACMG729_1::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- if (WebRtcG7291_DecoderInit(_decoderInstPtr) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalInitDecoder: init decoder failed for G729_1");
- return -1;
- }
- return 0;
-}
-
-
-WebRtc_Word32
-ACMG729_1::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "CodeDef: Decoder uninitialized for G729_1");
+ "InternalEncode: Encode error for G729_1");
+ *bitStreamLenByte = 0;
return -1;
}
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_G729_FUNCTION."
- // Then call NetEQ to add the codec to it's
- // database.
- SET_CODEC_PAR((codecDef), kDecoderG729_1, codecInst.pltype,
- _decoderInstPtr, 16000);
- SET_G729_1_FUNCTIONS((codecDef));
- return 0;
-}
+ noEncodedSamples += 160;
+ } while (*bitStreamLenByte == 0);
-
-ACMGenericCodec*
-ACMG729_1::CreateInstance(void)
-{
- return NULL;
-}
-
-
-WebRtc_Word16
-ACMG729_1::InternalCreateEncoder()
-{
- if (WebRtcG7291_Create(&_encoderInstPtr) < 0)
- {
+ // This criteria will change if we have Annex C.
+ if (*bitStreamLenByte != byteLengthFrame) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateEncoder: create encoder failed for G729_1");
+ "InternalEncode: Encode error for G729_1");
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ if (noEncodedSamples != _frameLenSmpl) {
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ return *bitStreamLenByte;
+}
+
+WebRtc_Word16 ACMG729_1::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
+}
+
+WebRtc_Word16 ACMG729_1::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams) {
+  // Set the bit rate and initialize.
+ _myRate = codecParams->codecInstant.rate;
+ return SetBitRateSafe((WebRtc_UWord32) _myRate);
+}
+
+WebRtc_Word16 ACMG729_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ if (WebRtcG7291_DecoderInit(_decoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: init decoder failed for G729_1");
return -1;
}
return 0;
}
+WebRtc_Word32 ACMG729_1::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodeDef: Decoder uninitialized for G729_1");
+ return -1;
+ }
-void
-ACMG729_1::DestructEncoderSafe()
-{
- _encoderExist = false;
- _encoderInitialized = false;
- if(_encoderInstPtr != NULL)
- {
- WebRtcG7291_Free(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
+ // Fill up the structure by calling
+  // "SET_CODEC_PAR" & "SET_G729_1_FUNCTIONS".
+  // Then call NetEQ to add the codec to its
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderG729_1, codecInst.pltype, _decoderInstPtr,
+ 16000);
+ SET_G729_1_FUNCTIONS((codecDef));
+ return 0;
}
+ACMGenericCodec* ACMG729_1::CreateInstance(void) {
+ return NULL;
+}
-WebRtc_Word16
-ACMG729_1::InternalCreateDecoder()
-{
- if (WebRtcG7291_Create(&_decoderInstPtr) < 0)
- {
+WebRtc_Word16 ACMG729_1::InternalCreateEncoder() {
+ if (WebRtcG7291_Create(&_encoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: create encoder failed for G729_1");
+ return -1;
+ }
+ return 0;
+}
+
+void ACMG729_1::DestructEncoderSafe() {
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if (_encoderInstPtr != NULL) {
+ WebRtcG7291_Free(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+WebRtc_Word16 ACMG729_1::InternalCreateDecoder() {
+ if (WebRtcG7291_Create(&_decoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: create decoder failed for G729_1");
+ return -1;
+ }
+ return 0;
+}
+
+void ACMG729_1::DestructDecoderSafe() {
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if (_decoderInstPtr != NULL) {
+ WebRtcG7291_Free(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+void ACMG729_1::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ //WebRtcG7291_Free((G729_1_inst_t*)ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16 ACMG729_1::SetBitRateSafe(const WebRtc_Word32 rate) {
+ //allowed rates: { 8000, 12000, 14000, 16000, 18000, 20000,
+ // 22000, 24000, 26000, 28000, 30000, 32000};
+  // TODO(tlegrand): This check exists in one other place too. Should be
+ // possible to reuse code.
+ switch (rate) {
+ case 8000: {
+ _myRate = 8000;
+ break;
+ }
+ case 12000: {
+ _myRate = 12000;
+ break;
+ }
+ case 14000: {
+ _myRate = 14000;
+ break;
+ }
+ case 16000: {
+ _myRate = 16000;
+ break;
+ }
+ case 18000: {
+ _myRate = 18000;
+ break;
+ }
+ case 20000: {
+ _myRate = 20000;
+ break;
+ }
+ case 22000: {
+ _myRate = 22000;
+ break;
+ }
+ case 24000: {
+ _myRate = 24000;
+ break;
+ }
+ case 26000: {
+ _myRate = 26000;
+ break;
+ }
+ case 28000: {
+ _myRate = 28000;
+ break;
+ }
+ case 30000: {
+ _myRate = 30000;
+ break;
+ }
+ case 32000: {
+ _myRate = 32000;
+ break;
+ }
+ default: {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateDecoder: create decoder failed for G729_1");
- return -1;
- }
- return 0;
+ "SetBitRateSafe: Invalid rate G729_1");
+ return -1;
+ }
+ }
+
+ // Re-init with new rate
+ if (WebRtcG7291_EncoderInit(_encoderInstPtr, _myRate, _flag8kHz,
+ _flagG729mode) >= 0) {
+ _encoderParams.codecInstant.rate = _myRate;
+ return 0;
+ } else {
+ return -1;
+ }
}
-
-void
-ACMG729_1::DestructDecoderSafe()
-{
- _decoderExist = false;
- _decoderInitialized = false;
- if(_decoderInstPtr != NULL)
- {
- WebRtcG7291_Free(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
-}
-
-
-void
-ACMG729_1::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- //WebRtcG7291_Free((G729_1_inst_t*)ptrInst);
- }
- return;
-}
-
-WebRtc_Word16
-ACMG729_1::SetBitRateSafe(
- const WebRtc_Word32 rate)
-{
- //allowed rates: { 8000, 12000, 14000, 16000, 18000, 20000,
- // 22000, 24000, 26000, 28000, 30000, 32000};
- // TODO(tlegrand): This check exists in one other place two. Should be
- // possible to reuse code.
- switch(rate)
- {
- case 8000:
- {
- _myRate = 8000;
- break;
- }
- case 12000:
- {
- _myRate = 12000;
- break;
- }
- case 14000:
- {
- _myRate = 14000;
- break;
- }
- case 16000:
- {
- _myRate = 16000;
- break;
- }
- case 18000:
- {
- _myRate = 18000;
- break;
- }
- case 20000:
- {
- _myRate = 20000;
- break;
- }
- case 22000:
- {
- _myRate = 22000;
- break;
- }
- case 24000:
- {
- _myRate = 24000;
- break;
- }
- case 26000:
- {
- _myRate = 26000;
- break;
- }
- case 28000:
- {
- _myRate = 28000;
- break;
- }
- case 30000:
- {
- _myRate = 30000;
- break;
- }
- case 32000:
- {
- _myRate = 32000;
- break;
- }
- default:
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetBitRateSafe: Invalid rate G729_1");
- return -1;
- }
- }
-
- // Re-init with new rate
- if (WebRtcG7291_EncoderInit(_encoderInstPtr, _myRate, _flag8kHz, _flagG729mode) >= 0)
- {
- _encoderParams.codecInstant.rate = _myRate;
- return 0;
- }
- else
- {
- return -1;
- }
-}
-
-
#endif
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_g7291.h b/modules/audio_coding/main/source/acm_g7291.h
index 1d03250..35f07ff 100644
--- a/modules/audio_coding/main/source/acm_g7291.h
+++ b/modules/audio_coding/main/source/acm_g7291.h
@@ -19,59 +19,50 @@
namespace webrtc {
-class ACMG729_1: public ACMGenericCodec
-{
-public:
- ACMG729_1(WebRtc_Word16 codecID);
- ~ACMG729_1();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMG729_1 : public ACMGenericCodec {
+ public:
+ ACMG729_1(WebRtc_Word16 codecID);
+ ~ACMG729_1();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- WebRtc_Word16 SetBitRateSafe(
- const WebRtc_Word32 rate);
+ WebRtc_Word16 SetBitRateSafe(const WebRtc_Word32 rate);
- G729_1_inst_t_* _encoderInstPtr;
- G729_1_inst_t_* _decoderInstPtr;
+ G729_1_inst_t_* _encoderInstPtr;
+ G729_1_inst_t_* _decoderInstPtr;
- WebRtc_UWord16 _myRate;
- WebRtc_Word16 _flag8kHz;
- WebRtc_Word16 _flagG729mode;
-
+ WebRtc_UWord16 _myRate;
+ WebRtc_Word16 _flag8kHz;
+ WebRtc_Word16 _flagG729mode;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_G729_1_H_
diff --git a/modules/audio_coding/main/source/acm_generic_codec.cc b/modules/audio_coding/main/source/acm_generic_codec.cc
index f98f260..d3dfe1f 100644
--- a/modules/audio_coding/main/source/acm_generic_codec.cc
+++ b/modules/audio_coding/main/source/acm_generic_codec.cc
@@ -19,14 +19,12 @@
#include "webrtc_vad.h"
#include "webrtc_cng.h"
-namespace webrtc
-{
+namespace webrtc {
// Enum for CNG
-enum
-{
- kMaxPLCParamsCNG = WEBRTC_CNG_MAX_LPC_ORDER,
- kNewCNGNumPLCParams = 8
+enum {
+ kMaxPLCParamsCNG = WEBRTC_CNG_MAX_LPC_ORDER,
+ kNewCNGNumPLCParams = 8
};
#define ACM_SID_INTERVAL_MSEC 100
@@ -78,1469 +76,1177 @@
_decoderParams.codecInstant.pltype = -1;
}
-ACMGenericCodec::~ACMGenericCodec()
-{
- // Check all the members which are pointers and
- // if they are not NULL delete/free them.
+ACMGenericCodec::~ACMGenericCodec() {
+ // Check all the members which are pointers and
+ // if they are not NULL delete/free them.
- if(_ptrVADInst != NULL)
- {
- WebRtcVad_Free(_ptrVADInst);
- _ptrVADInst = NULL;
- }
+ if (_ptrVADInst != NULL) {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
- if (_inAudio != NULL)
- {
- delete [] _inAudio;
- _inAudio = NULL;
- }
+ if (_inAudio != NULL) {
+ delete[] _inAudio;
+ _inAudio = NULL;
+ }
- if (_inTimestamp != NULL)
- {
- delete [] _inTimestamp;
- _inTimestamp = NULL;
- }
- if(_ptrDTXInst != NULL)
- {
- WebRtcCng_FreeEnc(_ptrDTXInst);
- _ptrDTXInst = NULL;
- }
- delete &_codecWrapperLock;
+ if (_inTimestamp != NULL) {
+ delete[] _inTimestamp;
+ _inTimestamp = NULL;
+ }
+ if (_ptrDTXInst != NULL) {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ delete &_codecWrapperLock;
}
-WebRtc_Word32
-ACMGenericCodec::Add10MsData(
- const WebRtc_UWord32 timestamp,
- const WebRtc_Word16* data,
- const WebRtc_UWord16 lengthSmpl,
- const WebRtc_UWord8 audioChannel)
-{
- WriteLockScoped wl(_codecWrapperLock);
- return Add10MsDataSafe(timestamp, data, lengthSmpl, audioChannel);
+WebRtc_Word32 ACMGenericCodec::Add10MsData(const WebRtc_UWord32 timestamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl,
+ const WebRtc_UWord8 audioChannel) {
+ WriteLockScoped wl(_codecWrapperLock);
+ return Add10MsDataSafe(timestamp, data, lengthSmpl, audioChannel);
}
-WebRtc_Word32
-ACMGenericCodec::Add10MsDataSafe(
- const WebRtc_UWord32 timestamp,
- const WebRtc_Word16* data,
- const WebRtc_UWord16 lengthSmpl,
- const WebRtc_UWord8 audioChannel)
-{
- // The codec expects to get data in correct sampling rate.
- // get the sampling frequency of the codec
- WebRtc_UWord16 plFreqHz;
+WebRtc_Word32 ACMGenericCodec::Add10MsDataSafe(
+ const WebRtc_UWord32 timestamp, const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl, const WebRtc_UWord8 audioChannel) {
+ // The codec expects to get data in correct sampling rate.
+ // get the sampling frequency of the codec
+ WebRtc_UWord16 plFreqHz;
- if(EncoderSampFreq(plFreqHz) < 0)
- {
- // _codecID is not correct, perhaps the codec is not initialized yet.
- return -1;
+ if (EncoderSampFreq(plFreqHz) < 0) {
+ // _codecID is not correct, perhaps the codec is not initialized yet.
+ return -1;
+ }
+
+ // Sanity check, if the length of the input corresponds to 10 ms.
+ if ((plFreqHz / 100) != lengthSmpl) {
+ // This is not 10 ms of audio, given the sampling frequency of the
+ // codec
+ return -1;
+ }
+ if (_lastTimestamp == timestamp) {
+ // Same timestamp as the last time, overwrite.
+ if ((_inAudioIxWrite >= lengthSmpl * audioChannel) &&
+ (_inTimestampIxWrite > 0)) {
+ _inAudioIxWrite -= lengthSmpl * audioChannel;
+ _inTimestampIxWrite--;
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "Adding 10ms with previous timestamp, overwriting the previous 10ms");
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "Adding 10ms with previous timestamp, this will sound bad");
}
+ }
- // Sanity check, if the length of the input corresponds to 10 ms.
- if((plFreqHz / 100) != lengthSmpl)
- {
- // This is not 10 ms of audio, given the sampling frequency of the
- // codec
- return -1;
- }
- if(_lastTimestamp == timestamp)
- {
- // Same timestamp as the last time, overwrite.
- if((_inAudioIxWrite >= lengthSmpl * audioChannel) &&
- (_inTimestampIxWrite > 0))
- {
- _inAudioIxWrite -= lengthSmpl * audioChannel;
- _inTimestampIxWrite--;
- WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
- "Adding 10ms with previous timestamp, \
-overwriting the previous 10ms");
- }
- else
- {
- WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
- "Adding 10ms with previous timestamp, this will sound bad");
- }
- }
+ _lastTimestamp = timestamp;
- _lastTimestamp = timestamp;
+ if ((_inAudioIxWrite + lengthSmpl * audioChannel) > AUDIO_BUFFER_SIZE_W16) {
+ // Get the number of samples to be overwritten
+ WebRtc_Word16 missedSamples = _inAudioIxWrite + lengthSmpl * audioChannel-
+ AUDIO_BUFFER_SIZE_W16;
- if ((_inAudioIxWrite + lengthSmpl*audioChannel) > AUDIO_BUFFER_SIZE_W16)
- {
- // Get the number of samples to be overwritten
- WebRtc_Word16 missedSamples = _inAudioIxWrite + lengthSmpl*audioChannel -
- AUDIO_BUFFER_SIZE_W16;
+ // Move the data (overwite the old data)
+    // Move the data (overwrite the old data)
+ (AUDIO_BUFFER_SIZE_W16 - lengthSmpl * audioChannel) *
+ sizeof(WebRtc_Word16));
+ // Copy the new data
+ memcpy(_inAudio + (AUDIO_BUFFER_SIZE_W16 - lengthSmpl * audioChannel), data,
+ lengthSmpl * audioChannel * sizeof(WebRtc_Word16));
- // Move the data (overwite the old data)
- memmove(_inAudio, _inAudio + missedSamples,
- (AUDIO_BUFFER_SIZE_W16 - lengthSmpl*audioChannel)*sizeof(WebRtc_Word16));
- // Copy the new data
- memcpy(_inAudio + (AUDIO_BUFFER_SIZE_W16 - lengthSmpl*audioChannel), data,
- lengthSmpl*audioChannel * sizeof(WebRtc_Word16));
+ // Get the number of 10 ms blocks which are overwritten
+ WebRtc_Word16 missed10MsecBlocks = (WebRtc_Word16)(
+ (missedSamples / audioChannel * 100) / plFreqHz);
- // Get the number of 10 ms blocks which are overwritten
- WebRtc_Word16 missed10MsecBlocks =
- (WebRtc_Word16)((missedSamples/audioChannel * 100) / plFreqHz);
-
- // Move the timestamps
- memmove(_inTimestamp, _inTimestamp + missed10MsecBlocks,
- (_inTimestampIxWrite - missed10MsecBlocks) * sizeof(WebRtc_UWord32));
- _inTimestampIxWrite -= missed10MsecBlocks;
- _inTimestamp[_inTimestampIxWrite] = timestamp;
- _inTimestampIxWrite++;
-
- // Buffer is full
- _inAudioIxWrite = AUDIO_BUFFER_SIZE_W16;
- IncreaseNoMissedSamples(missedSamples);
- _isAudioBuffFresh = false;
- return -missedSamples;
- }
- memcpy(_inAudio + _inAudioIxWrite, data, lengthSmpl*audioChannel * sizeof(WebRtc_Word16));
- _inAudioIxWrite += lengthSmpl*audioChannel;
-
- assert(_inTimestampIxWrite < TIMESTAMP_BUFFER_SIZE_W32);
- assert(_inTimestampIxWrite >= 0);
-
+ // Move the timestamps
+ memmove(_inTimestamp, _inTimestamp + missed10MsecBlocks,
+ (_inTimestampIxWrite - missed10MsecBlocks) *
+ sizeof(WebRtc_UWord32));
+ _inTimestampIxWrite -= missed10MsecBlocks;
_inTimestamp[_inTimestampIxWrite] = timestamp;
_inTimestampIxWrite++;
+
+ // Buffer is full
+ _inAudioIxWrite = AUDIO_BUFFER_SIZE_W16;
+ IncreaseNoMissedSamples(missedSamples);
_isAudioBuffFresh = false;
+ return -missedSamples;
+ }
+ memcpy(_inAudio + _inAudioIxWrite, data,
+ lengthSmpl * audioChannel * sizeof(WebRtc_Word16));
+ _inAudioIxWrite += lengthSmpl * audioChannel;
+
+ assert(_inTimestampIxWrite < TIMESTAMP_BUFFER_SIZE_W32);
+ assert(_inTimestampIxWrite >= 0);
+
+ _inTimestamp[_inTimestampIxWrite] = timestamp;
+ _inTimestampIxWrite++;
+ _isAudioBuffFresh = false;
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::Encode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType) {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return EncodeSafe(bitStream, bitStreamLenByte, timeStamp, encodingType);
+}
+
+WebRtc_Word16 ACMGenericCodec::EncodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType) {
+ // Do we have enough data to encode?
+ // we wait until we have a full frame to encode.
+ if (_inAudioIxWrite < _frameLenSmpl * _noChannels) {
+ // There is not enough audio
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ // Doesn't really matter what this parameter set to
+    // Doesn't really matter what this parameter is set to
return 0;
-}
+ }
-WebRtc_Word16
-ACMGenericCodec::Encode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_UWord32* timeStamp,
- WebRtcACMEncodingType* encodingType)
-{
- WriteLockScoped lockCodec(_codecWrapperLock);
- ReadLockScoped lockNetEq(*_netEqDecodeLock);
- return EncodeSafe(bitStream, bitStreamLenByte,
- timeStamp, encodingType);
-}
+ // Not all codecs accept the whole frame to be pushed into
+ // encoder at once.
+ const WebRtc_Word16 myBasicCodingBlockSmpl =
+ ACMCodecDB::BasicCodingBlock(_codecID);
+ if ((myBasicCodingBlockSmpl < 0) || (!_encoderInitialized) ||
+ (!_encoderExist)) {
+ // This should not happen
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncodeSafe: error, basic coding sample block is negative");
+ return -1;
+ }
+  // This makes the internal encoder read from the beginning of the buffer
+ _inAudioIxRead = 0;
+ *timeStamp = _inTimestamp[0];
-WebRtc_Word16
-ACMGenericCodec::EncodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_UWord32* timeStamp,
- WebRtcACMEncodingType* encodingType)
-{
- // Do we have enough data to encode?
- // we wait until we have a full frame to encode.
- if(_inAudioIxWrite < _frameLenSmpl*_noChannels)
- {
- // There is not enough audio
- *timeStamp = 0;
- *bitStreamLenByte = 0;
- // Doesn't really matter what this parameter set to
- *encodingType = kNoEncoding;
- return 0;
- }
+  // Process the audio through VAD. The function doesn't set _vadLabels.
+ // If VAD is disabled all labels are set to ONE (active)
+ WebRtc_Word16 status = 0;
+ WebRtc_Word16 dtxProcessedSamples = 0;
- // Not all codecs accept the whole frame to be pushed into
- // encoder at once.
- const WebRtc_Word16 myBasicCodingBlockSmpl =
- ACMCodecDB::BasicCodingBlock(_codecID);
- if((myBasicCodingBlockSmpl < 0) ||
- (!_encoderInitialized) ||
- (!_encoderExist))
- {
- // This should not happen
- *timeStamp = 0;
- *bitStreamLenByte = 0;
- *encodingType = kNoEncoding;
+ status = ProcessFrameVADDTX(bitStream, bitStreamLenByte,
+ &dtxProcessedSamples);
+
+ if (status < 0) {
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ } else {
+ if (dtxProcessedSamples > 0) {
+      // DTX has processed some samples, and a bit-stream may or may not
+      // have been generated. We should not do any encoding (normally there
+      // will not be enough data).
+
+      // Setting the following makes sure that the move of audio data
+      // and timestamps happens correctly.
+ _inAudioIxRead = dtxProcessedSamples;
+      // This will let the owner of ACMGenericCodec know that the
+      // generated bit-stream is DTX, so the correct payload type is used.
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
+ if (sampFreqHz == 8000) {
+ *encodingType = kPassiveDTXNB;
+ } else if (sampFreqHz == 16000) {
+ *encodingType = kPassiveDTXWB;
+ } else if (sampFreqHz == 32000) {
+ *encodingType = kPassiveDTXSWB;
+ } else if (sampFreqHz == 48000) {
+ *encodingType = kPassiveDTXFB;
+ } else {
+ status = -1;
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EncodeSafe: error, basic coding sample block is negative");
- return -1;
- }
+ "EncodeSafe: Wrong sampling frequency for DTX.");
+ }
- // This makes the internal encoder read from the begining of the buffer
- _inAudioIxRead = 0;
- *timeStamp = _inTimestamp[0];
-
- // Process the audio through VAD the function doesn't set _vadLabels.
- // If VAD is disabled all labels are set to ONE (active)
- WebRtc_Word16 status = 0;
- WebRtc_Word16 dtxProcessedSamples = 0;
-
- status = ProcessFrameVADDTX(bitStream, bitStreamLenByte,
- &dtxProcessedSamples);
-
- if(status < 0)
- {
- *timeStamp = 0;
- *bitStreamLenByte = 0;
+ // Transport empty frame if we have an empty bitstream
+ if ((*bitStreamLenByte == 0) &&
+ (_sentCNPrevious || ((_inAudioIxWrite - _inAudioIxRead) <= 0))) {
+ // Makes sure we transmit an empty frame
+ *bitStreamLenByte = 1;
*encodingType = kNoEncoding;
- }
- else
- {
- if(dtxProcessedSamples > 0)
- {
- // Dtx have processed some samples may or may not a bit-stream
- // is generated we should not do any encoding (normally there
- // will be not enough data)
+ }
+ _sentCNPrevious = true;
+ } else {
+ _sentCNPrevious = false;
+      // This will let the caller of the method know if the frame is
+      // active or non-active. The caller of the method knows that the
+      // stream is encoded by the codec and can use the info for callbacks,
+      // if any are registered.
+ if (myBasicCodingBlockSmpl == 0) {
+ // This codec can handle all allowed frame sizes as basic
+ // coding block
+ status = InternalEncode(bitStream, bitStreamLenByte);
- // Setting the following makes that the move of audio data
- // and timestamps happen correctly
- _inAudioIxRead = dtxProcessedSamples;
- // This will let the owner of ACMGenericCodec to know that the
- // generated bit-stream is DTX to use correct payload type
- WebRtc_UWord16 sampFreqHz;
- EncoderSampFreq(sampFreqHz);
- if (sampFreqHz == 8000) {
- *encodingType = kPassiveDTXNB;
- } else if (sampFreqHz == 16000) {
- *encodingType = kPassiveDTXWB;
- } else if (sampFreqHz == 32000) {
- *encodingType = kPassiveDTXSWB;
- } else if (sampFreqHz == 48000) {
- *encodingType = kPassiveDTXFB;
- } else {
- status = -1;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EncodeSafe: Wrong sampling frequency for DTX.");
- }
-
- // Transport empty frame if we have an empty bitstream
- if ((*bitStreamLenByte == 0)
- && (_sentCNPrevious || ((_inAudioIxWrite - _inAudioIxRead) <= 0))
- )
- {
- // Makes sure we transmit an empty frame
- *bitStreamLenByte = 1;
- *encodingType = kNoEncoding;
- }
- _sentCNPrevious = true;
+ if (status < 0) {
+          // TODO(tlegrand): Maybe resetting the encoder to be fresh for the next
+ // frame.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding,
+ _uniqueID, "EncodeSafe: error in internalEncode");
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
}
- else
- {
- _sentCNPrevious = false;
- // This will let the caller of the method to know if the frame is
- // Active or non-Active The caller of the method knows that the
- // stream is encoded by codec and can use the info for callbacks,
- // if any registered.
- if(myBasicCodingBlockSmpl == 0)
- {
- // This codec can handle all allowed frame sizes as basic
- // coding block
- status = InternalEncode(bitStream, bitStreamLenByte);
+ } else {
+ // A basic-coding-block for this codec is defined so we loop
+ // over the audio with the steps of the basic-coding-block.
+        // It is not necessary that in each iteration
+ WebRtc_Word16 tmpBitStreamLenByte;
- if(status < 0)
- {
- // TODO:
- // Maybe reseting the encoder to be fresh for the next
- // frame
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EncodeSafe: error in internalEncode");
- *bitStreamLenByte = 0;
- *encodingType = kNoEncoding;
- }
- }
- else
- {
- // A basic-coding-block for this codec is defined so we loop
- // over the audio with the steps of the basic-coding-block.
- // It is not necessary that in each itteration
- WebRtc_Word16 tmpBitStreamLenByte;
+        // Reset the variables which will be incremented in the loop
+ *bitStreamLenByte = 0;
+ bool done = false;
+ while (!done) {
+ status = InternalEncode(&bitStream[*bitStreamLenByte],
+ &tmpBitStreamLenByte);
+ *bitStreamLenByte += tmpBitStreamLenByte;
- // Reset the variables which will be increamented in the loop
- *bitStreamLenByte = 0;
- bool done = false;
- while(!done)
- {
- status = InternalEncode(&bitStream[*bitStreamLenByte],
- &tmpBitStreamLenByte);
- *bitStreamLenByte += tmpBitStreamLenByte;
+ // Guard Against errors and too large payloads
+ if ((status < 0) || (*bitStreamLenByte > MAX_PAYLOAD_SIZE_BYTE)) {
+            // An error has happened. If we are in the middle of a full
+            // frame we have to exit. Before exiting, whatever bits are
+            // in the buffer are probably corrupted. Anyway, we ignore
+            // them.
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ // We might have come here because of the second
+ // condition.
+ status = -1;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding,
+ _uniqueID, "EncodeSafe: error in InternalEncode");
+ // break from the loop
+ break;
+ }
- // Guard Against errors and too large payloads
- if((status < 0) ||
- (*bitStreamLenByte > MAX_PAYLOAD_SIZE_BYTE))
- {
- // Error has happened if we are in the middle of a full
- // frame we have to exit. Before exiting, whatever bits
- // are in the buffer are probably corruptred. Anyways
- // we ignore them.
- *bitStreamLenByte = 0;
- *encodingType = kNoEncoding;
- // We might have come here because of the second
- // condition.
- status = -1;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding,
- _uniqueID, "EncodeSafe: error in InternalEncode");
- // break from the loop
- break;
- }
-
- // TODO(andrew): This should be multiplied by the number of
- // channels, right?
- // http://code.google.com/p/webrtc/issues/detail?id=714
- done = _inAudioIxRead >= _frameLenSmpl;
- }
- }
- if(status >= 0)
- {
- *encodingType = (_vadLabel[0] == 1)?
- kActiveNormalEncoded:kPassiveNormalEncoded;
- // Transport empty frame if we have an empty bitsteram
- if ((*bitStreamLenByte == 0) && ((_inAudioIxWrite - _inAudioIxRead) <= 0))
- {
- // Makes sure we transmit an empty frame
- *bitStreamLenByte = 1;
- *encodingType = kNoEncoding;
- }
- }
+ // TODO(andrew): This should be multiplied by the number of
+ // channels, right?
+ // http://code.google.com/p/webrtc/issues/detail?id=714
+ done = _inAudioIxRead >= _frameLenSmpl;
}
+ }
+ if (status >= 0) {
+ *encodingType = (_vadLabel[0] == 1) ? kActiveNormalEncoded :
+ kPassiveNormalEncoded;
+      // Transport empty frame if we have an empty bitstream
+ if ((*bitStreamLenByte == 0) &&
+ ((_inAudioIxWrite - _inAudioIxRead) <= 0)) {
+ // Makes sure we transmit an empty frame
+ *bitStreamLenByte = 1;
+ *encodingType = kNoEncoding;
+ }
+ }
}
+ }
- // Move the timestampe buffer according to the number of 10 ms blocks
- // which are read.
- WebRtc_UWord16 sampFreqHz;
- EncoderSampFreq(sampFreqHz);
+  // Move the timestamp buffer according to the number of 10 ms blocks
+ // which are read.
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
- WebRtc_Word16 num10MsecBlocks =
- (WebRtc_Word16)((_inAudioIxRead/_noChannels * 100) / sampFreqHz);
- if(_inTimestampIxWrite > num10MsecBlocks)
- {
- memmove(_inTimestamp, _inTimestamp + num10MsecBlocks,
+ WebRtc_Word16 num10MsecBlocks = (WebRtc_Word16)(
+ (_inAudioIxRead / _noChannels * 100) / sampFreqHz);
+ if (_inTimestampIxWrite > num10MsecBlocks) {
+ memmove(_inTimestamp, _inTimestamp + num10MsecBlocks,
(_inTimestampIxWrite - num10MsecBlocks) * sizeof(WebRtc_Word32));
- }
- _inTimestampIxWrite -= num10MsecBlocks;
+ }
+ _inTimestampIxWrite -= num10MsecBlocks;
- // We have to move the audio that is not encoded to the beginning
- // of the buffer and accordingly adjust the read and write indices.
- if(_inAudioIxRead < _inAudioIxWrite)
- {
- memmove(_inAudio, &_inAudio[_inAudioIxRead],
- (_inAudioIxWrite - _inAudioIxRead)*sizeof(WebRtc_Word16));
- }
+ // We have to move the audio that is not encoded to the beginning
+ // of the buffer and accordingly adjust the read and write indices.
+ if (_inAudioIxRead < _inAudioIxWrite) {
+ memmove(_inAudio, &_inAudio[_inAudioIxRead],
+ (_inAudioIxWrite - _inAudioIxRead) * sizeof(WebRtc_Word16));
+ }
- _inAudioIxWrite -= _inAudioIxRead;
+ _inAudioIxWrite -= _inAudioIxRead;
- _inAudioIxRead = 0;
- _lastEncodedTimestamp = *timeStamp;
- return (status < 0) ? (-1):(*bitStreamLenByte);
+ _inAudioIxRead = 0;
+ _lastEncodedTimestamp = *timeStamp;
+ return (status < 0) ? (-1) : (*bitStreamLenByte);
}
-WebRtc_Word16
-ACMGenericCodec::Decode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType)
-{
- WriteLockScoped wl(_codecWrapperLock);
- return DecodeSafe(bitStream, bitStreamLenByte, audio,
- audioSamples, speechType);
+WebRtc_Word16 ACMGenericCodec::Decode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType) {
+ WriteLockScoped wl(_codecWrapperLock);
+ return DecodeSafe(bitStream, bitStreamLenByte, audio, audioSamples,
+ speechType);
}
-bool
-ACMGenericCodec::EncoderInitialized()
-{
- ReadLockScoped rl(_codecWrapperLock);
- return _encoderInitialized;
+bool ACMGenericCodec::EncoderInitialized() {
+ ReadLockScoped rl(_codecWrapperLock);
+ return _encoderInitialized;
}
-bool
-ACMGenericCodec::DecoderInitialized()
-{
- ReadLockScoped rl(_codecWrapperLock);
- return _decoderInitialized;
+bool ACMGenericCodec::DecoderInitialized() {
+ ReadLockScoped rl(_codecWrapperLock);
+ return _decoderInitialized;
}
+WebRtc_Word32 ACMGenericCodec::RegisterInNetEq(ACMNetEQ* netEq,
+ const CodecInst& codecInst) {
+ WebRtcNetEQ_CodecDef codecDef;
+ WriteLockScoped wl(_codecWrapperLock);
-WebRtc_Word32
-ACMGenericCodec::RegisterInNetEq(
- ACMNetEQ* netEq,
- const CodecInst& codecInst)
-{
- WebRtcNetEQ_CodecDef codecDef;
- WriteLockScoped wl(_codecWrapperLock);
-
- if(CodecDef(codecDef, codecInst) < 0)
- {
- // Failed to register
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "RegisterInNetEq: error, failed to register");
- _registeredInNetEq = false;
- return -1;
+ if (CodecDef(codecDef, codecInst) < 0) {
+ // Failed to register
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "RegisterInNetEq: error, failed to register");
+ _registeredInNetEq = false;
+ return -1;
+ } else {
+ if (netEq->AddCodec(&codecDef, _isMaster) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "RegisterInNetEq: error, failed to add codec");
+ _registeredInNetEq = false;
+ return -1;
}
- else
- {
- if(netEq->AddCodec(&codecDef, _isMaster) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "RegisterInNetEq: error, failed to add codec");
- _registeredInNetEq = false;
- return -1;
- }
- // Registered
- _registeredInNetEq = true;
- return 0;
- }
-}
-
-WebRtc_Word16
-ACMGenericCodec::EncoderParams(
- WebRtcACMCodecParams* encParams)
-{
- ReadLockScoped rl(_codecWrapperLock);
- return EncoderParamsSafe(encParams);
-}
-
-WebRtc_Word16
-ACMGenericCodec::EncoderParamsSafe(
- WebRtcACMCodecParams* encParams)
-{
- // Codec parameters are valid only if the encoder is initialized
- if(_encoderInitialized)
- {
- WebRtc_Word32 currentRate;
- memcpy(encParams, &_encoderParams, sizeof(WebRtcACMCodecParams));
- currentRate = encParams->codecInstant.rate;
- CurrentRate(currentRate);
- encParams->codecInstant.rate = currentRate;
- return 0;
- }
- else
- {
- encParams->codecInstant.plname[0] = '\0';
- encParams->codecInstant.pltype = -1;
- encParams->codecInstant.pacsize = 0;
- encParams->codecInstant.rate = 0;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EncoderParamsSafe: error, encoder not initialized");
- return -1;
- }
-}
-
-bool
-ACMGenericCodec::DecoderParams(
- WebRtcACMCodecParams* decParams,
- const WebRtc_UWord8 payloadType)
-{
- ReadLockScoped rl(_codecWrapperLock);
- return DecoderParamsSafe(decParams, payloadType);
-}
-
-bool
-ACMGenericCodec::DecoderParamsSafe(
- WebRtcACMCodecParams* decParams,
- const WebRtc_UWord8 payloadType)
-{
- // Decoder parameters are valid only if decoder is initialized
- if(_decoderInitialized)
- {
- if(payloadType == _decoderParams.codecInstant.pltype)
- {
- memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
- return true;
- }
- }
-
- decParams->codecInstant.plname[0] = '\0';
- decParams->codecInstant.pltype = -1;
- decParams->codecInstant.pacsize = 0;
- decParams->codecInstant.rate = 0;
- return false;
-}
-
-WebRtc_Word16
-ACMGenericCodec::ResetEncoder()
-{
- WriteLockScoped lockCodec(_codecWrapperLock);
- ReadLockScoped lockNetEq(*_netEqDecodeLock);
- return ResetEncoderSafe();
-}
-
-WebRtc_Word16
-ACMGenericCodec::ResetEncoderSafe()
-{
- if(!_encoderExist || !_encoderInitialized)
- {
- // We don't reset if doesn't exists or not initialized yet
- return 0;
- }
-
- _inAudioIxWrite = 0;
- _inAudioIxRead = 0;
- _inTimestampIxWrite = 0;
- _noMissedSamples = 0;
- _isAudioBuffFresh = true;
- memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
- memset(_inTimestamp, 0, TIMESTAMP_BUFFER_SIZE_W32 * sizeof(WebRtc_Word32));
-
- // Store DTX/VAD params
- bool enableVAD = _vadEnabled;
- bool enableDTX = _dtxEnabled;
- ACMVADMode mode = _vadMode;
-
- // Reset the encoder
- if(InternalResetEncoder() < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "ResetEncoderSafe: error in reset encoder");
- return -1;
- }
-
- // Disable DTX & VAD this deletes the states
- // we like to have fresh start
- DisableDTX();
- DisableVAD();
-
- // Set DTX/VAD
- return SetVADSafe(enableDTX, enableVAD, mode);
-}
-
-WebRtc_Word16
-ACMGenericCodec::InternalResetEncoder()
-{
- // For most of the codecs it is sufficient to
- // call their internal initialization.
- // There are some exceptions.
- // ----
- // For iSAC we don't want to lose BWE history,
- // so for iSAC we have to over-write this function.
- // ----
- return InternalInitEncoder(&_encoderParams);
-}
-
-WebRtc_Word16
-ACMGenericCodec::InitEncoder(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization)
-{
- WriteLockScoped lockCodec(_codecWrapperLock);
- ReadLockScoped lockNetEq(*_netEqDecodeLock);
- return InitEncoderSafe(codecParams, forceInitialization);
-}
-
-WebRtc_Word16
-ACMGenericCodec::InitEncoderSafe(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization)
-{
- // Check if we got a valid set of parameters
- int mirrorID;
- int codecNumber =
- ACMCodecDB::CodecNumber(&(codecParams->codecInstant), &mirrorID);
-
- if(codecNumber < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitEncoderSafe: error, codec number negative");
- return -1;
- }
- // Check if the parameters are for this codec
- if((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID))
- {
- // The current codec is not the same as the one given by codecParams
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitEncoderSafe: current codec is not the same as the one given by codecParams");
- return -1;
- }
-
- if(!CanChangeEncodingParam(codecParams->codecInstant))
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitEncoderSafe: cannot change encoding parameters");
- return -1;
- }
-
- if(_encoderInitialized && !forceInitialization)
- {
- // The encoder is already initialized
- return 0;
- }
- WebRtc_Word16 status;
- if(!_encoderExist)
- {
- _encoderInitialized = false;
- status = CreateEncoder();
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitEncoderSafe: cannot create encoder");
- return -1;
- }
- else
- {
- _encoderExist = true;
- }
- }
- _frameLenSmpl = (codecParams->codecInstant).pacsize;
- _noChannels = codecParams->codecInstant.channels;
- status = InternalInitEncoder(codecParams);
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitEncoderSafe: error in init encoder");
- _encoderInitialized = false;
- return -1;
- }
- else
- {
- memcpy(&_encoderParams, codecParams, sizeof(WebRtcACMCodecParams));
- _encoderInitialized = true;
- if(_inAudio == NULL)
- {
- _inAudio = new WebRtc_Word16[AUDIO_BUFFER_SIZE_W16];
- if(_inAudio == NULL)
- {
- return -1;
- }
- memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
- }
- if(_inTimestamp == NULL)
- {
- _inTimestamp = new WebRtc_UWord32[TIMESTAMP_BUFFER_SIZE_W32];
- if(_inTimestamp == NULL)
- {
- return -1;
- }
- memset(_inTimestamp, 0, sizeof(WebRtc_UWord32) *
- TIMESTAMP_BUFFER_SIZE_W32);
- }
- _isAudioBuffFresh = true;
- }
- status = SetVADSafe(codecParams->enableDTX, codecParams->enableVAD,
- codecParams->vadMode);
-
- return status;
-}
-
-bool
-ACMGenericCodec::CanChangeEncodingParam(
- CodecInst& /*codecInst*/)
-{
- return true;
-}
-
-WebRtc_Word16
-ACMGenericCodec::InitDecoder(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization)
-{
- WriteLockScoped lockCodc(_codecWrapperLock);
- WriteLockScoped lockNetEq(*_netEqDecodeLock);
- return InitDecoderSafe(codecParams, forceInitialization);
-}
-
-WebRtc_Word16
-ACMGenericCodec::InitDecoderSafe(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization)
-{
- int mirrorID;
- // Check if we got a valid set of parameters
- int codecNumber =
- ACMCodecDB::ReceiverCodecNumber(&codecParams->codecInstant, &mirrorID);
-
- if(codecNumber < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitDecoderSafe: error, invalid codec number");
- return -1;
- }
- // Check if the parameters are for this codec
- if((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID))
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitDecoderSafe: current codec is not the same as the one given "
- "by codecParams");
- // The current codec is not the same as the one given by codecParams
- return -1;
- }
-
-
- if(_decoderInitialized && !forceInitialization)
- {
- // The encoder is already initialized
- return 0;
- }
-
- WebRtc_Word16 status;
- if(!_decoderExist)
- {
- _decoderInitialized = false;
- status = CreateDecoder();
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitDecoderSafe: cannot create decoder");
- return -1;
- }
- else
- {
- _decoderExist = true;
- }
- }
-
- status = InternalInitDecoder(codecParams);
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InitDecoderSafe: cannot init decoder");
- _decoderInitialized = false;
- return -1;
- }
- else
- {
- // Store the parameters
- SaveDecoderParamSafe(codecParams);
- _decoderInitialized = true;
- }
+ // Registered
+ _registeredInNetEq = true;
return 0;
+ }
}
-WebRtc_Word16
-ACMGenericCodec::ResetDecoder(WebRtc_Word16 payloadType)
-{
- WriteLockScoped lockCodec(_codecWrapperLock);
- WriteLockScoped lockNetEq(*_netEqDecodeLock);
- return ResetDecoderSafe(payloadType);
+WebRtc_Word16 ACMGenericCodec::EncoderParams(WebRtcACMCodecParams* encParams) {
+ ReadLockScoped rl(_codecWrapperLock);
+ return EncoderParamsSafe(encParams);
}
-WebRtc_Word16
-ACMGenericCodec::ResetDecoderSafe(WebRtc_Word16 payloadType)
-{
- WebRtcACMCodecParams decoderParams;
- if(!_decoderExist || !_decoderInitialized)
- {
- return 0;
+WebRtc_Word16 ACMGenericCodec::EncoderParamsSafe(
+ WebRtcACMCodecParams* encParams) {
+ // Codec parameters are valid only if the encoder is initialized
+ if (_encoderInitialized) {
+ WebRtc_Word32 currentRate;
+ memcpy(encParams, &_encoderParams, sizeof(WebRtcACMCodecParams));
+ currentRate = encParams->codecInstant.rate;
+ CurrentRate(currentRate);
+ encParams->codecInstant.rate = currentRate;
+ return 0;
+ } else {
+ encParams->codecInstant.plname[0] = '\0';
+ encParams->codecInstant.pltype = -1;
+ encParams->codecInstant.pacsize = 0;
+ encParams->codecInstant.rate = 0;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncoderParamsSafe: error, encoder not initialized");
+ return -1;
+ }
+}
+
+bool ACMGenericCodec::DecoderParams(WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType) {
+ ReadLockScoped rl(_codecWrapperLock);
+ return DecoderParamsSafe(decParams, payloadType);
+}
+
+bool ACMGenericCodec::DecoderParamsSafe(WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType) {
+ // Decoder parameters are valid only if the decoder is initialized
+ if (_decoderInitialized) {
+ if (payloadType == _decoderParams.codecInstant.pltype) {
+ memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
+ return true;
}
- // Initialization of the decoder should work for all
- // the codec. If there is a codec that has to keep
- // some states then we need to define a virtual and
- // overwrite in that codec
- DecoderParamsSafe(&decoderParams, (WebRtc_UWord8) payloadType);
- return InternalInitDecoder(&decoderParams);
+ }
+
+ decParams->codecInstant.plname[0] = '\0';
+ decParams->codecInstant.pltype = -1;
+ decParams->codecInstant.pacsize = 0;
+ decParams->codecInstant.rate = 0;
+ return false;
}
-void
-ACMGenericCodec::ResetNoMissedSamples()
-{
- WriteLockScoped cs(_codecWrapperLock);
- _noMissedSamples = 0;
+WebRtc_Word16 ACMGenericCodec::ResetEncoder() {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return ResetEncoderSafe();
}
-void
-ACMGenericCodec::IncreaseNoMissedSamples(
- const WebRtc_Word16 noSamples)
-{
- _noMissedSamples += noSamples;
+WebRtc_Word16 ACMGenericCodec::ResetEncoderSafe() {
+ if (!_encoderExist || !_encoderInitialized) {
+ // We don't reset if the encoder doesn't exist or isn't initialized yet
+ return 0;
+ }
+
+ _inAudioIxWrite = 0;
+ _inAudioIxRead = 0;
+ _inTimestampIxWrite = 0;
+ _noMissedSamples = 0;
+ _isAudioBuffFresh = true;
+ memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ memset(_inTimestamp, 0, TIMESTAMP_BUFFER_SIZE_W32 * sizeof(WebRtc_Word32));
+
+ // Store DTX/VAD params
+ bool enableVAD = _vadEnabled;
+ bool enableDTX = _dtxEnabled;
+ ACMVADMode mode = _vadMode;
+
+ // Reset the encoder
+ if (InternalResetEncoder() < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "ResetEncoderSafe: error in reset encoder");
+ return -1;
+ }
+
+ // Disable DTX & VAD. This deletes the states;
+ // we want to have a fresh start.
+ DisableDTX();
+ DisableVAD();
+
+ // Set DTX/VAD
+ return SetVADSafe(enableDTX, enableVAD, mode);
+}
+
+WebRtc_Word16 ACMGenericCodec::InternalResetEncoder() {
+ // For most codecs it is sufficient to call their
+ // internal initialization. There are some exceptions.
+ // ----
+ // For iSAC we don't want to lose the BWE history, so
+ // for iSAC this function has to be overridden.
+ // ----
+ return InternalInitEncoder(&_encoderParams);
+}
+
+WebRtc_Word16 ACMGenericCodec::InitEncoder(WebRtcACMCodecParams* codecParams,
+ bool forceInitialization) {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return InitEncoderSafe(codecParams, forceInitialization);
+}
+
+WebRtc_Word16 ACMGenericCodec::InitEncoderSafe(
+ WebRtcACMCodecParams* codecParams, bool forceInitialization) {
+ // Check if we got a valid set of parameters
+ int mirrorID;
+ int codecNumber = ACMCodecDB::CodecNumber(&(codecParams->codecInstant),
+ &mirrorID);
+
+ if (codecNumber < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: error, codec number negative");
+ return -1;
+ }
+ // Check if the parameters are for this codec
+ if ((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID)) {
+ // The current codec is not the same as the one given by codecParams
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: current codec is not the same as the one given by "
+ "codecParams");
+ return -1;
+ }
+
+ if (!CanChangeEncodingParam(codecParams->codecInstant)) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: cannot change encoding parameters");
+ return -1;
+ }
+
+ if (_encoderInitialized && !forceInitialization) {
+ // The encoder is already initialized
+ return 0;
+ }
+ WebRtc_Word16 status;
+ if (!_encoderExist) {
+ _encoderInitialized = false;
+ status = CreateEncoder();
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: cannot create encoder");
+ return -1;
+ } else {
+ _encoderExist = true;
+ }
+ }
+ _frameLenSmpl = (codecParams->codecInstant).pacsize;
+ _noChannels = codecParams->codecInstant.channels;
+ status = InternalInitEncoder(codecParams);
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: error in init encoder");
+ _encoderInitialized = false;
+ return -1;
+ } else {
+ memcpy(&_encoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+ _encoderInitialized = true;
+ if (_inAudio == NULL) {
+ _inAudio = new WebRtc_Word16[AUDIO_BUFFER_SIZE_W16];
+ if (_inAudio == NULL) {
+ return -1;
+ }
+ memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ }
+ if (_inTimestamp == NULL) {
+ _inTimestamp = new WebRtc_UWord32[TIMESTAMP_BUFFER_SIZE_W32];
+ if (_inTimestamp == NULL) {
+ return -1;
+ }
+ memset(_inTimestamp, 0,
+ sizeof(WebRtc_UWord32) * TIMESTAMP_BUFFER_SIZE_W32);
+ }
+ _isAudioBuffFresh = true;
+ }
+ status = SetVADSafe(codecParams->enableDTX, codecParams->enableVAD,
+ codecParams->vadMode);
+
+ return status;
+}
+
+bool ACMGenericCodec::CanChangeEncodingParam(CodecInst& /*codecInst*/) {
+ return true;
+}
+
+WebRtc_Word16 ACMGenericCodec::InitDecoder(WebRtcACMCodecParams* codecParams,
+ bool forceInitialization) {
+ WriteLockScoped lockCodc(_codecWrapperLock);
+ WriteLockScoped lockNetEq(*_netEqDecodeLock);
+ return InitDecoderSafe(codecParams, forceInitialization);
+}
+
+WebRtc_Word16 ACMGenericCodec::InitDecoderSafe(
+ WebRtcACMCodecParams* codecParams, bool forceInitialization) {
+ int mirrorID;
+ // Check if we got a valid set of parameters
+ int codecNumber = ACMCodecDB::ReceiverCodecNumber(&codecParams->codecInstant,
+ &mirrorID);
+
+ if (codecNumber < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: error, invalid codec number");
+ return -1;
+ }
+ // Check if the parameters are for this codec
+ if ((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID)) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: current codec is not the same as the one given "
+ "by codecParams");
+ // The current codec is not the same as the one given by codecParams
+ return -1;
+ }
+
+ if (_decoderInitialized && !forceInitialization) {
+ // The decoder is already initialized
+ return 0;
+ }
+
+ WebRtc_Word16 status;
+ if (!_decoderExist) {
+ _decoderInitialized = false;
+ status = CreateDecoder();
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: cannot create decoder");
+ return -1;
+ } else {
+ _decoderExist = true;
+ }
+ }
+
+ status = InternalInitDecoder(codecParams);
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: cannot init decoder");
+ _decoderInitialized = false;
+ return -1;
+ } else {
+ // Store the parameters
+ SaveDecoderParamSafe(codecParams);
+ _decoderInitialized = true;
+ }
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::ResetDecoder(WebRtc_Word16 payloadType) {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ WriteLockScoped lockNetEq(*_netEqDecodeLock);
+ return ResetDecoderSafe(payloadType);
+}
+
+WebRtc_Word16 ACMGenericCodec::ResetDecoderSafe(WebRtc_Word16 payloadType) {
+ WebRtcACMCodecParams decoderParams;
+ if (!_decoderExist || !_decoderInitialized) {
+ return 0;
+ }
+ // Initialization of the decoder should work for all
+ // the codecs. If there is a codec that has to keep
+ // some state, then we need to define a virtual function
+ // and override it in that codec.
+ DecoderParamsSafe(&decoderParams, (WebRtc_UWord8) payloadType);
+ return InternalInitDecoder(&decoderParams);
+}
+
+void ACMGenericCodec::ResetNoMissedSamples() {
+ WriteLockScoped cs(_codecWrapperLock);
+ _noMissedSamples = 0;
+}
+
+void ACMGenericCodec::IncreaseNoMissedSamples(const WebRtc_Word16 noSamples) {
+ _noMissedSamples += noSamples;
}
// Get the number of missed samples, this can be public
-WebRtc_UWord32
-ACMGenericCodec::NoMissedSamples() const
-{
- ReadLockScoped cs(_codecWrapperLock);
- return _noMissedSamples;
-}
-void
-ACMGenericCodec::DestructEncoder()
-{
- WriteLockScoped wl(_codecWrapperLock);
-
- // Disable VAD and delete the instance
- if(_ptrVADInst != NULL)
- {
- WebRtcVad_Free(_ptrVADInst);
- _ptrVADInst = NULL;
- }
- _vadEnabled = false;
- _vadMode = VADNormal;
-
- //Disable DTX and delete the instance
- _dtxEnabled = false;
- if(_ptrDTXInst != NULL)
- {
- WebRtcCng_FreeEnc(_ptrDTXInst);
- _ptrDTXInst = NULL;
- }
- _numLPCParams = kNewCNGNumPLCParams;
-
- DestructEncoderSafe();
+WebRtc_UWord32 ACMGenericCodec::NoMissedSamples() const {
+ ReadLockScoped cs(_codecWrapperLock);
+ return _noMissedSamples;
}
-void
-ACMGenericCodec::DestructDecoder()
-{
- WriteLockScoped wl(_codecWrapperLock);
- _decoderParams.codecInstant.pltype = -1;
- DestructDecoderSafe();
+void ACMGenericCodec::DestructEncoder() {
+ WriteLockScoped wl(_codecWrapperLock);
+
+ // Disable VAD and delete the instance
+ if (_ptrVADInst != NULL) {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ _vadEnabled = false;
+ _vadMode = VADNormal;
+
+ // Disable DTX and delete the instance
+ _dtxEnabled = false;
+ if (_ptrDTXInst != NULL) {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ _numLPCParams = kNewCNGNumPLCParams;
+
+ DestructEncoderSafe();
}
-WebRtc_Word16
-ACMGenericCodec::SetBitRate(
- const WebRtc_Word32 bitRateBPS)
-{
- WriteLockScoped wl(_codecWrapperLock);
- return SetBitRateSafe(bitRateBPS);
+void ACMGenericCodec::DestructDecoder() {
+ WriteLockScoped wl(_codecWrapperLock);
+ _decoderParams.codecInstant.pltype = -1;
+ DestructDecoderSafe();
}
-WebRtc_Word16
-ACMGenericCodec::SetBitRateSafe(
- const WebRtc_Word32 bitRateBPS)
-{
- // If the codec can change the bit-rate this function
- // should be overwritten, otherewise the only acceptable
- // value is the one that is in database.
- CodecInst codecParams;
- if(ACMCodecDB::Codec(_codecID, &codecParams) < 0)
- {
+WebRtc_Word16 ACMGenericCodec::SetBitRate(const WebRtc_Word32 bitRateBPS) {
+ WriteLockScoped wl(_codecWrapperLock);
+ return SetBitRateSafe(bitRateBPS);
+}
+
+WebRtc_Word16 ACMGenericCodec::SetBitRateSafe(const WebRtc_Word32 bitRateBPS) {
+ // If the codec can change the bit-rate, this function
+ // should be overridden; otherwise the only acceptable
+ // value is the one that is in the database.
+ CodecInst codecParams;
+ if (ACMCodecDB::Codec(_codecID, &codecParams) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetBitRateSafe: error in ACMCodecDB::Codec");
+ return -1;
+ }
+ if (codecParams.rate != bitRateBPS) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetBitRateSafe: rate value is not acceptable");
+ return -1;
+ } else {
+ return 0;
+ }
+}
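// A hedged illustration of the base-class rule above, assuming a wrapper
// `codec` whose ACMCodecDB entry carries rate == 64000 (e.g. a PCMU codec):
//
//   codec->SetBitRate(64000);  // returns 0: matches the rate in the database
//   codec->SetBitRate(32000);  // returns -1: "rate value is not acceptable"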
+
+WebRtc_Word32 ACMGenericCodec::GetEstimatedBandwidth() {
+ WriteLockScoped wl(_codecWrapperLock);
+ return GetEstimatedBandwidthSafe();
+}
+
+WebRtc_Word32 ACMGenericCodec::GetEstimatedBandwidthSafe() {
+ // All codecs but iSAC will return -1
+ return -1;
+}
+
+WebRtc_Word32 ACMGenericCodec::SetEstimatedBandwidth(
+ WebRtc_Word32 estimatedBandwidth) {
+ WriteLockScoped wl(_codecWrapperLock);
+ return SetEstimatedBandwidthSafe(estimatedBandwidth);
+}
+
+WebRtc_Word32 ACMGenericCodec::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 /*estimatedBandwidth*/) {
+ // All codecs but iSAC will return -1
+ return -1;
+}
+
+WebRtc_Word32 ACMGenericCodec::GetRedPayload(WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes) {
+ WriteLockScoped wl(_codecWrapperLock);
+ return GetRedPayloadSafe(redPayload, payloadBytes);
+}
+
+WebRtc_Word32 ACMGenericCodec::GetRedPayloadSafe(
+ WebRtc_UWord8* /* redPayload */, WebRtc_Word16* /* payloadBytes */) {
+ return -1; // Do nothing by default
+}
+
+WebRtc_Word16 ACMGenericCodec::CreateEncoder() {
+ WebRtc_Word16 status = 0;
+ if (!_encoderExist) {
+ status = InternalCreateEncoder();
+ // We just created the codec and obviously it is not initialized
+ _encoderInitialized = false;
+ }
+
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CreateEncoder: error in internal create encoder");
+ _encoderExist = false;
+ } else {
+ _encoderExist = true;
+ }
+ return status;
+}
+
+WebRtc_Word16 ACMGenericCodec::CreateDecoder() {
+ WebRtc_Word16 status = 0;
+ if (!_decoderExist) {
+ status = InternalCreateDecoder();
+ // Decoder just created and obviously it is not initialized
+ _decoderInitialized = false;
+ }
+
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CreateDecoder: error in internal create decoder");
+ _decoderExist = false;
+ } else {
+ _decoderExist = true;
+ }
+ return status;
+}
+
+void ACMGenericCodec::DestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ InternalDestructEncoderInst(ptrInst);
+ }
+}
+
+WebRtc_Word16 ACMGenericCodec::AudioBuffer(WebRtcACMAudioBuff& audioBuff) {
+ ReadLockScoped cs(_codecWrapperLock);
+ memcpy(audioBuff.inAudio, _inAudio,
+ AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ audioBuff.inAudioIxRead = _inAudioIxRead;
+ audioBuff.inAudioIxWrite = _inAudioIxWrite;
+ memcpy(audioBuff.inTimestamp, _inTimestamp,
+ TIMESTAMP_BUFFER_SIZE_W32 * sizeof(WebRtc_UWord32));
+ audioBuff.inTimestampIxWrite = _inTimestampIxWrite;
+ audioBuff.lastTimestamp = _lastTimestamp;
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::SetAudioBuffer(WebRtcACMAudioBuff& audioBuff) {
+ WriteLockScoped cs(_codecWrapperLock);
+ memcpy(_inAudio, audioBuff.inAudio,
+ AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ _inAudioIxRead = audioBuff.inAudioIxRead;
+ _inAudioIxWrite = audioBuff.inAudioIxWrite;
+ memcpy(_inTimestamp, audioBuff.inTimestamp,
+ TIMESTAMP_BUFFER_SIZE_W32 * sizeof(WebRtc_UWord32));
+ _inTimestampIxWrite = audioBuff.inTimestampIxWrite;
+ _lastTimestamp = audioBuff.lastTimestamp;
+ _isAudioBuffFresh = false;
+ return 0;
+}
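// A hedged sketch of using the AudioBuffer()/SetAudioBuffer() pair to save
// and restore the internal buffer state; `codec` stands for an assumed,
// already initialized wrapper instance:
//
//   WebRtcACMAudioBuff snapshot;
//   codec->AudioBuffer(snapshot);     // copy out audio, timestamps, indices
//   // ... e.g. re-create or re-initialize the encoder ...
//   codec->SetAudioBuffer(snapshot);  // write the saved state back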
+
+WebRtc_UWord32 ACMGenericCodec::LastEncodedTimestamp() const {
+ ReadLockScoped cs(_codecWrapperLock);
+ return _lastEncodedTimestamp;
+}
+
+WebRtc_UWord32 ACMGenericCodec::EarliestTimestamp() const {
+ ReadLockScoped cs(_codecWrapperLock);
+ return _inTimestamp[0];
+}
+
+WebRtc_Word16 ACMGenericCodec::SetVAD(const bool enableDTX,
+ const bool enableVAD,
+ const ACMVADMode mode) {
+ WriteLockScoped cs(_codecWrapperLock);
+ return SetVADSafe(enableDTX, enableVAD, mode);
+}
+
+WebRtc_Word16 ACMGenericCodec::SetVADSafe(const bool enableDTX,
+ const bool enableVAD,
+ const ACMVADMode mode) {
+ if (enableDTX) {
+ // Make G729 AnnexB a special case
+ if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729")
+ && !_hasInternalDTX) {
+ if (ACMGenericCodec::EnableDTX() < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetBitRateSafe: error in ACMCodecDB::Codec");
+ "SetVADSafe: error in enable DTX");
return -1;
- }
- if(codecParams.rate != bitRateBPS)
- {
+ }
+ } else {
+ if (EnableDTX() < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetBitRateSafe: rate value is not acceptable");
+ "SetVADSafe: error in enable DTX");
return -1;
+ }
}
- else
- {
+
+ if (_hasInternalDTX) {
+ // The codec has internal DTX, so in practice we don't need WebRtc
+ // VAD; however, we let the user turn it on if they need call-backs
+ // on silence. Store the VAD mode for the future even if VAD is off.
+ _vadMode = mode;
+ return (enableVAD) ? EnableVAD(mode) : DisableVAD();
+ } else {
+ // Codec does not have internal DTX so enabling DTX requires an
+ // active VAD. 'enableDTX == true' overwrites VAD status.
+ if (EnableVAD(mode) < 0) {
+ // If we cannot create VAD we have to disable DTX
+ if (!_vadEnabled) {
+ DisableDTX();
+ }
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetVADSafe: error in enable VAD");
+ return -1;
+ }
+
+ // Return '1', to let the caller know VAD was turned on, even if the
+ // function was called with VAD='false'
+ if (enableVAD == false) {
+ return 1;
+ } else {
return 0;
+ }
}
+ } else {
+ // Make G729 AnnexB a special case
+ if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729")
+ && !_hasInternalDTX) {
+ ACMGenericCodec::DisableDTX();
+ } else {
+ DisableDTX();
+ }
+ return (enableVAD) ? EnableVAD(mode) : DisableVAD();
+ }
}
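// A hedged usage sketch of the DTX/VAD rules implemented above; `codec`
// stands for a concrete wrapper without internal DTX and is an assumption
// made only for illustration:
//
//   // Request DTX but not VAD. External DTX needs an active VAD, so the
//   // call enables VAD anyway and returns 1 to signal that.
//   WebRtc_Word16 ret = codec->SetVAD(true,       // enableDTX
//                                     false,      // enableVAD
//                                     VADNormal);
//   if (ret == 1) {
//     // VAD was switched on implicitly.
//   } else if (ret < 0) {
//     // DTX/VAD could not be configured.
//   }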
-WebRtc_Word32
-ACMGenericCodec::GetEstimatedBandwidth()
-{
- WriteLockScoped wl(_codecWrapperLock);
- return GetEstimatedBandwidthSafe();
-}
-
-WebRtc_Word32
-ACMGenericCodec::GetEstimatedBandwidthSafe()
-{
- // All codecs but iSAC will return -1
+WebRtc_Word16 ACMGenericCodec::EnableDTX() {
+ if (_hasInternalDTX) {
+ // We should not be here if we have internal DTX;
+ // in that case this function should be overridden
+ // by the derived class.
return -1;
-}
-
-WebRtc_Word32
-ACMGenericCodec::SetEstimatedBandwidth(
- WebRtc_Word32 estimatedBandwidth)
-{
- WriteLockScoped wl(_codecWrapperLock);
- return SetEstimatedBandwidthSafe(estimatedBandwidth);
-}
-
-WebRtc_Word32
-ACMGenericCodec::SetEstimatedBandwidthSafe(
- WebRtc_Word32 /*estimatedBandwidth*/)
-{
- // All codecs but iSAC will return -1
- return -1;
-}
-
-WebRtc_Word32
-ACMGenericCodec::GetRedPayload(
- WebRtc_UWord8* redPayload,
- WebRtc_Word16* payloadBytes)
-{
- WriteLockScoped wl(_codecWrapperLock);
- return GetRedPayloadSafe(redPayload, payloadBytes);
-}
-
-WebRtc_Word32
-ACMGenericCodec::GetRedPayloadSafe(
- WebRtc_UWord8* /* redPayload */,
- WebRtc_Word16* /* payloadBytes */)
-{
- return -1; // Do nothing by default
-}
-
-WebRtc_Word16
-ACMGenericCodec::CreateEncoder()
-{
- WebRtc_Word16 status = 0;
- if(!_encoderExist)
- {
- status = InternalCreateEncoder();
- // We just created the codec and obviously it is not initialized
- _encoderInitialized = false;
+ }
+ if (!_dtxEnabled) {
+ if (WebRtcCng_CreateEnc(&_ptrDTXInst) < 0) {
+ _ptrDTXInst = NULL;
+ return -1;
}
-
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "CreateEncoder: error in internal create encoder");
- _encoderExist = false;
- }
- else
- {
- _encoderExist = true;
- }
- return status;
-}
-
-WebRtc_Word16
-ACMGenericCodec::CreateDecoder()
-{
- WebRtc_Word16 status = 0;
- if(!_decoderExist)
- {
- status = InternalCreateDecoder();
- // Decoder just created and obviously it is not initialized
- _decoderInitialized = false;
- }
-
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "CreateDecoder: error in internal create decoder");
- _decoderExist = false;
- }
- else
- {
- _decoderExist = true;
- }
- return status;
-}
-
-
-void ACMGenericCodec::DestructEncoderInst(void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- WriteLockScoped lockCodec(_codecWrapperLock);
- ReadLockScoped lockNetEq(*_netEqDecodeLock);
- InternalDestructEncoderInst(ptrInst);
- }
-}
-
-
-WebRtc_Word16
-ACMGenericCodec::AudioBuffer(
- WebRtcACMAudioBuff& audioBuff)
-{
- ReadLockScoped cs(_codecWrapperLock);
- memcpy(audioBuff.inAudio, _inAudio,
- AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
- audioBuff.inAudioIxRead = _inAudioIxRead;
- audioBuff.inAudioIxWrite = _inAudioIxWrite;
- memcpy(audioBuff.inTimestamp, _inTimestamp,
- TIMESTAMP_BUFFER_SIZE_W32*sizeof(WebRtc_UWord32));
- audioBuff.inTimestampIxWrite = _inTimestampIxWrite;
- audioBuff.lastTimestamp = _lastTimestamp;
- return 0;
-}
-
-
-WebRtc_Word16
-ACMGenericCodec::SetAudioBuffer(
- WebRtcACMAudioBuff& audioBuff)
-{
- WriteLockScoped cs(_codecWrapperLock);
- memcpy(_inAudio, audioBuff.inAudio,
- AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
- _inAudioIxRead = audioBuff.inAudioIxRead;
- _inAudioIxWrite = audioBuff.inAudioIxWrite;
- memcpy(_inTimestamp, audioBuff.inTimestamp,
- TIMESTAMP_BUFFER_SIZE_W32*sizeof(WebRtc_UWord32));
- _inTimestampIxWrite = audioBuff.inTimestampIxWrite;
- _lastTimestamp = audioBuff.lastTimestamp;
- _isAudioBuffFresh = false;
- return 0;
-}
-
-
-WebRtc_UWord32
-ACMGenericCodec::LastEncodedTimestamp() const
-{
- ReadLockScoped cs(_codecWrapperLock);
- return _lastEncodedTimestamp;
-}
-
-
-WebRtc_UWord32
-ACMGenericCodec::EarliestTimestamp() const
-{
- ReadLockScoped cs(_codecWrapperLock);
- return _inTimestamp[0];
-}
-
-
-WebRtc_Word16
-ACMGenericCodec::SetVAD(
- const bool enableDTX,
- const bool enableVAD,
- const ACMVADMode mode)
-{
- WriteLockScoped cs(_codecWrapperLock);
- return SetVADSafe(enableDTX, enableVAD, mode);
-}
-
-
-WebRtc_Word16
-ACMGenericCodec::SetVADSafe(
- const bool enableDTX,
- const bool enableVAD,
- const ACMVADMode mode)
-{
- if(enableDTX)
- {
- // Make G729 AnnexB a special case
- if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729") && !_hasInternalDTX)
- {
- if (ACMGenericCodec::EnableDTX() < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetVADSafe: error in enable DTX");
- return -1;
- }
- }
- else
- {
- if(EnableDTX() < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetVADSafe: error in enable DTX");
- return -1;
- }
- }
-
- if(_hasInternalDTX)
- {
- // Codec has internal DTX, practically we don't need WebRtc VAD,
- // however, we let the user to turn it on if they need call-backs
- // on silence. Store VAD mode for future even if VAD is off.
- _vadMode = mode;
- return (enableVAD)? EnableVAD(mode):DisableVAD();
- }
- else
- {
- // Codec does not have internal DTX so enabling DTX requires an
- // active VAD. 'enableDTX == true' overwrites VAD status.
- if(EnableVAD(mode) < 0)
- {
- // If we cannot create VAD we have to disable DTX
- if(!_vadEnabled)
- {
- DisableDTX();
- }
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "SetVADSafe: error in enable VAD");
- return -1;
- }
-
- // Return '1', to let the caller know VAD was turned on, even if the
- // function was called with VAD='false'
- if (enableVAD == false) {
- return 1;
- } else {
- return 0;
- }
- }
- }
- else
- {
- // Make G729 AnnexB a special case
- if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729") && !_hasInternalDTX)
- {
- ACMGenericCodec::DisableDTX();
- }
- else
- {
- DisableDTX();
- }
- return (enableVAD)? EnableVAD(mode):DisableVAD();
- }
-}
-
-WebRtc_Word16
-ACMGenericCodec::EnableDTX()
-{
- if(_hasInternalDTX)
- {
- // We should not be here if we have internal DTX
- // this function should be overwritten by the derived
- // class in this case
- return -1;
- }
- if(!_dtxEnabled)
- {
- if(WebRtcCng_CreateEnc(&_ptrDTXInst) < 0)
- {
- _ptrDTXInst = NULL;
- return -1;
- }
- WebRtc_UWord16 freqHz;
- EncoderSampFreq(freqHz);
- if(WebRtcCng_InitEnc(_ptrDTXInst, freqHz,
- ACM_SID_INTERVAL_MSEC, _numLPCParams) < 0)
- {
- // Couldn't initialize, has to return -1, and free the memory
- WebRtcCng_FreeEnc(_ptrDTXInst);
- _ptrDTXInst = NULL;
- return -1;
- }
- _dtxEnabled = true;
- }
- return 0;
-}
-
-WebRtc_Word16
-ACMGenericCodec::DisableDTX()
-{
- if(_hasInternalDTX)
- {
- // We should not be here if we have internal DTX
- // this function should be overwritten by the derived
- // class in this case
- return -1;
- }
- if(_ptrDTXInst != NULL)
- {
- WebRtcCng_FreeEnc(_ptrDTXInst);
- _ptrDTXInst = NULL;
- }
- _dtxEnabled = false;
- return 0;
-}
-
-WebRtc_Word16
-ACMGenericCodec::EnableVAD(
- ACMVADMode mode)
-{
- if((mode < VADNormal) || (mode > VADVeryAggr))
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EnableVAD: error in VAD mode range");
- return -1;
- }
-
- if(!_vadEnabled)
- {
- if(WebRtcVad_Create(&_ptrVADInst) < 0)
- {
- _ptrVADInst = NULL;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EnableVAD: error in create VAD");
- return -1;
- }
- if(WebRtcVad_Init(_ptrVADInst) < 0)
- {
- WebRtcVad_Free(_ptrVADInst);
- _ptrVADInst = NULL;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EnableVAD: error in init VAD");
- return -1;
- }
- }
-
- // Set the vad mode to the given value
- if(WebRtcVad_set_mode(_ptrVADInst, mode) < 0)
- {
- // We failed to set the mode and we have to return -1. If
- // we already have a working VAD (_vadEnabled == true) then
- // we leave it to work. otherwise, the following will be
- // executed.
- if(!_vadEnabled)
- {
- // We just created the instance but cannot set the mode
- // we have to free the memomry.
- WebRtcVad_Free(_ptrVADInst);
- _ptrVADInst = NULL;
- }
- WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
- "EnableVAD: failed to set the VAD mode");
- return -1;
- }
- _vadMode = mode;
- _vadEnabled = true;
- return 0;
-}
-
-WebRtc_Word16
-ACMGenericCodec::DisableVAD()
-{
- if(_ptrVADInst != NULL)
- {
- WebRtcVad_Free(_ptrVADInst);
- _ptrVADInst = NULL;
- }
- _vadEnabled = false;
- return 0;
-}
-
-WebRtc_Word32
-ACMGenericCodec::ReplaceInternalDTX(
- const bool replaceInternalDTX)
-{
- WriteLockScoped cs(_codecWrapperLock);
- return ReplaceInternalDTXSafe(replaceInternalDTX);
-}
-
-WebRtc_Word32
-ACMGenericCodec::ReplaceInternalDTXSafe(
- const bool /* replaceInternalDTX */)
-{
- return -1;
-}
-
-WebRtc_Word32
-ACMGenericCodec::IsInternalDTXReplaced(
- bool* internalDTXReplaced)
-{
- WriteLockScoped cs(_codecWrapperLock);
- return IsInternalDTXReplacedSafe(internalDTXReplaced);
-}
-
-WebRtc_Word32
-ACMGenericCodec::IsInternalDTXReplacedSafe(
- bool* internalDTXReplaced)
-{
- *internalDTXReplaced = false;
- return 0;
-}
-
-WebRtc_Word16
-ACMGenericCodec::ProcessFrameVADDTX(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_Word16* samplesProcessed)
-{
- if(!_vadEnabled)
- {
- // VAD not enabled, set all vadLable[] to 1 (speech detected)
- for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
- {
- _vadLabel[n] = 1;
- }
- *samplesProcessed = 0;
- return 0;
- }
-
WebRtc_UWord16 freqHz;
EncoderSampFreq(freqHz);
-
- // Calculate number of samples in 10 ms blocks, and number ms in one frame
- WebRtc_Word16 samplesIn10Msec = (WebRtc_Word16)(freqHz / 100);
- WebRtc_Word32 frameLenMsec = (((WebRtc_Word32)_frameLenSmpl * 1000) / freqHz);
- WebRtc_Word16 status;
-
- // Vector for storing maximum 30 ms of mono audio at 48 kHz.
- WebRtc_Word16 audio[1440];
-
- // Calculate number of VAD-blocks to process, and number of samples in each block.
- int noSamplesToProcess[2];
- if (frameLenMsec == 40)
- {
- // 20 ms in each VAD block
- noSamplesToProcess[0] = noSamplesToProcess[1] = 2*samplesIn10Msec;
+ if (WebRtcCng_InitEnc(_ptrDTXInst, freqHz, ACM_SID_INTERVAL_MSEC,
+ _numLPCParams) < 0) {
+ // Couldn't initialize; free the memory and return -1.
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ return -1;
}
- else
- {
- // For 10-30 ms framesizes, second VAD block will be size zero ms,
- // for 50 and 60 ms first VAD block will be 30 ms.
- noSamplesToProcess[0] = (frameLenMsec > 30)? 3*samplesIn10Msec : _frameLenSmpl;
- noSamplesToProcess[1] = _frameLenSmpl-noSamplesToProcess[0];
+ _dtxEnabled = true;
+ }
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::DisableDTX() {
+ if (_hasInternalDTX) {
+ // We should not be here if we have internal DTX;
+ // in that case this function should be overridden
+ // by the derived class.
+ return -1;
+ }
+ if (_ptrDTXInst != NULL) {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ _dtxEnabled = false;
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::EnableVAD(ACMVADMode mode) {
+ if ((mode < VADNormal) || (mode > VADVeryAggr)) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in VAD mode range");
+ return -1;
+ }
+
+ if (!_vadEnabled) {
+ if (WebRtcVad_Create(&_ptrVADInst) < 0) {
+ _ptrVADInst = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in create VAD");
+ return -1;
+ }
+ if (WebRtcVad_Init(_ptrVADInst) < 0) {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in init VAD");
+ return -1;
+ }
+ }
+
+ // Set the VAD mode to the given value
+ if (WebRtcVad_set_mode(_ptrVADInst, mode) < 0) {
+ // We failed to set the mode and we have to return -1. If
+ // we already have a working VAD (_vadEnabled == true) then
+ // we leave it to work. Otherwise, the following will be
+ // executed.
+ if (!_vadEnabled) {
+ // We just created the instance but cannot set the mode,
+ // so we have to free the memory.
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: failed to set the VAD mode");
+ return -1;
+ }
+ _vadMode = mode;
+ _vadEnabled = true;
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::DisableVAD() {
+ if (_ptrVADInst != NULL) {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ _vadEnabled = false;
+ return 0;
+}
+
+WebRtc_Word32 ACMGenericCodec::ReplaceInternalDTX(
+ const bool replaceInternalDTX) {
+ WriteLockScoped cs(_codecWrapperLock);
+ return ReplaceInternalDTXSafe(replaceInternalDTX);
+}
+
+WebRtc_Word32 ACMGenericCodec::ReplaceInternalDTXSafe(
+ const bool /* replaceInternalDTX */) {
+ return -1;
+}
+
+WebRtc_Word32 ACMGenericCodec::IsInternalDTXReplaced(
+ bool* internalDTXReplaced) {
+ WriteLockScoped cs(_codecWrapperLock);
+ return IsInternalDTXReplacedSafe(internalDTXReplaced);
+}
+
+WebRtc_Word32 ACMGenericCodec::IsInternalDTXReplacedSafe(
+ bool* internalDTXReplaced) {
+ *internalDTXReplaced = false;
+ return 0;
+}
+
+WebRtc_Word16 ACMGenericCodec::ProcessFrameVADDTX(
+ WebRtc_UWord8* bitStream, WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16* samplesProcessed) {
+ if (!_vadEnabled) {
+ // VAD is not enabled; set all _vadLabel[] entries to 1 (speech detected)
+ for (WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++) {
+ _vadLabel[n] = 1;
+ }
+ *samplesProcessed = 0;
+ return 0;
+ }
+
+ WebRtc_UWord16 freqHz;
+ EncoderSampFreq(freqHz);
+
+ // Calculate the number of samples in a 10 ms block, and the number of ms
+ // in one frame.
+ WebRtc_Word16 samplesIn10Msec = (WebRtc_Word16)(freqHz / 100);
+ WebRtc_Word32 frameLenMsec = (((WebRtc_Word32) _frameLenSmpl * 1000) /
+ freqHz);
+ WebRtc_Word16 status;
+
+ // Vector for storing maximum 30 ms of mono audio at 48 kHz.
+ WebRtc_Word16 audio[1440];
+
+ // Calculate number of VAD-blocks to process, and number of samples in each
+ // block.
+ int noSamplesToProcess[2];
+ if (frameLenMsec == 40) {
+ // 20 ms in each VAD block
+ noSamplesToProcess[0] = noSamplesToProcess[1] = 2 * samplesIn10Msec;
+ } else {
+ // For 10-30 ms frame sizes the second VAD block will be zero ms;
+ // for 50 and 60 ms the first VAD block will be 30 ms.
+ noSamplesToProcess[0] =
+ (frameLenMsec > 30) ? 3 * samplesIn10Msec : _frameLenSmpl;
+ noSamplesToProcess[1] = _frameLenSmpl - noSamplesToProcess[0];
+ }
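// Worked example of the split above, assuming freqHz == 16000 so that
// samplesIn10Msec == 160:
//   - a 20 ms frame: frameLenMsec == 20, noSamplesToProcess[0] == 320
//     (the whole frame) and noSamplesToProcess[1] == 0, i.e. one VAD block;
//   - a 40 ms frame: two VAD blocks of 2 * 160 == 320 samples (20 ms) each;
//   - a 60 ms frame: noSamplesToProcess[0] == 3 * 160 == 480 (30 ms) and
//     noSamplesToProcess[1] == 960 - 480 == 480 (the remaining 30 ms).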
+
+ int offSet = 0;
+ int loops = (noSamplesToProcess[1] > 0) ? 2 : 1;
+ for (int i = 0; i < loops; i++) {
+ // If stereo, calculate mean of the two channels
+ if (_noChannels == 2) {
+ for (int j = 0; j < noSamplesToProcess[i]; j++) {
+ audio[j] = (_inAudio[(offSet + j) * 2] +
+ _inAudio[(offSet + j) * 2 + 1]) / 2;
+ }
+ offSet = noSamplesToProcess[0];
+ } else {
+ // Mono; copy data from _inAudio to work on.
+ memcpy(audio, _inAudio, sizeof(WebRtc_Word16) * noSamplesToProcess[i]);
}
- int offSet = 0;
- int loops = (noSamplesToProcess[1]>0) ? 2 : 1;
- for (int i=0; i<loops; i++) {
- // If stereo, calculate mean of the two channels
- if(_noChannels == 2) {
- for (int j=0; j<noSamplesToProcess[i]; j++) {
- audio[j] = (_inAudio[(offSet+j)*2]+_inAudio[(offSet+j)*2+1])/2;
- }
- offSet = noSamplesToProcess[0];
- } else {
- // Mono, copy data from _inAudio to continue work on
- memcpy(audio, _inAudio, sizeof(WebRtc_Word16)*noSamplesToProcess[i]);
+ // Call VAD
+ status = (WebRtc_Word16) WebRtcVad_Process(_ptrVADInst, (int) freqHz, audio,
+ noSamplesToProcess[i]);
+
+ _vadLabel[i] = status;
+
+ if (status < 0) {
+ // This will force the data to be removed from the buffer
+ *samplesProcessed += noSamplesToProcess[i];
+ return -1;
+ }
+
+ // If the VAD decision is non-active, update DTX. NOTE! We only do this if
+ // the first part of a frame gets the VAD decision "inactive". Otherwise DTX
+ // might say it is time to transmit a SID frame, but we will encode the
+ // whole frame, because the first part is active.
+ *samplesProcessed = 0;
+ if ((status == 0) && (i == 0) && _dtxEnabled && !_hasInternalDTX) {
+ WebRtc_Word16 bitStreamLen;
+ WebRtc_Word16 num10MsecFrames = noSamplesToProcess[i] / samplesIn10Msec;
+ *bitStreamLenByte = 0;
+ for (WebRtc_Word16 n = 0; n < num10MsecFrames; n++) {
+ // This block is (passive) && (vad enabled). If first CNG after
+ // speech, force SID by setting last parameter to "1".
+ status = WebRtcCng_Encode(_ptrDTXInst, &audio[n * samplesIn10Msec],
+ samplesIn10Msec, bitStream, &bitStreamLen,
+ !_prev_frame_cng);
+ if (status < 0) {
+ return -1;
}
- // Call VAD
- status = (WebRtc_Word16)WebRtcVad_Process(_ptrVADInst, (int)freqHz,
- audio, noSamplesToProcess[i]);
+ // Update previous frame was CNG.
+ _prev_frame_cng = 1;
- _vadLabel[i] = status;
+ *samplesProcessed += samplesIn10Msec * _noChannels;
- if(status < 0)
- {
- // This will force that the data be removed from the buffer
- *samplesProcessed += noSamplesToProcess[i];
- return -1;
- }
+ // bitStreamLen will only be > 0 once per 100 ms
+ *bitStreamLenByte += bitStreamLen;
+ }
- // If VAD decision non-active, update DTX. NOTE! We only do this if the first part of
- // a frame gets the VAD decision "inactive". Otherwise DTX might say it is time to
- // transmit SID frame, but we will encode the whole frame, because the first part is
- // active.
+ // Check if all samples got processed by the DTX
+ if (*samplesProcessed != noSamplesToProcess[i] * _noChannels) {
+ // Set to zero since something went wrong. Shouldn't happen.
*samplesProcessed = 0;
- if((status == 0) && (i==0) && _dtxEnabled && !_hasInternalDTX)
- {
- WebRtc_Word16 bitStreamLen;
- WebRtc_Word16 num10MsecFrames = noSamplesToProcess[i] / samplesIn10Msec;
- *bitStreamLenByte = 0;
- for(WebRtc_Word16 n = 0; n < num10MsecFrames; n++)
- {
- // This block is (passive) && (vad enabled). If first CNG after
- // speech, force SID by setting last parameter to "1".
- status = WebRtcCng_Encode(_ptrDTXInst,
- &audio[n*samplesIn10Msec],
- samplesIn10Msec, bitStream,
- &bitStreamLen, !_prev_frame_cng);
- if (status < 0) {
- return -1;
- }
-
- // Update previous frame was CNG.
- _prev_frame_cng = 1;
-
- *samplesProcessed += samplesIn10Msec*_noChannels;
-
- // bitStreamLen will only be > 0 once per 100 ms
- *bitStreamLenByte += bitStreamLen;
- }
-
- // Check if all samples got processed by the DTX
- if(*samplesProcessed != noSamplesToProcess[i]*_noChannels) {
- // Set to zero since something went wrong. Shouldn't happen.
- *samplesProcessed = 0;
- }
- } else {
- // Update previous frame was not CNG.
- _prev_frame_cng = 0;
- }
-
- if(*samplesProcessed > 0)
- {
- // The block contains inactive speech, and is processed by DTX.
- // Discontinue running VAD.
- break;
- }
+ }
+ } else {
+ // Update previous frame was not CNG.
+ _prev_frame_cng = 0;
}
- return status;
+ if (*samplesProcessed > 0) {
+ // The block contains inactive speech, and is processed by DTX.
+ // Discontinue running VAD.
+ break;
+ }
+ }
+
+ return status;
}
-WebRtc_Word16
-ACMGenericCodec::SamplesLeftToEncode()
-{
- ReadLockScoped rl(_codecWrapperLock);
- return (_frameLenSmpl <= _inAudioIxWrite)?
- 0:(_frameLenSmpl - _inAudioIxWrite);
+WebRtc_Word16 ACMGenericCodec::SamplesLeftToEncode() {
+ ReadLockScoped rl(_codecWrapperLock);
+ return (_frameLenSmpl <= _inAudioIxWrite) ? 0 :
+ (_frameLenSmpl - _inAudioIxWrite);
}
-void
-ACMGenericCodec::SetUniqueID(
- const WebRtc_UWord32 id)
-{
- _uniqueID = id;
+void ACMGenericCodec::SetUniqueID(const WebRtc_UWord32 id) {
+ _uniqueID = id;
}
-bool
-ACMGenericCodec::IsAudioBufferFresh() const
-{
- ReadLockScoped rl(_codecWrapperLock);
- return _isAudioBuffFresh;
+bool ACMGenericCodec::IsAudioBufferFresh() const {
+ ReadLockScoped rl(_codecWrapperLock);
+ return _isAudioBuffFresh;
}
// This function is replaced by codec specific functions for some codecs
-WebRtc_Word16
-ACMGenericCodec::EncoderSampFreq(WebRtc_UWord16& sampFreqHz)
-{
- WebRtc_Word32 f;
- f = ACMCodecDB::CodecFreq(_codecID);
- if(f < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EncoderSampFreq: codec frequency is negative");
- return -1;
- }
- else
- {
- sampFreqHz = (WebRtc_UWord16)f;
- return 0;
- }
-}
-
-
-WebRtc_Word32
-ACMGenericCodec::ConfigISACBandwidthEstimator(
- const WebRtc_UWord8 /* initFrameSizeMsec */,
- const WebRtc_UWord16 /* initRateBitPerSec */,
- const bool /* enforceFrameSize */)
-{
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
- "The send-codec is not iSAC, failed to config iSAC bandwidth estimator.");
- return -1;
-}
-
-WebRtc_Word32
-ACMGenericCodec::SetISACMaxRate(
- const WebRtc_UWord32 /* maxRateBitPerSec */)
-{
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
- "The send-codec is not iSAC, failed to set iSAC max rate.");
- return -1;
-}
-
-WebRtc_Word32
-ACMGenericCodec::SetISACMaxPayloadSize(
- const WebRtc_UWord16 /* maxPayloadLenBytes */)
-{
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
- "The send-codec is not iSAC, failed to set iSAC max payload-size.");
- return -1;
-}
-
-
-void
-ACMGenericCodec::SaveDecoderParam(
- const WebRtcACMCodecParams* codecParams)
-{
- WriteLockScoped wl(_codecWrapperLock);
- SaveDecoderParamSafe(codecParams);
-}
-
-
-void
-ACMGenericCodec::SaveDecoderParamSafe(
- const WebRtcACMCodecParams* codecParams)
-{
- memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
-}
-
-WebRtc_Word16
-ACMGenericCodec::UpdateEncoderSampFreq(
- WebRtc_UWord16 /* encoderSampFreqHz */)
-{
+WebRtc_Word16 ACMGenericCodec::EncoderSampFreq(WebRtc_UWord16& sampFreqHz) {
+ WebRtc_Word32 f;
+ f = ACMCodecDB::CodecFreq(_codecID);
+ if (f < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "It is asked for a change in smapling frequency while the \
-current send-codec supports only one sampling rate.");
+ "EncoderSampFreq: codec frequency is negative");
return -1;
+ } else {
+ sampFreqHz = (WebRtc_UWord16) f;
+ return 0;
+ }
}
-
-void
-ACMGenericCodec::SetIsMaster(
- bool isMaster)
-{
- WriteLockScoped wl(_codecWrapperLock);
- _isMaster = isMaster;
+WebRtc_Word32 ACMGenericCodec::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 /* initFrameSizeMsec */,
+ const WebRtc_UWord16 /* initRateBitPerSec */,
+ const bool /* enforceFrameSize */) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to config iSAC bandwidth estimator.");
+ return -1;
}
-
-
-WebRtc_Word16
-ACMGenericCodec::REDPayloadISAC(
- const WebRtc_Word32 /* isacRate */,
- const WebRtc_Word16 /* isacBwEstimate */,
- WebRtc_UWord8* /* payload */,
- WebRtc_Word16* /* payloadLenBytes */)
-{
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error: REDPayloadISAC is an iSAC specific function");
- return -1;
+WebRtc_Word32 ACMGenericCodec::SetISACMaxRate(
+ const WebRtc_UWord32 /* maxRateBitPerSec */) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to set iSAC max rate.");
+ return -1;
}
-} // namespace webrtc
+WebRtc_Word32 ACMGenericCodec::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 /* maxPayloadLenBytes */) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to set iSAC max payload-size.");
+ return -1;
+}
+
+void ACMGenericCodec::SaveDecoderParam(
+ const WebRtcACMCodecParams* codecParams) {
+ WriteLockScoped wl(_codecWrapperLock);
+ SaveDecoderParamSafe(codecParams);
+}
+
+void ACMGenericCodec::SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* codecParams) {
+ memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+}
+
+WebRtc_Word16 ACMGenericCodec::UpdateEncoderSampFreq(
+ WebRtc_UWord16 /* encoderSampFreqHz */) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "It is asked for a change in smapling frequency while the current "
+ "send-codec supports only one sampling rate.");
+ return -1;
+}
+
+void ACMGenericCodec::SetIsMaster(bool isMaster) {
+ WriteLockScoped wl(_codecWrapperLock);
+ _isMaster = isMaster;
+}
+
+WebRtc_Word16 ACMGenericCodec::REDPayloadISAC(
+ const WebRtc_Word32 /* isacRate */,
+ const WebRtc_Word16 /* isacBwEstimate */,
+ WebRtc_UWord8* /* payload */,
+ WebRtc_Word16* /* payloadLenBytes */) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error: REDPayloadISAC is an iSAC specific function");
+ return -1;
+}
+
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_generic_codec.h b/modules/audio_coding/main/source/acm_generic_codec.h
index 29c882c..5cb9bf1 100644
--- a/modules/audio_coding/main/source/acm_generic_codec.h
+++ b/modules/audio_coding/main/source/acm_generic_codec.h
@@ -23,1311 +23,1202 @@
struct WebRtcVadInst;
struct WebRtcCngEncInst;
-namespace webrtc
-{
+namespace webrtc {
// forward declaration
struct CodecInst;
-class ACMNetEQ;
+class ACMNetEQ;
-class ACMGenericCodec
-{
-public:
- ///////////////////////////////////////////////////////////////////////////
- // Constructor of the class
- //
- ACMGenericCodec();
+class ACMGenericCodec {
+ public:
+ ///////////////////////////////////////////////////////////////////////////
+ // Constructor of the class
+ //
+ ACMGenericCodec();
+ ///////////////////////////////////////////////////////////////////////////
+ // Destructor of the class.
+ //
+ virtual ~ACMGenericCodec();
- ///////////////////////////////////////////////////////////////////////////
- // Destructor of the class.
- //
- virtual ~ACMGenericCodec();
+ ///////////////////////////////////////////////////////////////////////////
+ // ACMGenericCodec* CreateInstance();
+ // The function will be used for FEC. It is not implemented yet.
+ //
+ virtual ACMGenericCodec* CreateInstance() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 Encode()
+ // The function is called to perform an encoding of the audio stored in
+ // audio buffer. An encoding is performed only if enough audio, i.e. equal
+ // to the frame-size of the codec, exist. The audio frame will be processed
+ // by VAD and CN/DTX if required. There are few different cases.
+ //
+ // A) Neither VAD nor DTX is active; the frame is encoded by the encoder.
+ //
+ // B) VAD is enabled but not DTX; in this case the audio is processed by VAD
+ // and encoded by the encoder. The "*encodingType" will be either
+ // "activeNormalEncode" or "passiveNormalEncode" if frame is active or
+ // passive, respectively.
+ //
+ // C) DTX is enabled; if the codec has internal VAD/DTX we just encode the
+ // frame by the encoder. Otherwise, the frame is passed through VAD and
+ // if identified as passive, then it will be processed by CN/DTX. If the
+ // frame is active it will be encoded by the encoder.
+ //
+ // This function acquires the appropriate locks and calls EncodeSafe() for
+ // the actual processing.
+ //
+ // Outputs:
+ // -bitStream : a buffer where bit-stream will be written to.
+ // -bitStreamLenByte : contains the length of the bit-stream in
+ // bytes.
+ // -timeStamp : contains the RTP timestamp, this is the
+ // sampling time of the first sample encoded
+ // (measured in number of samples).
+ // -encodingType : contains the type of encoding applied on the
+ // audio samples. The alternatives are
+ // (c.f. acm_common_types.h)
+ // -kNoEncoding:
+  //                           there was not enough data to encode, or
+  //                           an error occurred that prevented
+  //                           encoding.
+ // -kActiveNormalEncoded:
+ // the audio frame is active and encoded by
+ // the given codec.
+ // -kPassiveNormalEncoded:
+ // the audio frame is passive but coded with
+ // the given codec (NO DTX).
+ // -kPassiveDTXWB:
+ // The audio frame is passive and used
+ // wide-band CN to encode.
+ // -kPassiveDTXNB:
+ // The audio frame is passive and used
+ // narrow-band CN to encode.
+ //
+ // Return value:
+  //   -1 if an error occurred, otherwise the length of the bit-stream in
+ // bytes.
+ //
+ WebRtc_Word16 Encode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType);
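To make the contract above concrete, a minimal caller-side sketch follows (illustrative only, not part of this reformatting CL); |codec| is assumed to point at an initialized instance of some concrete ACMGenericCodec subclass, and kMaxPayloadBytes is an assumed buffer size:

  const WebRtc_Word16 kMaxPayloadBytes = 1500;  // assumed upper bound
  WebRtc_UWord8 bitstream[kMaxPayloadBytes];
  WebRtc_Word16 bitstream_len_bytes = 0;
  WebRtc_UWord32 rtp_timestamp = 0;
  WebRtcACMEncodingType encoding_type = kNoEncoding;
  WebRtc_Word16 len = codec->Encode(bitstream, &bitstream_len_bytes,
                                    &rtp_timestamp, &encoding_type);
  if (len < 0) {
    // Encoding error.
  } else if (encoding_type == kNoEncoding) {
    // Less than one codec frame is buffered; push more audio (Add10MsData).
  } else {
    // |bitstream_len_bytes| bytes are ready to packetize; kPassiveDTXWB and
    // kPassiveDTXNB indicate comfort-noise (CN) payloads.
  }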
- ///////////////////////////////////////////////////////////////////////////
- // ACMGenericCodec* CreateInstance();
- // The function will be used for FEC. It is not implemented yet.
- //
- virtual ACMGenericCodec* CreateInstance() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 Decode()
+ // This function is used to decode a given bit-stream, without engaging
+ // NetEQ.
+ //
+ // This function acquires the appropriate locks and calls DecodeSafe() for
+ // the actual processing. Please note that this is not functional yet.
+ //
+ // Inputs:
+ // -bitStream : a buffer where bit-stream will be read.
+ // -bitStreamLenByte : the length of the bit-stream in bytes.
+ //
+ // Outputs:
+  //   -audio              : pointer to a buffer where the audio will be
+  //                         written.
+  //   -audioSamples       : number of audio samples obtained from decoding
+  //                         the given bit-stream.
+ // -speechType : speech type (for future use).
+ //
+ // Return value:
+ // -1 if failed to decode,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 Decode(WebRtc_UWord8* bitStream, WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio, WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+ ///////////////////////////////////////////////////////////////////////////
+ // void SplitStereoPacket()
+  // This function is used to split stereo payloads into left and right
+  // channels. Codecs with stereo support have their own implementation of
+  // this function.
+ //
+ // Input/Output:
+ // -payload : a vector with the received payload data.
+ // The function will reorder the data so that
+ // first half holds the left channel data, and the
+ // second half the right channel data.
+ // -payload_length : length of payload in bytes. Will be changed to
+ // twice the input in case of true stereo, where
+ // we simply copy the data and return it both for
+ // left channel and right channel decoding.
+ virtual void SplitStereoPacket(WebRtc_UWord8* /* payload */,
+ WebRtc_Word32* /* payload_length */) {
+ }
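As an illustration of the reordering described above (a sketch, not part of this CL), a stereo-capable subclass with one byte per channel sample could override the function roughly like this; ACMMyStereoCodec is a hypothetical name and <vector> is assumed to be included:

  void ACMMyStereoCodec::SplitStereoPacket(WebRtc_UWord8* payload,
                                           WebRtc_Word32* payload_length) {
    const WebRtc_Word32 len = *payload_length;
    std::vector<WebRtc_UWord8> interleaved(payload, payload + len);
    for (WebRtc_Word32 i = 0; i < len / 2; i++) {
      payload[i] = interleaved[2 * i];                // left -> first half
      payload[len / 2 + i] = interleaved[2 * i + 1];  // right -> second half
    }
    // A "true stereo" codec would instead copy the payload and double
    // *payload_length, as noted in the comment above.
  }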
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 Encode()
- // The function is called to perform an encoding of the audio stored in
- // audio buffer. An encoding is performed only if enough audio, i.e. equal
- // to the frame-size of the codec, exist. The audio frame will be processed
- // by VAD and CN/DTX if required. There are few different cases.
- //
- // A) Neither VAD nor DTX is active; the frame is encoded by the encoder.
- //
- // B) VAD is enabled but not DTX; in this case the audio is processed by VAD
- // and encoded by the encoder. The "*encodingType" will be either
- // "activeNormalEncode" or "passiveNormalEncode" if frame is active or
- // passive, respectively.
- //
- // C) DTX is enabled; if the codec has internal VAD/DTX we just encode the
- // frame by the encoder. Otherwise, the frame is passed through VAD and
- // if identified as passive, then it will be processed by CN/DTX. If the
- // frame is active it will be encoded by the encoder.
- //
- // This function acquires the appropriate locks and calls EncodeSafe() for
- // the actual processing.
- //
- // Outputs:
- // -bitStream : a buffer where bit-stream will be written to.
- // -bitStreamLenByte : contains the length of the bit-stream in
- // bytes.
- // -timeStamp : contains the RTP timestamp, this is the
- // sampling time of the first sample encoded
- // (measured in number of samples).
- // -encodingType : contains the type of encoding applied on the
- // audio samples. The alternatives are
- // (c.f. acm_common_types.h)
- // -kNoEncoding:
- // there was not enough data to encode. or
- // some error has happened that we could
- // not do encoding.
- // -kActiveNormalEncoded:
- // the audio frame is active and encoded by
- // the given codec.
- // -kPassiveNormalEncoded:
- // the audio frame is passive but coded with
- // the given codec (NO DTX).
- // -kPassiveDTXWB:
- // The audio frame is passive and used
- // wide-band CN to encode.
- // -kPassiveDTXNB:
- // The audio frame is passive and used
- // narrow-band CN to encode.
- //
- // Return value:
- // -1 if error is occurred, otherwise the length of the bit-stream in
- // bytes.
- //
- WebRtc_Word16 Encode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_UWord32* timeStamp,
- WebRtcACMEncodingType* encodingType);
+ ///////////////////////////////////////////////////////////////////////////
+ // bool EncoderInitialized();
+ //
+ // Return value:
+ // True if the encoder is successfully initialized,
+ // false otherwise.
+ //
+ bool EncoderInitialized();
+ ///////////////////////////////////////////////////////////////////////////
+ // bool DecoderInitialized();
+ //
+ // Return value:
+ // True if the decoder is successfully initialized,
+ // false otherwise.
+ //
+ bool DecoderInitialized();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 Decode()
- // This function is used to decode a given bit-stream, without engaging
- // NetEQ.
- //
- // This function acquires the appropriate locks and calls DecodeSafe() for
- // the actual processing. Please note that this is not functional yet.
- //
- // Inputs:
- // -bitStream : a buffer where bit-stream will be read.
- // -bitStreamLenByte : the length of the bit-stream in bytes.
- //
- // Outputs:
- // -audio : pointer to a buffer where the audio will written.
- // -audioSamples : number of audio samples out of decoding the given
- // bit-stream.
- // -speechType : speech type (for future use).
- //
- // Return value:
- // -1 if failed to decode,
- // 0 if succeeded.
- //
- WebRtc_Word16 Decode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EncoderParams()
+ // It is called to get encoder parameters. It will call
+ // EncoderParamsSafe() in turn.
+ //
+ // Output:
+  //   -encParams          : a buffer where the encoder parameters are
+  //                         written to. If the encoder is not
+  //                         initialized this buffer is filled with
+  //                         invalid values.
+ // Return value:
+ // -1 if the encoder is not initialized,
+ // 0 otherwise.
+ //
+ //
+ WebRtc_Word16 EncoderParams(WebRtcACMCodecParams *encParams);
- ///////////////////////////////////////////////////////////////////////////
- // void SplitStereoPacket()
- // This function is used to split stereo payloads in left and right channel.
- // Codecs which has stereo support has there own implementation of the
- // function.
- //
- // Input/Output:
- // -payload : a vector with the received payload data.
- // The function will reorder the data so that
- // first half holds the left channel data, and the
- // second half the right channel data.
- // -payload_length : length of payload in bytes. Will be changed to
- // twice the input in case of true stereo, where
- // we simply copy the data and return it both for
- // left channel and right channel decoding.
- virtual void SplitStereoPacket(WebRtc_UWord8* /* payload */,
- WebRtc_Word32* /* payload_length */) {}
+ ///////////////////////////////////////////////////////////////////////////
+  // bool DecoderParams(...)
+ // It is called to get decoder parameters. It will call DecoderParamsSafe()
+ // in turn.
+ //
+ // Output:
+  //   -decParams          : a buffer where the decoder parameters are
+  //                         written to. If the decoder is not initialized
+  //                         this buffer is filled with invalid values.
+  //
+  // Return value:
+  //   True if the decoder parameters are successfully written to decParams,
+  //   false otherwise (e.g. the decoder is not initialized).
+ //
+ //
+ bool DecoderParams(WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
- ///////////////////////////////////////////////////////////////////////////
- // bool EncoderInitialized();
- //
- // Return value:
- // True if the encoder is successfully initialized,
- // false otherwise.
- //
- bool EncoderInitialized();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InitEncoder(...)
+ // This function is called to initialize the encoder with the given
+ // parameters.
+ //
+ // Input:
+ // -codecParams : parameters of encoder.
+ // -forceInitialization: if false the initialization is invoked only if
+ // the encoder is not initialized. If true the
+ // encoder is forced to (re)initialize.
+ //
+ // Return value:
+ // 0 if could initialize successfully,
+ // -1 if failed to initialize.
+ //
+ //
+ WebRtc_Word16 InitEncoder(WebRtcACMCodecParams* codecParams,
+ bool forceInitialization);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InitDecoder()
+ // This function is called to initialize the decoder with the given
+ // parameters. (c.f. acm_common_defs.h & common_types.h for the
+ // definition of the structure)
+ //
+ // Input:
+ // -codecParams : parameters of decoder.
+ // -forceInitialization: if false the initialization is invoked only
+ // if the decoder is not initialized. If true
+  //                         the decoder is forced to (re)initialize.
+ //
+ // Return value:
+ // 0 if could initialize successfully,
+ // -1 if failed to initialize.
+ //
+ //
+ WebRtc_Word16 InitDecoder(WebRtcACMCodecParams* codecParams,
+ bool forceInitialization);
- ///////////////////////////////////////////////////////////////////////////
- // bool DecoderInitialized();
- //
- // Return value:
- // True if the decoder is successfully initialized,
- // false otherwise.
- //
- bool DecoderInitialized();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterInNetEq(...)
+ // This function is called to register the decoder in NetEq, with the given
+ // payload-type.
+ //
+ // Inputs:
+ // -netEq : pointer to NetEq Instance
+  //   -codecInst          : instance with the settings of the codec.
+ //
+ // Return values
+ // -1 if failed to register,
+  //    0 if successfully registered.
+ //
+ WebRtc_Word32 RegisterInNetEq(ACMNetEQ* netEq, const CodecInst& codecInst);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Add10MsData(...)
+ // This function is called to add 10 ms of audio to the audio buffer of
+ // the codec.
+ //
+ // Inputs:
+  //   -timeStamp          : the timestamp of the 10 ms audio, i.e. the
+  //                         sampling time of the first sample, measured in
+  //                         number of samples.
+  //   -data               : a buffer that contains the audio. The codec
+  //                         expects to get the audio at the correct
+  //                         sampling frequency.
+ // -length : the length of the audio buffer
+ // -audioChannel : 0 for mono, 1 for stereo (not supported yet)
+ //
+ // Return values:
+ // -1 if failed
+ // 0 otherwise.
+ //
+ WebRtc_Word32 Add10MsData(const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 length,
+ const WebRtc_UWord8 audioChannel);
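A brief push-side sketch (illustrative, with assumed sample counts): for a 16 kHz mono codec one 10 ms block is 160 samples, and the timestamp advances by the same amount:

  const WebRtc_UWord16 kSamplesPer10Ms = 160;  // 16000 Hz * 0.01 s
  WebRtc_Word16 audio_10ms[kSamplesPer10Ms];
  // ... fill audio_10ms from the capture path ...
  if (codec->Add10MsData(rtp_timestamp, audio_10ms, kSamplesPer10Ms,
                         0 /* mono, per the comment above */) < 0) {
    // Failed to add the data (e.g. the codec is not initialized).
  }
  rtp_timestamp += kSamplesPer10Ms;
  // If the buffer is already full, old samples are overwritten; see
  // NoMissedSamples() below.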
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 EncoderParams()
- // It is called to get encoder parameters. It will call
- // EncoderParamsSafe() in turn.
- //
- // Output:
- // -encParams : a buffer where the encoder parameters is
- // written to. If the encoder is not
- // initialized this buffer is filled with
- // invalid values
- // Return value:
- // -1 if the encoder is not initialized,
- // 0 otherwise.
- //
- //
- WebRtc_Word16 EncoderParams(
- WebRtcACMCodecParams *encParams);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 NoMissedSamples()
+ // This function returns the number of samples which are overwritten in
+ // the audio buffer. The audio samples are overwritten if the input audio
+ // buffer is full, but Add10MsData() is called. (We might remove this
+ // function if it is not used)
+ //
+ // Return Value:
+ // Number of samples which are overwritten.
+ //
+ WebRtc_UWord32 NoMissedSamples() const;
+ ///////////////////////////////////////////////////////////////////////////
+ // void ResetNoMissedSamples()
+ // This function resets the number of overwritten samples to zero.
+ // (We might remove this function if we remove NoMissedSamples())
+ //
+ void ResetNoMissedSamples();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 DecoderParams(...)
- // It is called to get decoder parameters. It will call DecoderParamsSafe()
- // in turn.
- //
- // Output:
- // -decParams : a buffer where the decoder parameters is
- // written to. If the decoder is not initialized
- // this buffer is filled with invalid values
- //
- // Return value:
- // -1 if the decoder is not initialized,
- // 0 otherwise.
- //
- //
- bool DecoderParams(
- WebRtcACMCodecParams *decParams,
- const WebRtc_UWord8 payloadType);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetBitRate()
+ // The function is called to set the encoding rate.
+ //
+ // Input:
+ // -bitRateBPS : encoding rate in bits per second
+ //
+ // Return value:
+ // -1 if failed to set the rate, due to invalid input or given
+ // codec is not rate-adjustable.
+ // 0 if the rate is adjusted successfully
+ //
+ WebRtc_Word16 SetBitRate(const WebRtc_Word32 bitRateBPS);
+ ///////////////////////////////////////////////////////////////////////////
+ // DestructEncoderInst()
+ // This API is used in conferencing. It will free the memory that is pointed
+ // by "ptrInst". "ptrInst" is a pointer to encoder instance, created and
+ // filled up by calling EncoderInst(...).
+ //
+ // Inputs:
+ // -ptrInst : pointer to an encoder instance to be deleted.
+ //
+ //
+ void DestructEncoderInst(void* ptrInst);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InitEncoder(...)
- // This function is called to initialize the encoder with the given
- // parameters.
- //
- // Input:
- // -codecParams : parameters of encoder.
- // -forceInitialization: if false the initialization is invoked only if
- // the encoder is not initialized. If true the
- // encoder is forced to (re)initialize.
- //
- // Return value:
- // 0 if could initialize successfully,
- // -1 if failed to initialize.
- //
- //
- WebRtc_Word16 InitEncoder(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 AudioBuffer()
+  // This is used when synchronization of codecs is required. There are cases
+  // where the audio buffers of two codecs have to be synched. By calling this
+  // function one can get the audio buffer and other related parameters, such
+  // as timestamps.
+ //
+ // Output:
+ // -audioBuff : a pointer to WebRtcACMAudioBuff where the audio
+ // buffer of this codec will be written to.
+ //
+ // Return value:
+ // -1 if fails to copy the audio buffer,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 AudioBuffer(WebRtcACMAudioBuff& audioBuff);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 EarliestTimestamp()
+  // Returns the timestamp of the first 10 ms in the audio buffer. This is
+  // used to identify if a synchronization of two encoders is required.
+ //
+ // Return value:
+ // timestamp of the first 10 ms audio in the audio buffer.
+ //
+ WebRtc_UWord32 EarliestTimestamp() const;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InitDecoder()
- // This function is called to initialize the decoder with the given
- // parameters. (c.f. acm_common_defs.h & common_types.h for the
- // definition of the structure)
- //
- // Input:
- // -codecParams : parameters of decoder.
- // -forceInitialization: if false the initialization is invoked only
- // if the decoder is not initialized. If true
- // the encoder is forced to(re)initialize.
- //
- // Return value:
- // 0 if could initialize successfully,
- // -1 if failed to initialize.
- //
- //
- WebRtc_Word16 InitDecoder(
- WebRtcACMCodecParams* codecParams,
- bool forceInitialization);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetAudioBuffer()
+ // This function is called to set the audio buffer and the associated
+ // parameters to a given value.
+ //
+ // Return value:
+ // -1 if fails to copy the audio buffer,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 SetAudioBuffer(WebRtcACMAudioBuff& audioBuff);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetVAD()
+ // This is called to set VAD & DTX. If the codec has internal DTX that will
+ // be used. If DTX is enabled and the codec does not have internal DTX,
+ // WebRtc-VAD will be used to decide if the frame is active. If DTX is
+  // disabled but VAD is enabled, the audio is passed through VAD to label it
+  // as active or passive, but the frame is encoded normally. However, the
+ // bit-stream is labeled properly so that ACM::Process() can use this
+ // information. In case of failure, the previous states of the VAD & DTX
+ // are kept.
+ //
+ // Inputs:
+ // -enableDTX : if true DTX will be enabled otherwise the DTX is
+ // disabled. If codec has internal DTX that will be
+ // used, otherwise WebRtc-CNG is used. In the latter
+ // case VAD is automatically activated.
+ // -enableVAD : if true WebRtc-VAD is enabled, otherwise VAD is
+ // disabled, except for the case that DTX is enabled
+ // but codec doesn't have internal DTX. In this case
+ // VAD is enabled regardless of the value of
+ // "enableVAD."
+ // -mode : this specifies the aggressiveness of VAD.
+ //
+ // Return value
+ // -1 if failed to set DTX & VAD as specified,
+ // 0 if succeeded.
+ //
+  WebRtc_Word16 SetVAD(const bool enableDTX = true,
+                       const bool enableVAD = false,
+                       const ACMVADMode mode = VADNormal);
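For example (illustrative only), enabling DTX together with an aggressive VAD mode could look like the call below; per the comment above, WebRtc-CNG and VAD only come into play if the codec has no internal DTX, and VADAggr is assumed to be one of the ACMVADMode values:

  if (codec->SetVAD(true /* enableDTX */, true /* enableVAD */, VADAggr) < 0) {
    // On failure the previous VAD/DTX settings are kept.
  }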
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 RegisterInNetEq(...)
- // This function is called to register the decoder in NetEq, with the given
- // payload-type.
- //
- // Inputs:
- // -netEq : pointer to NetEq Instance
- // -codecInst : instance with of the codec settings of the codec
- //
- // Return values
- // -1 if failed to register,
- // 0 if successfully initialized.
- //
- WebRtc_Word32 RegisterInNetEq(
- ACMNetEQ* netEq,
- const CodecInst& codecInst);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ReplaceInternalDTX()
+ // This is called to replace the codec internal DTX with WebRtc DTX.
+  // This is only valid for G729 where the user has the possibility to
+  // replace AnnexB with WebRtc DTX. For other codecs this function has no
+  // effect.
+ //
+ // Input:
+ // -replaceInternalDTX : if true the internal DTX is replaced with WebRtc.
+ //
+ // Return value
+ // -1 if failed to replace internal DTX,
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 ReplaceInternalDTX(const bool replaceInternalDTX);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 IsInternalDTXReplaced()
+ // This is called to check if the codec internal DTX is replaced by WebRtc
+  // DTX. This is only valid for G729 where the user has the possibility to
+  // replace AnnexB with WebRtc DTX. For other codecs this function has no
+  // effect.
+ //
+ // Output:
+ // -internalDTXReplaced : if true the internal DTX is replaced with WebRtc.
+ //
+ // Return value
+ // -1 if failed to check
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 IsInternalDTXReplaced(bool* internalDTXReplaced);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 Add10MsData(...)
- // This function is called to add 10 ms of audio to the audio buffer of
- // the codec.
- //
- // Inputs:
- // -timeStamp : the timestamp of the 10 ms audio. the timestamp
- // is the sampling time of the
- // first sample measured in number of samples.
- // -data : a buffer that contains the audio. The codec
- // expects to get the audio in correct sampling
- // frequency
- // -length : the length of the audio buffer
- // -audioChannel : 0 for mono, 1 for stereo (not supported yet)
- //
- // Return values:
- // -1 if failed
- // 0 otherwise.
- //
- WebRtc_Word32 Add10MsData(
- const WebRtc_UWord32 timeStamp,
- const WebRtc_Word16* data,
- const WebRtc_UWord16 length,
- const WebRtc_UWord8 audioChannel);
+ ///////////////////////////////////////////////////////////////////////////
+ // void SetNetEqDecodeLock()
+ // Passes the NetEq lock to the codec.
+ //
+ // Input:
+ // -netEqDecodeLock : pointer to the lock associated with NetEQ of ACM.
+ //
+ void SetNetEqDecodeLock(RWLockWrapper* netEqDecodeLock) {
+ _netEqDecodeLock = netEqDecodeLock;
+ }
+ ///////////////////////////////////////////////////////////////////////////
+ // bool HasInternalDTX()
+ // Used to check if the codec has internal DTX.
+ //
+ // Return value:
+ // true if the codec has an internal DTX, e.g. G729,
+ // false otherwise.
+ //
+ bool HasInternalDTX() const {
+ return _hasInternalDTX;
+ }
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_UWord32 NoMissedSamples()
- // This function returns the number of samples which are overwritten in
- // the audio buffer. The audio samples are overwritten if the input audio
- // buffer is full, but Add10MsData() is called. (We might remove this
- // function if it is not used)
- //
- // Return Value:
- // Number of samples which are overwritten.
- //
- WebRtc_UWord32 NoMissedSamples() const;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 GetEstimatedBandwidth()
+ // Used to get decoder estimated bandwidth. Only iSAC will provide a value.
+ //
+ //
+ // Return value:
+ // -1 if fails to get decoder estimated bandwidth,
+ // >0 estimated bandwidth in bits/sec.
+ //
+ WebRtc_Word32 GetEstimatedBandwidth();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetEstimatedBandwidth()
+  // Used to set the estimated bandwidth sent out-of-band from the other
+  // side. Only iSAC will have use for the value.
+ //
+ // Input:
+ // -estimatedBandwidth: estimated bandwidth in bits/sec
+ //
+ // Return value:
+ // -1 if fails to set estimated bandwidth,
+ // 0 on success.
+ //
+ WebRtc_Word32 SetEstimatedBandwidth(WebRtc_Word32 estimatedBandwidth);
- ///////////////////////////////////////////////////////////////////////////
- // void ResetNoMissedSamples()
- // This function resets the number of overwritten samples to zero.
- // (We might remove this function if we remove NoMissedSamples())
- //
- void ResetNoMissedSamples();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 GetRedPayload()
+ // Used to get codec specific RED payload (if such is implemented).
+ // Currently only done in iSAC.
+ //
+ // Outputs:
+ // -redPayload : a pointer to the data for RED payload.
+ // -payloadBytes : number of bytes in RED payload.
+ //
+ // Return value:
+ // -1 if fails to get codec specific RED,
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 GetRedPayload(WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 ResetEncoder()
+ // By calling this function you would re-initialize the encoder with the
+ // current parameters. All the settings, e.g. VAD/DTX, frame-size... should
+ // remain unchanged. (In case of iSAC we don't want to lose BWE history.)
+ //
+ // Return value
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ResetEncoder();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 SetBitRate()
- // The function is called to set the encoding rate.
- //
- // Input:
- // -bitRateBPS : encoding rate in bits per second
- //
- // Return value:
- // -1 if failed to set the rate, due to invalid input or given
- // codec is not rate-adjustable.
- // 0 if the rate is adjusted successfully
- //
- WebRtc_Word16 SetBitRate(const WebRtc_Word32 bitRateBPS);
+ ///////////////////////////////////////////////////////////////////////////
+  // WebRtc_Word16 ResetDecoder()
+ // By calling this function you would re-initialize the decoder with the
+ // current parameters.
+ //
+ // Return value
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ResetDecoder(WebRtc_Word16 payloadType);
+ ///////////////////////////////////////////////////////////////////////////
+ // void DestructEncoder()
+ // This function is called to delete the encoder instance, if possible, to
+ // have a fresh start. For codecs where encoder and decoder share the same
+ // instance we cannot delete the encoder and instead we will initialize the
+ // encoder. We also delete VAD and DTX if they have been created.
+ //
+ void DestructEncoder();
- ///////////////////////////////////////////////////////////////////////////
- // DestructEncoderInst()
- // This API is used in conferencing. It will free the memory that is pointed
- // by "ptrInst". "ptrInst" is a pointer to encoder instance, created and
- // filled up by calling EncoderInst(...).
- //
- // Inputs:
- // -ptrInst : pointer to an encoder instance to be deleted.
- //
- //
- void DestructEncoderInst(
- void* ptrInst);
+ ///////////////////////////////////////////////////////////////////////////
+ // void DestructDecoder()
+ // This function is called to delete the decoder instance, if possible, to
+ // have a fresh start. For codecs where encoder and decoder share the same
+  // instance we cannot delete the decoder and instead we will initialize
+  // the decoder. Before deleting the decoder instance it has to be removed
+  // from the NetEq list.
+ //
+ void DestructDecoder();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 AudioBuffer()
- // This is used when synchronization of codecs is required. There are cases
- // that the audio buffers of two codecs have to be synched. By calling this
- // function on can get the audio buffer and other related parameters, such
- // as timestamps...
- //
- // Output:
- // -audioBuff : a pointer to WebRtcACMAudioBuff where the audio
- // buffer of this codec will be written to.
- //
- // Return value:
- // -1 if fails to copy the audio buffer,
- // 0 if succeeded.
- //
- WebRtc_Word16 AudioBuffer(
- WebRtcACMAudioBuff& audioBuff);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SamplesLeftToEncode()
+ // Returns the number of samples required to be able to do encoding.
+ //
+ // Return value:
+ // Number of samples.
+ //
+ WebRtc_Word16 SamplesLeftToEncode();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 LastEncodedTimestamp()
+ // Returns the timestamp of the last frame it encoded.
+ //
+ // Return value:
+ // Timestamp.
+ //
+ WebRtc_UWord32 LastEncodedTimestamp() const;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_UWord32 EarliestTimestamp()
- // Returns the timestamp of the first 10 ms in audio buffer. This is used
- // to identify if a synchronization of two encoders is required.
- //
- // Return value:
- // timestamp of the first 10 ms audio in the audio buffer.
- //
- WebRtc_UWord32 EarliestTimestamp() const;
+ ///////////////////////////////////////////////////////////////////////////
+ // SetUniqueID()
+  // Set a unique ID for the codec to be used for tracing and debugging.
+ //
+ // Input
+ // -id : A number to identify the codec.
+ //
+ void SetUniqueID(const WebRtc_UWord32 id);
+ ///////////////////////////////////////////////////////////////////////////
+ // IsAudioBufferFresh()
+  // Specifies whether any audio has ever been injected into this codec.
+  //
+  // Return value
+  //   -true; no audio has been fed into this codec yet,
+ // -false; audio has already been fed to the codec.
+ //
+ bool IsAudioBufferFresh() const;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 SetAudioBuffer()
- // This function is called to set the audio buffer and the associated
- // parameters to a given value.
- //
- // Return value:
- // -1 if fails to copy the audio buffer,
- // 0 if succeeded.
- //
- WebRtc_Word16 SetAudioBuffer(WebRtcACMAudioBuff& audioBuff);
+ ///////////////////////////////////////////////////////////////////////////
+ // UpdateDecoderSampFreq()
+  // For most of the codecs this function does nothing. It must be
+  // implemented for those codecs where one codec instance serves as the
+  // decoder for different flavors of the codec. One example is iSAC. There,
+  // iSAC 16 kHz and iSAC 32 kHz are treated as two different codecs with
+  // different payload types, however, there is only one iSAC instance to
+  // decode. The reason for that is that we would like to decode and encode
+  // with the same codec instance for the bandwidth estimator to work.
+  //
+  // Each time we receive a new payload type, we call this function to
+  // prepare the decoder associated with the new payload. Normally, decoders
+  // don't have to do anything. For iSAC the decoder has to change its
+  // sampling rate. The input parameter specifies the current flavor of the
+  // codec in the codec database. For instance, if we just got a SWB payload
+  // then the input parameter is ACMCodecDB::isacswb.
+ //
+ // Input:
+ // -codecId : the ID of the codec associated with the
+ // payload type that we just received.
+ //
+ // Return value:
+ // 0 if succeeded in updating the decoder.
+ // -1 if failed to update.
+ //
+ virtual WebRtc_Word16 UpdateDecoderSampFreq(WebRtc_Word16 /* codecId */) {
+ return 0;
+ }
+ ///////////////////////////////////////////////////////////////////////////
+ // UpdateEncoderSampFreq()
+ // Call this function to update the encoder sampling frequency. This
+ // is for codecs where one payload-name supports several encoder sampling
+ // frequencies. Otherwise, to change the sampling frequency we need to
+  // register a new codec. ACM will consider that as registration of a new
+ // codec, not a change in parameter. For iSAC, switching from WB to SWB
+ // is treated as a change in parameter. Therefore, we need this function.
+ //
+ // Input:
+ // -encoderSampFreqHz : encoder sampling frequency.
+ //
+ // Return value:
+ // -1 if failed, or if this is meaningless for the given codec.
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 UpdateEncoderSampFreq(WebRtc_UWord16 encoderSampFreqHz);
+ ///////////////////////////////////////////////////////////////////////////
+ // EncoderSampFreq()
+ // Get the sampling frequency that the encoder (WebRtc wrapper) expects.
+ //
+ // Output:
+ // -sampFreqHz : sampling frequency, in Hertz, which the encoder
+ // should be fed with.
+ //
+ // Return value:
+ // -1 if failed to output sampling rate.
+ // 0 if the sample rate is returned successfully.
+ //
+ virtual WebRtc_Word16 EncoderSampFreq(WebRtc_UWord16& sampFreqHz);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 SetVAD()
- // This is called to set VAD & DTX. If the codec has internal DTX that will
- // be used. If DTX is enabled and the codec does not have internal DTX,
- // WebRtc-VAD will be used to decide if the frame is active. If DTX is
- // disabled but VAD is enabled. The audio is passed through VAD to label it
- // as active or passive, but the frame is encoded normally. However the
- // bit-stream is labeled properly so that ACM::Process() can use this
- // information. In case of failure, the previous states of the VAD & DTX
- // are kept.
- //
- // Inputs:
- // -enableDTX : if true DTX will be enabled otherwise the DTX is
- // disabled. If codec has internal DTX that will be
- // used, otherwise WebRtc-CNG is used. In the latter
- // case VAD is automatically activated.
- // -enableVAD : if true WebRtc-VAD is enabled, otherwise VAD is
- // disabled, except for the case that DTX is enabled
- // but codec doesn't have internal DTX. In this case
- // VAD is enabled regardless of the value of
- // "enableVAD."
- // -mode : this specifies the aggressiveness of VAD.
- //
- // Return value
- // -1 if failed to set DTX & VAD as specified,
- // 0 if succeeded.
- //
- WebRtc_Word16 SetVAD(
- const bool enableDTX = true,
- const bool enableVAD = false,
- const ACMVADMode mode = VADNormal);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ConfigISACBandwidthEstimator()
+ // Call this function to configure the bandwidth estimator of ISAC.
+  // During the adaptation of bit-rate, iSAC automatically adjusts the
+ // frame-size (either 30 or 60 ms) to save on RTP header. The initial
+  // frame-size can be specified by the first argument. The configuration also
+  // includes the initial estimate of the bandwidth. The estimator starts from
+ // this point and converges to the actual bottleneck. This is given by the
+ // second parameter. Furthermore, it is also possible to control the
+ // adaptation of frame-size. This is specified by the last parameter.
+ //
+ // Input:
+  //   -initFrameSizeMsec  : initial frame-size in milliseconds. For iSAC-wb
+ // 30 ms and 60 ms (default) are acceptable values,
+ // and for iSAC-swb 30 ms is the only acceptable
+  //                         value. Zero indicates the default value.
+ // -initRateBitPerSec : initial estimate of the bandwidth. Values
+ // between 10000 and 58000 are acceptable.
+ // -enforceFrameSize : if true, the frame-size will not be adapted.
+ //
+ // Return value:
+ // -1 if failed to configure the bandwidth estimator,
+ // 0 if the configuration was successfully applied.
+ //
+ virtual WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec, const bool enforceFrameSize);
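An illustrative call using values within the ranges listed above: start iSAC-wb with 30 ms frames and a 32 kbps initial estimate, and leave frame-size adaptation enabled:

  if (codec->ConfigISACBandwidthEstimator(30, 32000, false) < 0) {
    // Not an iSAC encoder, or the parameters are out of range.
  }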
+ ///////////////////////////////////////////////////////////////////////////
+ // SetISACMaxPayloadSize()
+ // Set the maximum payload size of iSAC packets. No iSAC payload,
+ // regardless of its frame-size, may exceed the given limit. For
+ // an iSAC payload of size B bits and frame-size T sec we have;
+ // (B < maxPayloadLenBytes * 8) and (B/T < maxRateBitPerSec), c.f.
+ // SetISACMaxRate().
+ //
+ // Input:
+ // -maxPayloadLenBytes : maximum payload size in bytes.
+ //
+ // Return value:
+  //   -1 if failed to set the maximum payload size.
+  //    0 if the given limit is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 ReplaceInternalDTX()
- // This is called to replace the codec internal DTX with WebRtc DTX.
- // This is only valid for G729 where the user has possibility to replace
- // AnnexB with WebRtc DTX. For other codecs this function has no effect.
- //
- // Input:
- // -replaceInternalDTX : if true the internal DTX is replaced with WebRtc.
- //
- // Return value
- // -1 if failed to replace internal DTX,
- // 0 if succeeded.
- //
- WebRtc_Word32 ReplaceInternalDTX(const bool replaceInternalDTX);
+ ///////////////////////////////////////////////////////////////////////////
+ // SetISACMaxRate()
+ // Set the maximum instantaneous rate of iSAC. For a payload of B bits
+  // with a frame-size of T sec the instantaneous rate is B/T bits per
+ // second. Therefore, (B/T < maxRateBitPerSec) and
+ // (B < maxPayloadLenBytes * 8) are always satisfied for iSAC payloads,
+ // c.f SetISACMaxPayloadSize().
+ //
+ // Input:
+ // -maxRateBitPerSec : maximum instantaneous bit-rate given in bits/sec.
+ //
+ // Return value:
+ // -1 if failed to set the maximum rate.
+ // 0 if the maximum rate is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxRate(const WebRtc_UWord32 maxRateBitPerSec);
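A small worked example (illustrative numbers) of how this limit combines with SetISACMaxPayloadSize(): with a 400-byte payload cap and a 53400 bits/s rate cap, a 30 ms frame may carry at most min(400 * 8, 53400 * 0.030) = min(3200, 1602) = 1602 bits:

  codec->SetISACMaxPayloadSize(400);  // B < 400 * 8 bits for any frame.
  codec->SetISACMaxRate(53400);       // B / T < 53400 bits/s.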
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 IsInternalDTXReplaced()
- // This is called to check if the codec internal DTX is replaced by WebRtc DTX.
- // This is only valid for G729 where the user has possibility to replace
- // AnnexB with WebRtc DTX. For other codecs this function has no effect.
- //
- // Output:
- // -internalDTXReplaced : if true the internal DTX is replaced with WebRtc.
- //
- // Return value
- // -1 if failed to check if replace internal DTX or replacement not feasible,
- // 0 if succeeded.
- //
- WebRtc_Word32 IsInternalDTXReplaced(bool* internalDTXReplaced);
+ ///////////////////////////////////////////////////////////////////////////
+  // SaveDecoderParam()
+ // Save the parameters of decoder.
+ //
+ // Input:
+  //   -codecParams        : pointer to a struct where the decoder
+  //                         parameters are stored.
+ //
+ void SaveDecoderParam(const WebRtcACMCodecParams* codecParams);
- ///////////////////////////////////////////////////////////////////////////
- // void SetNetEqDecodeLock()
- // Passes the NetEq lock to the codec.
- //
- // Input:
- // -netEqDecodeLock : pointer to the lock associated with NetEQ of ACM.
- //
- void SetNetEqDecodeLock(
- RWLockWrapper* netEqDecodeLock)
- {
- _netEqDecodeLock = netEqDecodeLock;
- }
+ WebRtc_Word32 FrameSize() {
+ return _frameLenSmpl;
+ }
+ void SetIsMaster(bool isMaster);
- ///////////////////////////////////////////////////////////////////////////
- // bool HasInternalDTX()
- // Used to check if the codec has internal DTX.
- //
- // Return value:
- // true if the codec has an internal DTX, e.g. G729,
- // false otherwise.
- //
- bool HasInternalDTX() const
- {
- return _hasInternalDTX;
- }
+ ///////////////////////////////////////////////////////////////////////////
+ // REDPayloadISAC()
+ // This is an iSAC-specific function. The function is called to get RED
+  // payload from a default-encoder.
+ //
+ // Inputs:
+ // -isacRate : the target rate of the main payload. A RED
+  //                         payload is generated according to the rate of
+  //                         the main payload. Note that we are not
+  //                         specifying the rate of the RED payload, but the
+  //                         main payload.
+  //   -isacBwEstimate     : bandwidth information to be inserted in the
+  //                         RED payload.
+ //
+ // Output:
+  //   -payload            : pointer to a buffer where the RED payload will
+  //                         be written to.
+  //   -payloadLenBytes    : a place-holder to write the length of the RED
+ // payload in Bytes.
+ //
+ // Return value:
+  //   -1 if an error occurs, otherwise the length of the payload (in Bytes)
+ // is returned.
+ //
+ //
+ virtual WebRtc_Word16 REDPayloadISAC(const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes);
+ ///////////////////////////////////////////////////////////////////////////
+ // IsTrueStereoCodec()
+  // Call to see if the current encoder is a true stereo codec. This function
+  // should be overridden by codecs which are true stereo codecs.
+ // Return value:
+ // -true if stereo codec
+ // -false if not stereo codec.
+ //
+ virtual bool IsTrueStereoCodec() {
+ return false;
+ }
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 GetEstimatedBandwidth()
- // Used to get decoder estimated bandwidth. Only iSAC will provide a value.
- //
- //
- // Return value:
- // -1 if fails to get decoder estimated bandwidth,
- // >0 estimated bandwidth in bits/sec.
- //
- WebRtc_Word32 GetEstimatedBandwidth();
+ protected:
+ ///////////////////////////////////////////////////////////////////////////
+ // All the functions with FunctionNameSafe(...) contain the actual
+ // implementation of FunctionName(...). FunctionName() acquires an
+ // appropriate lock and calls FunctionNameSafe() to do the actual work.
+ // Therefore, for the description of functionality, input/output arguments
+ // and return value we refer to FunctionName()
+ //
+ ///////////////////////////////////////////////////////////////////////////
+ // See Encode() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+ WebRtc_Word16 EncodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType);
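The public/Safe split described above is essentially a locking wrapper; a simplified sketch follows (the actual implementation in acm_generic_codec.cc may do additional bookkeeping before calling EncodeSafe()):

  WebRtc_Word16 ACMGenericCodec::Encode(WebRtc_UWord8* bitStream,
                                        WebRtc_Word16* bitStreamLenByte,
                                        WebRtc_UWord32* timeStamp,
                                        WebRtcACMEncodingType* encodingType) {
    WriteLockScoped lock(_codecWrapperLock);  // same lock SetIsMaster() takes
    return EncodeSafe(bitStream, bitStreamLenByte, timeStamp, encodingType);
  }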
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 SetEstimatedBandwidth()
- // Used to set estiamted bandwidth sent out of band from other side. Only
- // iSAC will have use for the value.
- //
- // Input:
- // -estimatedBandwidth: estimated bandwidth in bits/sec
- //
- // Return value:
- // -1 if fails to set estimated bandwidth,
- // 0 on success.
- //
- WebRtc_Word32 SetEstimatedBandwidth(WebRtc_Word32 estimatedBandwidth);
+ ///////////////////////////////////////////////////////////////////////////
+ // See Decode() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+ virtual WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType) = 0;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 GetRedPayload()
- // Used to get codec specific RED payload (if such is implemented).
- // Currently only done in iSAC.
- //
- // Outputs:
- // -redPayload : a pointer to the data for RED payload.
- // -payloadBytes : number of bytes in RED payload.
- //
- // Return value:
- // -1 if fails to get codec specific RED,
- // 0 if succeeded.
- //
- WebRtc_Word32 GetRedPayload(
- WebRtc_UWord8* redPayload,
- WebRtc_Word16* payloadBytes);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 ResetEncoder()
- // By calling this function you would re-initialize the encoder with the
- // current parameters. All the settings, e.g. VAD/DTX, frame-size... should
- // remain unchanged. (In case of iSAC we don't want to lose BWE history.)
- //
- // Return value
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 ResetEncoder();
-
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 ResetEncoder()
- // By calling this function you would re-initialize the decoder with the
- // current parameters.
- //
- // Return value
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 ResetDecoder(
- WebRtc_Word16 payloadType);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // void DestructEncoder()
- // This function is called to delete the encoder instance, if possible, to
- // have a fresh start. For codecs where encoder and decoder share the same
- // instance we cannot delete the encoder and instead we will initialize the
- // encoder. We also delete VAD and DTX if they have been created.
- //
- void DestructEncoder();
-
-
- ///////////////////////////////////////////////////////////////////////////
- // void DestructDecoder()
- // This function is called to delete the decoder instance, if possible, to
- // have a fresh start. For codecs where encoder and decoder share the same
- // instance we cannot delete the encoder and instead we will initialize the
- // decoder. Before deleting decoder instance it has to be removed from the
- // NetEq list.
- //
- void DestructDecoder();
-
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 SamplesLeftToEncode()
- // Returns the number of samples required to be able to do encoding.
- //
- // Return value:
- // Number of samples.
- //
- WebRtc_Word16 SamplesLeftToEncode();
-
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_UWord32 LastEncodedTimestamp()
- // Returns the timestamp of the last frame it encoded.
- //
- // Return value:
- // Timestamp.
- //
- WebRtc_UWord32 LastEncodedTimestamp() const;
-
-
- ///////////////////////////////////////////////////////////////////////////
- // SetUniqueID()
- // Set a unique ID for the codec to be used for tracing and debuging
- //
- // Input
- // -id : A number to identify the codec.
- //
- void SetUniqueID(
- const WebRtc_UWord32 id);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // IsAudioBufferFresh()
- // Specifies if ever audio is injected to this codec.
- //
- // Return value
- // -true; no audio is feed into this codec
- // -false; audio has already been fed to the codec.
- //
- bool IsAudioBufferFresh() const;
-
-
- ///////////////////////////////////////////////////////////////////////////
- // UpdateDecoderSampFreq()
- // For most of the codecs this function does nothing. It must be
- // implemented for those codecs that one codec instance serves as the
- // decoder for different flavers of the codec. One example is iSAC. there,
- // iSAC 16 kHz and iSAC 32 kHz are treated as two different codecs with
- // different payload types, however, there is only one iSAC instance to
- // decode. The reason for that is we would like to decode and encode with
- // the same codec instance for bandwidth estimator to work.
- //
- // Each time that we receive a new payload type, we call this funtion to
- // prepare the decoder associated with the new payload. Normally, decoders
- // doesn't have to do anything. For iSAC the decoder has to change it's
- // sampling rate. The input parameter specifies the current flaver of the
- // codec in codec database. For instance, if we just got a SWB payload then
- // the input parameter is ACMCodecDB::isacswb.
- //
- // Input:
- // -codecId : the ID of the codec associated with the
- // payload type that we just received.
- //
- // Return value:
- // 0 if succeeded in updating the decoder.
- // -1 if failed to update.
- //
- virtual WebRtc_Word16 UpdateDecoderSampFreq(
- WebRtc_Word16 /* codecId */)
- {
- return 0;
- }
-
-
- ///////////////////////////////////////////////////////////////////////////
- // UpdateEncoderSampFreq()
- // Call this function to update the encoder sampling frequency. This
- // is for codecs where one payload-name supports several encoder sampling
- // frequencies. Otherwise, to change the sampling frequency we need to
- // register new codec. ACM will consider that as registration of a new
- // codec, not a change in parameter. For iSAC, switching from WB to SWB
- // is treated as a change in parameter. Therefore, we need this function.
- //
- // Input:
- // -encoderSampFreqHz : encoder sampling frequency.
- //
- // Return value:
- // -1 if failed, or if this is meaningless for the given codec.
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 UpdateEncoderSampFreq(
- WebRtc_UWord16 encoderSampFreqHz);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // EncoderSampFreq()
- // Get the sampling frequency that the encoder (WebRtc wrapper) expects.
- //
- // Output:
- // -sampFreqHz : sampling frequency, in Hertz, which the encoder
- // should be fed with.
- //
- // Return value:
- // -1 if failed to output sampling rate.
- // 0 if the sample rate is returned successfully.
- //
- virtual WebRtc_Word16 EncoderSampFreq(
- WebRtc_UWord16& sampFreqHz);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word32 ConfigISACBandwidthEstimator()
- // Call this function to configure the bandwidth estimator of ISAC.
- // During the adaptation of bit-rate, iSAC atomatically adjusts the
- // frame-size (either 30 or 60 ms) to save on RTP header. The initial
- // frame-size can be specified by the first argument. The configuration also
- // regards the initial estimate of bandwidths. The estimator starts from
- // this point and converges to the actual bottleneck. This is given by the
- // second parameter. Furthermore, it is also possible to control the
- // adaptation of frame-size. This is specified by the last parameter.
- //
- // Input:
- // -initFrameSizeMsec : initial frame-size in milisecods. For iSAC-wb
- // 30 ms and 60 ms (default) are acceptable values,
- // and for iSAC-swb 30 ms is the only acceptable
- // value. Zero indiates default value.
- // -initRateBitPerSec : initial estimate of the bandwidth. Values
- // between 10000 and 58000 are acceptable.
- // -enforceFrameSize : if true, the frame-size will not be adapted.
- //
- // Return value:
- // -1 if failed to configure the bandwidth estimator,
- // 0 if the configuration was successfully applied.
- //
- virtual WebRtc_Word32 ConfigISACBandwidthEstimator(
- const WebRtc_UWord8 initFrameSizeMsec,
- const WebRtc_UWord16 initRateBitPerSec,
- const bool enforceFrameSize);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // SetISACMaxPayloadSize()
- // Set the maximum payload size of iSAC packets. No iSAC payload,
- // regardless of its frame-size, may exceed the given limit. For
- // an iSAC payload of size B bits and frame-size T sec we have;
- // (B < maxPayloadLenBytes * 8) and (B/T < maxRateBitPerSec), c.f.
- // SetISACMaxRate().
- //
- // Input:
- // -maxPayloadLenBytes : maximum payload size in bytes.
- //
- // Return value:
- // -1 if failed to set the maximm payload-size.
- // 0 if the given linit is seet successfully.
- //
- virtual WebRtc_Word32 SetISACMaxPayloadSize(
- const WebRtc_UWord16 maxPayloadLenBytes);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // SetISACMaxRate()
- // Set the maximum instantaneous rate of iSAC. For a payload of B bits
- // with a frame-size of T sec the instantaneous rate is B/T bist per
- // second. Therefore, (B/T < maxRateBitPerSec) and
- // (B < maxPayloadLenBytes * 8) are always satisfied for iSAC payloads,
- // c.f SetISACMaxPayloadSize().
- //
- // Input:
- // -maxRateBitPerSec : maximum instantaneous bit-rate given in bits/sec.
- //
- // Return value:
- // -1 if failed to set the maximum rate.
- // 0 if the maximum rate is set successfully.
- //
- virtual WebRtc_Word32 SetISACMaxRate(
- const WebRtc_UWord32 maxRateBitPerSec);
-
-
- ///////////////////////////////////////////////////////////////////////////
- // SaveDecoderParamS()
- // Save the parameters of decoder.
- //
- // Input:
- // -codecParams : pointer to a struct where the parameters of
- // decoder is stored in.
- //
- void SaveDecoderParam(
- const WebRtcACMCodecParams* codecParams);
-
-
- WebRtc_Word32 FrameSize()
- {
- return _frameLenSmpl;
- }
-
- void SetIsMaster(bool isMaster);
-
-
-
-
- ///////////////////////////////////////////////////////////////////////////
- // REDPayloadISAC()
- // This is an iSAC-specific function. The function is called to get RED
- // paylaod from a default-encoder.
- //
- // Inputs:
- // -isacRate : the target rate of the main payload. A RED
- // paylaod is generated according to the rate of
- // main paylaod. Note that we are not specifying the
- // rate of RED payload, but the main payload.
- // -isacBwEstimate : bandwidth information should be inserted in
- // RED payload.
- //
- // Output:
- // -payload : pointer to a buffer where the RED paylaod will
- // written to.
- // -paylaodLenBytes : a place-holder to write the length of the RED
- // payload in Bytes.
- //
- // Return value:
- // -1 if an error occures, otherwise the length of the payload (in Bytes)
- // is returned.
- //
- //
- virtual WebRtc_Word16 REDPayloadISAC(
- const WebRtc_Word32 isacRate,
- const WebRtc_Word16 isacBwEstimate,
- WebRtc_UWord8* payload,
- WebRtc_Word16* payloadLenBytes);
-
- ///////////////////////////////////////////////////////////////////////////
- // IsTrueStereoCodec()
- // Call to see if current encoder is a true stereo codec. This function
- // should be overwritten for codecs which are true stereo codecs
- // Return value:
- // -true if stereo codec
- // -false if not stereo codec.
- //
- virtual bool IsTrueStereoCodec() {
- return false;
- }
-
-protected:
- ///////////////////////////////////////////////////////////////////////////
- // All the functions with FunctionNameSafe(...) contain the actual
- // implementation of FunctionName(...). FunctionName() acquires an
- // appropriate lock and calls FunctionNameSafe() to do the actual work.
- // Therefore, for the description of functionality, input/output arguments
- // and return value we refer to FunctionName()
- //
-
- ///////////////////////////////////////////////////////////////////////////
- // See Encode() for the description of function, input(s)/output(s) and
- // return value.
- //
- WebRtc_Word16 EncodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_UWord32* timeStamp,
- WebRtcACMEncodingType* encodingType);
-
- ///////////////////////////////////////////////////////////////////////////
- // See Decode() for the description of function, input(s)/output(s) and
- // return value.
- //
- virtual WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // See Add10MsSafe() for the description of function, input(s)/output(s)
- // and return value.
- //
- virtual WebRtc_Word32 Add10MsDataSafe(
- const WebRtc_UWord32 timeStamp,
- const WebRtc_Word16* data,
- const WebRtc_UWord16 length,
- const WebRtc_UWord8 audioChannel);
-
- ///////////////////////////////////////////////////////////////////////////
- // See RegisterInNetEq() for the description of function,
- // input(s)/output(s) and return value.
- //
- virtual WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst) = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // See EncoderParam() for the description of function, input(s)/output(s)
- // and return value.
- //
- WebRtc_Word16 EncoderParamsSafe(
- WebRtcACMCodecParams *encParams);
-
- ///////////////////////////////////////////////////////////////////////////
- // See DecoderParam for the description of function, input(s)/output(s)
- // and return value.
- //
- // Note:
- // Any Class where a single instance handle several flavers of the
- // same codec, therefore, several payload types are associated with
- // the same instance have to implement this function.
- //
- // Currently only iSAC is implementing it. A single iSAC instance is
- // used for decoding both WB & SWB stream. At one moment both WB & SWB
- // can be registered as receive codec. Hence two payloads are associated
- // with a single codec instance.
- //
- virtual bool DecoderParamsSafe(
- WebRtcACMCodecParams *decParams,
- const WebRtc_UWord8 payloadType);
-
- ///////////////////////////////////////////////////////////////////////////
- // See ResetEncoder() for the description of function, input(s)/output(s)
- // and return value.
- //
- WebRtc_Word16 ResetEncoderSafe();
-
- ///////////////////////////////////////////////////////////////////////////
- // See InitEncoder() for the description of function, input(s)/output(s)
- // and return value.
- //
- WebRtc_Word16 InitEncoderSafe(
- WebRtcACMCodecParams *codecParams,
- bool forceInitialization);
-
- ///////////////////////////////////////////////////////////////////////////
- // See InitDecoder() for the description of function, input(s)/output(s)
- // and return value.
- //
- WebRtc_Word16 InitDecoderSafe(
- WebRtcACMCodecParams *codecParams,
- bool forceInitialization);
-
- ///////////////////////////////////////////////////////////////////////////
- // See ResetDecoder() for the description of function, input(s)/output(s)
- // and return value.
- //
- WebRtc_Word16 ResetDecoderSafe(
- WebRtc_Word16 payloadType);
-
- ///////////////////////////////////////////////////////////////////////////
- // See DestructEncoder() for the description of function,
- // input(s)/output(s) and return value.
- //
- virtual void DestructEncoderSafe() = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // See DestructDecoder() for the description of function,
- // input(s)/output(s) and return value.
- //
- virtual void DestructDecoderSafe() = 0;
-
- ///////////////////////////////////////////////////////////////////////////
- // See SetBitRate() for the description of function, input(s)/output(s)
- // and return value.
- //
- // Any codec that can change the bit-rate has to implement this.
- //
- virtual WebRtc_Word16 SetBitRateSafe(
- const WebRtc_Word32 bitRateBPS);
-
- ///////////////////////////////////////////////////////////////////////////
- // See GetEstimatedBandwidth() for the description of function, input(s)/output(s)
- // and return value.
- //
- virtual WebRtc_Word32 GetEstimatedBandwidthSafe();
-
- ///////////////////////////////////////////////////////////////////////////
- // See SetEstimatedBandwidth() for the description of function, input(s)/output(s)
- // and return value.
- //
- virtual WebRtc_Word32 SetEstimatedBandwidthSafe(WebRtc_Word32 estimatedBandwidth);
-
- ///////////////////////////////////////////////////////////////////////////
- // See GetRedPayload() for the description of function, input(s)/output(s)
- // and return value.
- //
- virtual WebRtc_Word32 GetRedPayloadSafe(
- WebRtc_UWord8* redPayload,
- WebRtc_Word16* payloadBytes);
-
- ///////////////////////////////////////////////////////////////////////////
- // See SetVAD() for the description of function, input(s)/output(s) and
- // return value.
- //
- WebRtc_Word16 SetVADSafe(
- const bool enableDTX = true,
- const bool enableVAD = false,
- const ACMVADMode mode = VADNormal);
-
- ///////////////////////////////////////////////////////////////////////////
- // See ReplaceInternalDTX() for the description of function, input and
- // return value.
- //
- virtual WebRtc_Word32 ReplaceInternalDTXSafe(
- const bool replaceInternalDTX);
-
- ///////////////////////////////////////////////////////////////////////////
- // See IsInternalDTXReplaced() for the description of function, input and
- // return value.
- //
- virtual WebRtc_Word32 IsInternalDTXReplacedSafe(
- bool* internalDTXReplaced);
-
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 CreateEncoder()
- // Creates the encoder instance.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 CreateEncoder();
+ ///////////////////////////////////////////////////////////////////////////
+ // See Add10MsSafe() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 Add10MsDataSafe(const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 length,
+ const WebRtc_UWord8 audioChannel);
+ ///////////////////////////////////////////////////////////////////////////
+ // See RegisterInNetEq() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) = 0;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 CreateDecoder()
- // Creates the decoder instance.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 CreateDecoder();
+ ///////////////////////////////////////////////////////////////////////////
+ // See EncoderParam() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 EncoderParamsSafe(WebRtcACMCodecParams *encParams);
+ ///////////////////////////////////////////////////////////////////////////
+ // See DecoderParam for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ // Note:
+  // Any class where a single instance handles several flavors of the
+  // same codec, so that several payload types are associated with the
+  // same instance, has to implement this function.
+  //
+  // Currently only iSAC implements it. A single iSAC instance is used
+  // for decoding both WB & SWB streams. At any given moment both WB &
+  // SWB can be registered as receive codecs, hence two payloads are
+  // associated with a single codec instance.
+ //
+ virtual bool DecoderParamsSafe(WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 EnableVAD();
- // Enables VAD with the given mode. The VAD instance will be created if
- // it does not exists.
- //
- // Input:
- // -mode : VAD mode c.f. audio_coding_module_typedefs.h for
- // the options.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 EnableVAD(ACMVADMode mode);
+ ///////////////////////////////////////////////////////////////////////////
+ // See ResetEncoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 ResetEncoderSafe();
+ ///////////////////////////////////////////////////////////////////////////
+ // See InitEncoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 InitEncoderSafe(WebRtcACMCodecParams *codecParams,
+ bool forceInitialization);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 DisableVAD()
- // Disables VAD.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 DisableVAD();
+ ///////////////////////////////////////////////////////////////////////////
+ // See InitDecoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 InitDecoderSafe(WebRtcACMCodecParams *codecParams,
+ bool forceInitialization);
+ ///////////////////////////////////////////////////////////////////////////
+ // See ResetDecoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 ResetDecoderSafe(WebRtc_Word16 payloadType);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 EnableDTX()
- // Enables DTX. This method should be overwritten for codecs which have
- // internal DTX.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 EnableDTX();
+ ///////////////////////////////////////////////////////////////////////////
+ // See DestructEncoder() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual void DestructEncoderSafe() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // See DestructDecoder() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual void DestructDecoderSafe() = 0;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 DisableDTX()
- // Disables usage of DTX. This method should be overwritten for codecs which
- // have internal DTX.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 DisableDTX();
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetBitRate() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ // Any codec that can change the bit-rate has to implement this.
+ //
+ virtual WebRtc_Word16 SetBitRateSafe(const WebRtc_Word32 bitRateBPS);
+ ///////////////////////////////////////////////////////////////////////////
+ // See GetEstimatedBandwidth() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual WebRtc_Word32 GetEstimatedBandwidthSafe();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalEncode()
- // This is a codec-specific function called in EncodeSafe() to actually
- // encode a frame of audio.
- //
- // Outputs:
- // -bitStream : pointer to a buffer where the bit-stream is
- // written to.
- // -bitStreamLenByte : the length of the bit-stream in bytes,
- // a negative value indicates error.
- //
- // Return value:
- // -1 if failed,
- // otherwise the length of the bit-stream is returned.
- //
- virtual WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetEstimatedBandwidth() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual WebRtc_Word32 SetEstimatedBandwidthSafe(
+ WebRtc_Word32 estimatedBandwidth);
+ ///////////////////////////////////////////////////////////////////////////
+ // See GetRedPayload() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 GetRedPayloadSafe(WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalInitEncoder()
- // This is a codec-specific function called in InitEncoderSafe(), it has to
- // do all codec-specific operation to initialize the encoder given the
- // encoder parameters.
- //
- // Input:
- // -codecParams : pointer to a structure that contains parameters to
- // initialize encoder.
- // Set codecParam->CodecInst.rate to -1 for
- // iSAC to operate in adaptive mode.
- // (to do: if frame-length is -1 frame-length will be
- // automatically adjusted, otherwise, given
- // frame-length is forced)
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetVAD() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+  WebRtc_Word16 SetVADSafe(const bool enableDTX = true,
+                           const bool enableVAD = false,
+                           const ACMVADMode mode = VADNormal);
+ ///////////////////////////////////////////////////////////////////////////
+ // See ReplaceInternalDTX() for the description of function, input and
+ // return value.
+ //
+ virtual WebRtc_Word32 ReplaceInternalDTXSafe(const bool replaceInternalDTX);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalInitDecoder()
- // This is a codec-specific function called in InitDecoderSafe(), it has to
- // do all codec-specific operation to initialize the decoder given the
- // decoder parameters.
- //
- // Input:
- // -codecParams : pointer to a structure that contains parameters to
- // initialize encoder.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // See IsInternalDTXReplaced() for the description of function, input and
+ // return value.
+ //
+ virtual WebRtc_Word32 IsInternalDTXReplacedSafe(bool* internalDTXReplaced);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 CreateEncoder()
+ // Creates the encoder instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 CreateEncoder();
- ///////////////////////////////////////////////////////////////////////////
- // void IncreaseNoMissedSamples()
- // This method is called to increase the number of samples that are
- // overwritten in the audio buffer.
- //
- // Input:
- // -noSamples : the number of overwritten samples is incremented
- // by this value.
- //
- void IncreaseNoMissedSamples(
- const WebRtc_Word16 noSamples);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 CreateDecoder()
+ // Creates the decoder instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 CreateDecoder();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EnableVAD();
+ // Enables VAD with the given mode. The VAD instance will be created if
+  // it does not exist.
+ //
+ // Input:
+ // -mode : VAD mode c.f. audio_coding_module_typedefs.h for
+ // the options.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 EnableVAD(ACMVADMode mode);
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalCreateEncoder()
- // This is a codec-specific method called in CreateEncoderSafe() it is
- // supposed to perform all codec-specific operations to create encoder
- // instance.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 InternalCreateEncoder() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 DisableVAD()
+ // Disables VAD.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 DisableVAD();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EnableDTX()
+ // Enables DTX. This method should be overwritten for codecs which have
+ // internal DTX.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 EnableDTX();
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalCreateDecoder()
- // This is a codec-specific method called in CreateDecoderSafe() it is
- // supposed to perform all codec-specific operations to create decoder
- // instance.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 InternalCreateDecoder() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 DisableDTX()
+ // Disables usage of DTX. This method should be overwritten for codecs which
+ // have internal DTX.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 DisableDTX();
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalEncode()
+ // This is a codec-specific function called in EncodeSafe() to actually
+ // encode a frame of audio.
+ //
+ // Outputs:
+ // -bitStream : pointer to a buffer where the bit-stream is
+ // written to.
+ // -bitStreamLenByte : the length of the bit-stream in bytes,
+ // a negative value indicates error.
+ //
+ // Return value:
+ // -1 if failed,
+ // otherwise the length of the bit-stream is returned.
+ //
+ virtual WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) = 0;
- ///////////////////////////////////////////////////////////////////////////
- // void InternalDestructEncoderInst()
- // This is a codec-specific method, used in conferencing, called from
- // DestructEncoderInst(). The input argument is pointer to encoder instance
- // (codec instance for codecs that encoder and decoder share the same
- // instance). This method is called to free the memory that "ptrInst" is
- // pointing to.
- //
- // Input:
- // -ptrInst : pointer to encoder instance.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual void InternalDestructEncoderInst(
- void* ptrInst) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalInitEncoder()
+  // This is a codec-specific function called in InitEncoderSafe(); it has to
+  // do all codec-specific operations to initialize the encoder given the
+  // encoder parameters.
+ //
+ // Input:
+ // -codecParams : pointer to a structure that contains parameters to
+ // initialize encoder.
+ // Set codecParam->CodecInst.rate to -1 for
+ // iSAC to operate in adaptive mode.
+ // (to do: if frame-length is -1 frame-length will be
+ // automatically adjusted, otherwise, given
+ // frame-length is forced)
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalInitDecoder()
+  // This is a codec-specific function called in InitDecoderSafe(); it has to
+  // do all codec-specific operations to initialize the decoder given the
+  // decoder parameters.
+ //
+ // Input:
+  //   -codecParams      : pointer to a structure that contains parameters to
+  //                       initialize the decoder.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams) = 0;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 InternalResetEncoder()
- // This method is called to reset the states of encoder. However, the
- // current parameters, e.g. frame-length, should remain as they are. For
- // most of the codecs a re-initialization of the encoder is what needs to
- // be down. But for iSAC we like to keep the BWE history so we cannot
- // re-initialize. As soon as such an API is implemented in iSAC this method
- // has to be overwritten in ACMISAC class.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- virtual WebRtc_Word16 InternalResetEncoder();
+ ///////////////////////////////////////////////////////////////////////////
+ // void IncreaseNoMissedSamples()
+ // This method is called to increase the number of samples that are
+ // overwritten in the audio buffer.
+ //
+ // Input:
+ // -noSamples : the number of overwritten samples is incremented
+ // by this value.
+ //
+ void IncreaseNoMissedSamples(const WebRtc_Word16 noSamples);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalCreateEncoder()
+  // This is a codec-specific method called in CreateEncoderSafe(); it is
+  // supposed to perform all codec-specific operations to create the encoder
+  // instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalCreateEncoder() = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalCreateDecoder()
+  // This is a codec-specific method called in CreateDecoderSafe(); it is
+  // supposed to perform all codec-specific operations to create the decoder
+  // instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalCreateDecoder() = 0;
- ///////////////////////////////////////////////////////////////////////////
- // WebRtc_Word16 ProcessFrameVADDTX()
- // This function is called when a full frame of audio is available. It will
- // break the audio frame into blocks such that each block could be processed
- // by VAD & CN/DTX. If a frame is divided into two blocks then there are two
- // cases. First, the first block is active, the second block will not be
- // processed by CN/DTX but only by VAD and return to caller with
- // '*samplesProcessed' set to zero. There, the audio frame will be encoded
- // by the encoder. Second, the first block is inactive and is processed by
- // CN/DTX, then we stop processing the next block and return to the caller
- // which is EncodeSafe(), with "*samplesProcessed" equal to the number of
- // samples in first block.
- //
- // Output:
- // -bitStream : pointer to a buffer where DTX frame, if
- // generated, will be written to.
- // -bitStreamLenByte : contains the length of bit-stream in bytes, if
- // generated. Zero if no bit-stream is generated.
- // -noSamplesProcessed : contains no of samples that actually CN has
- // processed. Those samples processed by CN will not
- // be encoded by the encoder, obviously. If
- // contains zero, it means that the frame has been
- // identified as active by VAD. Note that
- // "*noSamplesProcessed" might be non-zero but
- // "*bitStreamLenByte" be zero.
- //
- // Return value:
- // -1 if failed,
- // 0 if succeeded.
- //
- WebRtc_Word16 ProcessFrameVADDTX(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_Word16* samplesProcessed);
+ ///////////////////////////////////////////////////////////////////////////
+ // void InternalDestructEncoderInst()
+ // This is a codec-specific method, used in conferencing, called from
+  // DestructEncoderInst(). The input argument is a pointer to the encoder
+  // instance (or the codec instance, for codecs where the encoder and
+  // decoder share the same instance). This method is called to free the
+  // memory that "ptrInst" is pointing to.
+ //
+ // Input:
+ // -ptrInst : pointer to encoder instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual void InternalDestructEncoderInst(void* ptrInst) = 0;
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalResetEncoder()
+  // This method is called to reset the states of the encoder. However, the
+  // current parameters, e.g. frame-length, should remain as they are. For
+  // most of the codecs a re-initialization of the encoder is what needs to
+  // be done. But for iSAC we want to keep the BWE history, so we cannot
+  // re-initialize. As soon as such an API is implemented in iSAC, this
+  // method has to be overwritten in the ACMISAC class.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalResetEncoder();
- ///////////////////////////////////////////////////////////////////////////
- // CanChangeEncodingParam()
- // Check if the codec parameters can be changed. In conferencing normally
- // codec parametrs cannot be changed. The exception is bit-rate of isac.
- //
- // return value:
- // -true if codec parameters are allowed to change.
- // -flase otherwise.
- //
- virtual bool CanChangeEncodingParam(CodecInst& codecInst);
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 ProcessFrameVADDTX()
+ // This function is called when a full frame of audio is available. It will
+ // break the audio frame into blocks such that each block could be processed
+ // by VAD & CN/DTX. If a frame is divided into two blocks then there are two
+  // cases. First, if the first block is active, the second block will not be
+  // processed by CN/DTX but only by VAD, and the function returns to the
+  // caller with '*samplesProcessed' set to zero. In this case the audio
+  // frame will be encoded by the encoder. Second, if the first block is
+  // inactive and is processed by CN/DTX, we stop processing the next block
+  // and return to the caller, which is EncodeSafe(), with
+  // "*samplesProcessed" equal to the number of samples in the first block.
+ //
+ // Output:
+ // -bitStream : pointer to a buffer where DTX frame, if
+ // generated, will be written to.
+ // -bitStreamLenByte : contains the length of bit-stream in bytes, if
+ // generated. Zero if no bit-stream is generated.
+  //   -samplesProcessed   : contains the number of samples that CN has
+  //                         actually processed. Samples processed by CN will
+  //                         not be encoded by the encoder. If it contains
+  //                         zero, the frame has been identified as active by
+  //                         VAD. Note that "*samplesProcessed" might be
+  //                         non-zero while "*bitStreamLenByte" is zero.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ProcessFrameVADDTX(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16* samplesProcessed);
+ ///////////////////////////////////////////////////////////////////////////
+ // CanChangeEncodingParam()
+  // Check if the codec parameters can be changed. In conferencing, normally
+  // codec parameters cannot be changed. The exception is the bit-rate of
+  // iSAC.
+ //
+ // return value:
+ // -true if codec parameters are allowed to change.
+  //   -false otherwise.
+ //
+ virtual bool CanChangeEncodingParam(CodecInst& codecInst);
- ///////////////////////////////////////////////////////////////////////////
- // CurrentRate()
- // Call to get the current encoding rate of the encoder. This function
- // should be overwritten for codecs whic automatically change their
- // target rate. One example is iSAC. The output of the function is the
- // current target rate.
- //
- // Output:
- // -rateBitPerSec : the current target rate of the codec.
- //
- virtual void CurrentRate(
- WebRtc_Word32& /* rateBitPerSec */)
- {
- return;
- }
+ ///////////////////////////////////////////////////////////////////////////
+ // CurrentRate()
+ // Call to get the current encoding rate of the encoder. This function
+  // should be overwritten for codecs which automatically change their
+ // target rate. One example is iSAC. The output of the function is the
+ // current target rate.
+ //
+ // Output:
+ // -rateBitPerSec : the current target rate of the codec.
+ //
+ virtual void CurrentRate(WebRtc_Word32& /* rateBitPerSec */) {
+ return;
+ }
- virtual void SaveDecoderParamSafe(
- const WebRtcACMCodecParams* codecParams);
+ virtual void SaveDecoderParamSafe(const WebRtcACMCodecParams* codecParams);
- // &_inAudio[_inAudioIxWrite] always point to where new audio can be
- // written to
- WebRtc_Word16 _inAudioIxWrite;
+  // &_inAudio[_inAudioIxWrite] always points to where new audio can be
+  // written to.
+ WebRtc_Word16 _inAudioIxWrite;
- // &_inAudio[_inAudioIxRead] points to where audio has to be read from
- WebRtc_Word16 _inAudioIxRead;
+ // &_inAudio[_inAudioIxRead] points to where audio has to be read from
+ WebRtc_Word16 _inAudioIxRead;
- WebRtc_Word16 _inTimestampIxWrite;
+ WebRtc_Word16 _inTimestampIxWrite;
- // Where the audio is stored before encoding,
- // To save memory the following buffer can be allocated
- // dynamically for 80ms depending on the sampling frequency
- // of the codec.
- WebRtc_Word16* _inAudio;
- WebRtc_UWord32* _inTimestamp;
+  // Where the audio is stored before encoding. To save memory, the
+  // following buffer can be allocated dynamically for 80 ms, depending
+  // on the sampling frequency of the codec.
+ WebRtc_Word16* _inAudio;
+ WebRtc_UWord32* _inTimestamp;
- WebRtc_Word16 _frameLenSmpl;
- WebRtc_UWord16 _noChannels;
+ WebRtc_Word16 _frameLenSmpl;
+ WebRtc_UWord16 _noChannels;
- // This will point to a static database of the supported codecs
- WebRtc_Word16 _codecID;
+ // This will point to a static database of the supported codecs
+ WebRtc_Word16 _codecID;
- // This will account for the No of samples were not encoded
- // the case is rare, either samples are missed due to overwite
- // at input buffer or due to encoding error
- WebRtc_UWord32 _noMissedSamples;
+  // This will account for the number of samples that were not encoded.
+  // The case is rare: either samples are missed due to overwrite at the
+  // input buffer or due to an encoding error.
+ WebRtc_UWord32 _noMissedSamples;
- // True if the encoder instance created
- bool _encoderExist;
- bool _decoderExist;
- // True if the ecncoder instance initialized
- bool _encoderInitialized;
- bool _decoderInitialized;
+  // True if the encoder instance is created.
+ bool _encoderExist;
+ bool _decoderExist;
+  // True if the encoder instance is initialized.
+ bool _encoderInitialized;
+ bool _decoderInitialized;
- bool _registeredInNetEq;
+ bool _registeredInNetEq;
- // VAD/DTX
- bool _hasInternalDTX;
- WebRtcVadInst* _ptrVADInst;
- bool _vadEnabled;
- ACMVADMode _vadMode;
- WebRtc_Word16 _vadLabel[MAX_FRAME_SIZE_10MSEC];
- bool _dtxEnabled;
- WebRtcCngEncInst* _ptrDTXInst;
- WebRtc_UWord8 _numLPCParams;
- bool _sentCNPrevious;
- bool _isMaster;
- int16_t _prev_frame_cng;
+ // VAD/DTX
+ bool _hasInternalDTX;
+ WebRtcVadInst* _ptrVADInst;
+ bool _vadEnabled;
+ ACMVADMode _vadMode;
+ WebRtc_Word16 _vadLabel[MAX_FRAME_SIZE_10MSEC];
+ bool _dtxEnabled;
+ WebRtcCngEncInst* _ptrDTXInst;
+ WebRtc_UWord8 _numLPCParams;
+ bool _sentCNPrevious;
+ bool _isMaster;
+ int16_t _prev_frame_cng;
- WebRtcACMCodecParams _encoderParams;
- WebRtcACMCodecParams _decoderParams;
+ WebRtcACMCodecParams _encoderParams;
+ WebRtcACMCodecParams _decoderParams;
- // Used as a global lock for all avaiable decoders
- // so that no decoder is used when NetEQ decodes.
- RWLockWrapper* _netEqDecodeLock;
- // Used to lock wrapper internal data
- // such as buffers and state variables.
- RWLockWrapper& _codecWrapperLock;
+  // Used as a global lock for all available decoders
+ // so that no decoder is used when NetEQ decodes.
+ RWLockWrapper* _netEqDecodeLock;
+ // Used to lock wrapper internal data
+ // such as buffers and state variables.
+ RWLockWrapper& _codecWrapperLock;
- WebRtc_UWord32 _lastEncodedTimestamp;
- WebRtc_UWord32 _lastTimestamp;
- bool _isAudioBuffFresh;
- WebRtc_UWord32 _uniqueID;
+ WebRtc_UWord32 _lastEncodedTimestamp;
+ WebRtc_UWord32 _lastTimestamp;
+ bool _isAudioBuffFresh;
+ WebRtc_UWord32 _uniqueID;
};
-} // namespace webrt
+}  // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_GENERIC_CODEC_H_
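For orientation, the contract documented in this header reduces to a small set of codec-specific hooks that every wrapper in the following files implements. The skeleton below is a minimal sketch only, under the assumption of a hypothetical codec "Foo"; the class name ACMFooCodec and the places marked for WebRtcFoo_* calls are illustrative and do not exist in the module.

// Hypothetical sketch, not part of this CL: the minimal hooks a new codec
// wrapper derived from ACMGenericCodec has to provide.
#include "acm_generic_codec.h"

namespace webrtc {

class ACMFooCodec : public ACMGenericCodec {
 public:
  ACMFooCodec(WebRtc_Word16 codecID) { _codecID = codecID; }
  ~ACMFooCodec() {}
  // for FEC
  ACMGenericCodec* CreateInstance(void) { return NULL; }

  // Encode one frame starting at &_inAudio[_inAudioIxRead], advance the read
  // index by _frameLenSmpl and return the bit-stream length in bytes (-1 on
  // error). A real wrapper would call its WebRtcFoo_Encode() here.
  WebRtc_Word16 InternalEncode(WebRtc_UWord8* /* bitStream */,
                               WebRtc_Word16* bitStreamLenByte) {
    *bitStreamLenByte = 0;
    _inAudioIxRead += _frameLenSmpl;
    return *bitStreamLenByte;
  }

  // Both init hooks return 0 if succeeded, -1 if failed.
  WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams* /* codecParams */) {
    return 0;
  }
  WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams* /* codecParams */) {
    return 0;
  }

 protected:
  WebRtc_Word16 DecodeSafe(WebRtc_UWord8* /* bitStream */,
                           WebRtc_Word16 /* bitStreamLenByte */,
                           WebRtc_Word16* /* audio */,
                           WebRtc_Word16* /* audioSamples */,
                           WebRtc_Word8* /* speechType */) {
    return 0;
  }
  // Fill codecDef (e.g. via SET_CODEC_PAR) so NetEQ can register the decoder.
  WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
                         const CodecInst& /* codecInst */) {
    return _decoderInitialized ? 0 : -1;
  }
  void DestructEncoderSafe() {
    _encoderExist = false;
    _encoderInitialized = false;
  }
  void DestructDecoderSafe() {
    _decoderExist = false;
    _decoderInitialized = false;
  }
  WebRtc_Word16 InternalCreateEncoder() { return 0; }
  WebRtc_Word16 InternalCreateDecoder() { return 0; }
  void InternalDestructEncoderInst(void* /* ptrInst */) {}
};

}  // namespace webrtc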
diff --git a/modules/audio_coding/main/source/acm_gsmfr.cc b/modules/audio_coding/main/source/acm_gsmfr.cc
index 2343241..18c3077 100644
--- a/modules/audio_coding/main/source/acm_gsmfr.cc
+++ b/modules/audio_coding/main/source/acm_gsmfr.cc
@@ -16,25 +16,10 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_GSMFR
- // NOTE! GSM-FR is not included in the open-source package. The following
- // interface file is needed:
- //
- // /modules/audio_coding/codecs/gsmfr/main/interface/gsmfr_interface.h
- //
- // The API in the header file should match the one below.
- //
- // int16_t WebRtcGSMFR_CreateEnc(GSMFR_encinst_t_** inst);
- // int16_t WebRtcGSMFR_CreateDec(GSMFR_decinst_t_** inst);
- // int16_t WebRtcGSMFR_FreeEnc(GSMFR_encinst_t_* inst);
- // int16_t WebRtcGSMFR_FreeDec(GSMFR_decinst_t_* inst);
- // int16_t WebRtcGSMFR_Encode(GSMFR_encinst_t_* encInst, int16_t* input,
- // int16_t len, int16_t* output);
- // int16_t WebRtcGSMFR_EncoderInit(GSMFR_encinst_t_* encInst, int16_t mode);
- // int16_t WebRtcGSMFR_Decode(GSMFR_decinst_t_* decInst);
- // int16_t WebRtcGSMFR_DecodeBwe(GSMFR_decinst_t_* decInst, int16_t* input);
- // int16_t WebRtcGSMFR_DecodePlc(GSMFR_decinst_t_* decInst);
- // int16_t WebRtcGSMFR_DecoderInit(GSMFR_decinst_t_* decInst);
- #include "gsmfr_interface.h"
+// NOTE! GSM-FR is not included in the open-source package. Modify this file
+// or your codec API to match the function calls and names of the GSM-FR API
+// you are using.
+#include "gsmfr_interface.h"
#endif
namespace webrtc {
@@ -47,340 +32,228 @@
return;
}
-
-ACMGSMFR::~ACMGSMFR()
-{
- return;
+ACMGSMFR::~ACMGSMFR() {
+ return;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMGSMFR::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMGSMFR::EnableDTX()
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::EnableDTX() {
+ return -1;
}
-
-WebRtc_Word16
-ACMGSMFR::DisableDTX()
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::DisableDTX() {
+ return -1;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word32
-ACMGSMFR::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
- return -1;
+WebRtc_Word32 ACMGSMFR::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
}
-
-ACMGenericCodec*
-ACMGSMFR::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMGSMFR::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalCreateEncoder()
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::InternalCreateEncoder() {
+ return -1;
}
-
-void
-ACMGSMFR::DestructEncoderSafe()
-{
- return;
+void ACMGSMFR::DestructEncoderSafe() {
+ return;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalCreateDecoder()
-{
- return -1;
+WebRtc_Word16 ACMGSMFR::InternalCreateDecoder() {
+ return -1;
}
-
-void
-ACMGSMFR::DestructDecoderSafe()
-{
- return;
+void ACMGSMFR::DestructDecoderSafe() {
+ return;
}
-
-void
-ACMGSMFR::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
+void ACMGSMFR::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
}
#else //===================== Actual Implementation =======================
-ACMGSMFR::ACMGSMFR(
- WebRtc_Word16 codecID):
-_encoderInstPtr(NULL),
-_decoderInstPtr(NULL)
-{
- _codecID = codecID;
- _hasInternalDTX = true;
- return;
+ACMGSMFR::ACMGSMFR(WebRtc_Word16 codecID)
+ : _encoderInstPtr(NULL),
+ _decoderInstPtr(NULL) {
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ return;
}
-
-ACMGSMFR::~ACMGSMFR()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcGSMFR_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
- if(_decoderInstPtr != NULL)
- {
- WebRtcGSMFR_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- return;
+ACMGSMFR::~ACMGSMFR() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcGSMFR_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if (_decoderInstPtr != NULL) {
+ WebRtcGSMFR_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
}
-
-WebRtc_Word16
-ACMGSMFR::InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte)
-{
- *bitStreamLenByte = WebRtcGSMFR_Encode(_encoderInstPtr,
- &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream);
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
- _inAudioIxRead += _frameLenSmpl;
- return *bitStreamLenByte;
+WebRtc_Word16 ACMGSMFR::InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) {
+ *bitStreamLenByte = WebRtcGSMFR_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead],
+ _frameLenSmpl,
+ (WebRtc_Word16*) bitStream);
+  // Increment the read index; this tells the caller how far we have gone
+  // forward in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
}
+WebRtc_Word16 ACMGSMFR::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
+}
-WebRtc_Word16
-ACMGSMFR::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
+WebRtc_Word16 ACMGSMFR::EnableDTX() {
+ if (_dtxEnabled) {
return 0;
-}
-
-
-WebRtc_Word16
-ACMGSMFR::EnableDTX()
-{
- if(_dtxEnabled)
- {
- return 0;
- }
- else if(_encoderExist)
- {
- if(WebRtcGSMFR_EncoderInit(_encoderInstPtr, 1) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "EnableDTX: cannot init encoder for GSMFR");
- return -1;
- }
- _dtxEnabled = true;
- return 0;
- }
- else
- {
- return -1;
- }
-}
-
-
-WebRtc_Word16
-ACMGSMFR::DisableDTX()
-{
- if(!_dtxEnabled)
- {
- return 0;
- }
- else if(_encoderExist)
- {
- if(WebRtcGSMFR_EncoderInit(_encoderInstPtr, 0) < 0)
- {
+ } else if (_encoderExist) {
+ if (WebRtcGSMFR_EncoderInit(_encoderInstPtr, 1) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "DisableDTX: cannot init encoder for GSMFR");
- return -1;
- }
- _dtxEnabled = false;
- return 0;
+ "EnableDTX: cannot init encoder for GSMFR");
+ return -1;
}
- else
- {
- // encoder doesn't exists, therefore disabling is harmless
- return 0;
- }
-}
-
-
-WebRtc_Word16
-ACMGSMFR::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- if (WebRtcGSMFR_EncoderInit(_encoderInstPtr, ((codecParams->enableDTX)? 1:0)) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalInitEncoder: cannot init encoder for GSMFR");
- }
- return 0;
-}
-
-
-WebRtc_Word16
-ACMGSMFR::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- if (WebRtcGSMFR_DecoderInit(_decoderInstPtr) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalInitDecoder: cannot init decoder for GSMFR");
- return -1;
- }
- return 0;
-}
-
-
-WebRtc_Word32
-ACMGSMFR::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "CodecDef: decoder is not initialized for GSMFR");
- return -1;
- }
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_GSMFR_FUNCTION."
- // Then call NetEQ to add the codec to it's
- // database.
- SET_CODEC_PAR((codecDef), kDecoderGSMFR, codecInst.pltype,
- _decoderInstPtr, 8000);
- SET_GSMFR_FUNCTIONS((codecDef));
+ _dtxEnabled = true;
return 0;
-}
-
-
-ACMGenericCodec*
-ACMGSMFR::CreateInstance(void)
-{
- return NULL;
-}
-
-
-WebRtc_Word16
-ACMGSMFR::InternalCreateEncoder()
-{
- if (WebRtcGSMFR_CreateEnc(&_encoderInstPtr) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateEncoder: cannot create instance for GSMFR encoder");
+ } else {
return -1;
}
- return 0;
}
-
-void
-ACMGSMFR::DestructEncoderSafe()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcGSMFR_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
+WebRtc_Word16 ACMGSMFR::DisableDTX() {
+ if (!_dtxEnabled) {
+ return 0;
+ } else if (_encoderExist) {
+ if (WebRtcGSMFR_EncoderInit(_encoderInstPtr, 0) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "DisableDTX: cannot init encoder for GSMFR");
+ return -1;
}
- _encoderExist = false;
- _encoderInitialized = false;
+ _dtxEnabled = false;
+ return 0;
+ } else {
+    // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
}
-
-WebRtc_Word16
-ACMGSMFR::InternalCreateDecoder()
-{
- if (WebRtcGSMFR_CreateDec(&_decoderInstPtr) < 0)
- {
+WebRtc_Word16 ACMGSMFR::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+ if (WebRtcGSMFR_EncoderInit(_encoderInstPtr,
+ ((codecParams->enableDTX) ? 1 : 0)) < 0) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateDecoder: cannot create instance for GSMFR decoder");
+ "InternalInitEncoder: cannot init encoder for GSMFR");
+ }
+ return 0;
+}
+
+WebRtc_Word16 ACMGSMFR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ if (WebRtcGSMFR_DecoderInit(_decoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: cannot init decoder for GSMFR");
return -1;
}
return 0;
}
-
-void
-ACMGSMFR::DestructDecoderSafe()
-{
- if(_decoderInstPtr != NULL)
- {
- WebRtcGSMFR_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- _decoderExist = false;
- _decoderInitialized = false;
+WebRtc_Word32 ACMGSMFR::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: decoder is not initialized for GSMFR");
+ return -1;
+ }
+ // Fill up the structure by calling
+  // "SET_CODEC_PAR" & "SET_GSMFR_FUNCTIONS".
+  // Then call NetEQ to add the codec to its
+  // database.
+ SET_CODEC_PAR((codecDef), kDecoderGSMFR, codecInst.pltype, _decoderInstPtr,
+ 8000);
+ SET_GSMFR_FUNCTIONS((codecDef));
+ return 0;
}
+ACMGenericCodec* ACMGSMFR::CreateInstance(void) {
+ return NULL;
+}
-void
-ACMGSMFR::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- WebRtcGSMFR_FreeEnc((GSMFR_encinst_t_*)ptrInst);
- }
- return;
+WebRtc_Word16 ACMGSMFR::InternalCreateEncoder() {
+ if (WebRtcGSMFR_CreateEnc(&_encoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: cannot create instance for GSMFR encoder");
+ return -1;
+ }
+ return 0;
+}
+
+void ACMGSMFR::DestructEncoderSafe() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcGSMFR_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ _encoderExist = false;
+ _encoderInitialized = false;
+}
+
+WebRtc_Word16 ACMGSMFR::InternalCreateDecoder() {
+ if (WebRtcGSMFR_CreateDec(&_decoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: cannot create instance for GSMFR decoder");
+ return -1;
+ }
+ return 0;
+}
+
+void ACMGSMFR::DestructDecoderSafe() {
+ if (_decoderInstPtr != NULL) {
+ WebRtcGSMFR_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+void ACMGSMFR::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ WebRtcGSMFR_FreeEnc((GSMFR_encinst_t_*) ptrInst);
+ }
+ return;
}
#endif
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_gsmfr.h b/modules/audio_coding/main/source/acm_gsmfr.h
index 8991de8..370d85e 100644
--- a/modules/audio_coding/main/source/acm_gsmfr.h
+++ b/modules/audio_coding/main/source/acm_gsmfr.h
@@ -19,55 +19,48 @@
namespace webrtc {
-class ACMGSMFR : public ACMGenericCodec
-{
-public:
- ACMGSMFR(WebRtc_Word16 codecID);
- ~ACMGSMFR();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMGSMFR : public ACMGenericCodec {
+ public:
+ ACMGSMFR(WebRtc_Word16 codecID);
+ ~ACMGSMFR();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- WebRtc_Word16 EnableDTX();
+ WebRtc_Word16 EnableDTX();
- WebRtc_Word16 DisableDTX();
+ WebRtc_Word16 DisableDTX();
- GSMFR_encinst_t_* _encoderInstPtr;
- GSMFR_decinst_t_* _decoderInstPtr;
+ GSMFR_encinst_t_* _encoderInstPtr;
+ GSMFR_decinst_t_* _decoderInstPtr;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_GSMFR_H_
diff --git a/modules/audio_coding/main/source/acm_ilbc.cc b/modules/audio_coding/main/source/acm_ilbc.cc
index 0721619..53f0019 100644
--- a/modules/audio_coding/main/source/acm_ilbc.cc
+++ b/modules/audio_coding/main/source/acm_ilbc.cc
@@ -16,11 +16,10 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_ILBC
- #include "ilbc.h"
+#include "ilbc.h"
#endif
-namespace webrtc
-{
+namespace webrtc {
#ifndef WEBRTC_CODEC_ILBC
@@ -30,333 +29,224 @@
return;
}
-
-ACMILBC::~ACMILBC()
-{
- return;
+ACMILBC::~ACMILBC() {
+ return;
}
+WebRtc_Word16 ACMILBC::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
+}
-WebRtc_Word16
-ACMILBC::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
+WebRtc_Word16 ACMILBC::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return -1;
+}
+
+WebRtc_Word16 ACMILBC::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
+}
+
+WebRtc_Word16 ACMILBC::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
+}
+
+WebRtc_Word32 ACMILBC::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
+}
+
+ACMGenericCodec* ACMILBC::CreateInstance(void) {
+ return NULL;
+}
+
+WebRtc_Word16 ACMILBC::InternalCreateEncoder() {
+ return -1;
+}
+
+void ACMILBC::DestructEncoderSafe() {
+ return;
+}
+
+WebRtc_Word16 ACMILBC::InternalCreateDecoder() {
+ return -1;
+}
+
+void ACMILBC::DestructDecoderSafe() {
+ return;
+}
+
+void ACMILBC::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
+}
+
+WebRtc_Word16 ACMILBC::SetBitRateSafe(const WebRtc_Word32 /* rate */) {
+ return -1;
+}
+
+#else //===================== Actual Implementation =======================
+
+ACMILBC::ACMILBC(WebRtc_Word16 codecID)
+ : _encoderInstPtr(NULL),
+ _decoderInstPtr(NULL) {
+ _codecID = codecID;
+ return;
+}
+
+ACMILBC::~ACMILBC() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if (_decoderInstPtr != NULL) {
+ WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+WebRtc_Word16 ACMILBC::InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) {
+ *bitStreamLenByte = WebRtcIlbcfix_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead],
+ _frameLenSmpl,
+ (WebRtc_Word16*) bitStream);
+ if (*bitStreamLenByte < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: error in encode for ILBC");
return -1;
+ }
+  // Increment the read index; this tells the caller how far we have gone
+  // forward in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
}
+WebRtc_Word16 ACMILBC::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
+}
-WebRtc_Word16
-ACMILBC::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
+WebRtc_Word16 ACMILBC::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+ // initialize with a correct processing block length
+ if ((160 == (codecParams->codecInstant).pacsize) ||
+ (320 == (codecParams->codecInstant).pacsize)) {
+ // processing block of 20ms
+ return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
+ } else if ((240 == (codecParams->codecInstant).pacsize) ||
+ (480 == (codecParams->codecInstant).pacsize)) {
+ // processing block of 30ms
+ return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitEncoder: invalid processing block");
return -1;
+ }
}
-
-WebRtc_Word16
-ACMILBC::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
+WebRtc_Word16 ACMILBC::InternalInitDecoder(WebRtcACMCodecParams* codecParams) {
+ // initialize with a correct processing block length
+ if ((160 == (codecParams->codecInstant).pacsize) ||
+ (320 == (codecParams->codecInstant).pacsize)) {
+ // processing block of 20ms
+ return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 20);
+ } else if ((240 == (codecParams->codecInstant).pacsize) ||
+ (480 == (codecParams->codecInstant).pacsize)) {
+ // processing block of 30ms
+ return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 30);
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: invalid processing block");
return -1;
+ }
}
-
-WebRtc_Word16
-ACMILBC::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
+WebRtc_Word32 ACMILBC::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "CodecDef: decoder not initialized for ILBC");
return -1;
+ }
+ // Fill up the structure by calling
+  // "SET_CODEC_PAR" & "SET_ILBC_FUNCTIONS".
+  // Then return the structure to NetEQ to add the codec to its
+  // database.
+ SET_CODEC_PAR((codecDef), kDecoderILBC, codecInst.pltype, _decoderInstPtr,
+ 8000);
+ SET_ILBC_FUNCTIONS((codecDef));
+ return 0;
}
+ACMGenericCodec* ACMILBC::CreateInstance(void) {
+ return NULL;
+}
-WebRtc_Word32
-ACMILBC::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
+WebRtc_Word16 ACMILBC::InternalCreateEncoder() {
+ if (WebRtcIlbcfix_EncoderCreate(&_encoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: cannot create instance for ILBC encoder");
return -1;
+ }
+ return 0;
}
-
-ACMGenericCodec*
-ACMILBC::CreateInstance(void)
-{
- return NULL;
+void ACMILBC::DestructEncoderSafe() {
+ _encoderInitialized = false;
+ _encoderExist = false;
+ if (_encoderInstPtr != NULL) {
+ WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
}
-
-WebRtc_Word16
-ACMILBC::InternalCreateEncoder()
-{
+WebRtc_Word16 ACMILBC::InternalCreateDecoder() {
+ if (WebRtcIlbcfix_DecoderCreate(&_decoderInstPtr) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: cannot create instance for ILBC decoder");
return -1;
+ }
+ return 0;
}
-
-void
-ACMILBC::DestructEncoderSafe()
-{
- return;
+void ACMILBC::DestructDecoderSafe() {
+ _decoderInitialized = false;
+ _decoderExist = false;
+ if (_decoderInstPtr != NULL) {
+ WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
}
+void ACMILBC::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ WebRtcIlbcfix_EncoderFree((iLBC_encinst_t_*) ptrInst);
+ }
+ return;
+}
-WebRtc_Word16
-ACMILBC::InternalCreateDecoder()
-{
+WebRtc_Word16 ACMILBC::SetBitRateSafe(const WebRtc_Word32 rate) {
+ // Check that rate is valid. No need to store the value
+ if (rate == 13300) {
+ WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
+ } else if (rate == 15200) {
+ WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
+ } else {
return -1;
-}
+ }
+ _encoderParams.codecInstant.rate = rate;
-
-void
-ACMILBC::DestructDecoderSafe()
-{
- return;
-}
-
-
-void
-ACMILBC::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
-}
-
-WebRtc_Word16
-ACMILBC::SetBitRateSafe(const WebRtc_Word32 /* rate */)
-{
- return -1;
-}
-
-#else //===================== Actual Implementation =======================
-
-
-ACMILBC::ACMILBC(
- WebRtc_Word16 codecID):
-_encoderInstPtr(NULL),
-_decoderInstPtr(NULL)
-{
- _codecID = codecID;
- return;
-}
-
-
-ACMILBC::~ACMILBC()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
- if(_decoderInstPtr != NULL)
- {
- WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- return;
-}
-
-
-WebRtc_Word16
-ACMILBC::InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte)
-{
- *bitStreamLenByte = WebRtcIlbcfix_Encode(_encoderInstPtr,
- &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream);
- if (*bitStreamLenByte < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalEncode: error in encode for ILBC");
- return -1;
- }
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
- _inAudioIxRead += _frameLenSmpl;
- return *bitStreamLenByte;
-}
-
-
-WebRtc_Word16
-ACMILBC::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return 0;
-}
-
-
-WebRtc_Word16
-ACMILBC::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- // initialize with a correct processing block length
- if((160 == (codecParams->codecInstant).pacsize) ||
- (320 == (codecParams->codecInstant).pacsize))
- {
- // processing block of 20ms
- return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
- }
- else if((240 == (codecParams->codecInstant).pacsize) ||
- (480 == (codecParams->codecInstant).pacsize))
- {
- // processing block of 30ms
- return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
- }
- else
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalInitEncoder: invalid processing block");
- return -1;
- }
-}
-
-
-WebRtc_Word16
-ACMILBC::InternalInitDecoder(
- WebRtcACMCodecParams* codecParams)
-{
- // initialize with a correct processing block length
- if((160 == (codecParams->codecInstant).pacsize) ||
- (320 == (codecParams->codecInstant).pacsize))
- {
- // processing block of 20ms
- return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 20);
- }
- else if((240 == (codecParams->codecInstant).pacsize) ||
- (480 == (codecParams->codecInstant).pacsize))
- {
- // processing block of 30ms
- return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 30);
- }
- else
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalInitDecoder: invalid processing block");
- return -1;
- }
-}
-
-
-WebRtc_Word32
-ACMILBC::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "CodeDef: decoder not initialized for ILBC");
- return -1;
- }
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_ILBC_FUNCTION."
- // Then return the structure back to NetEQ to add the codec to it's
- // database.
- SET_CODEC_PAR((codecDef), kDecoderILBC, codecInst.pltype,
- _decoderInstPtr, 8000);
- SET_ILBC_FUNCTIONS((codecDef));
- return 0;
-}
-
-
-ACMGenericCodec*
-ACMILBC::CreateInstance(void)
-{
- return NULL;
-}
-
-
-WebRtc_Word16
-ACMILBC::InternalCreateEncoder()
-{
- if (WebRtcIlbcfix_EncoderCreate(&_encoderInstPtr) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateEncoder: cannot create instance for ILBC encoder");
- return -1;
- }
- return 0;
-}
-
-
-void
-ACMILBC::DestructEncoderSafe()
-{
- _encoderInitialized = false;
- _encoderExist = false;
- if(_encoderInstPtr != NULL)
- {
- WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
-}
-
-
-WebRtc_Word16
-ACMILBC::InternalCreateDecoder()
-{
- if (WebRtcIlbcfix_DecoderCreate(&_decoderInstPtr) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "InternalCreateDecoder: cannot create instance for ILBC decoder");
- return -1;
- }
- return 0;
-}
-
-
-void
-ACMILBC::DestructDecoderSafe()
-{
- _decoderInitialized = false;
- _decoderExist = false;
- if(_decoderInstPtr != NULL)
- {
- WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
-}
-
-
-void
-ACMILBC::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- WebRtcIlbcfix_EncoderFree((iLBC_encinst_t_*)ptrInst);
- }
- return;
-}
-
-WebRtc_Word16
-ACMILBC::SetBitRateSafe(const WebRtc_Word32 rate)
-{
- // Check that rate is valid. No need to store the value
- if (rate == 13300)
- {
- WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
- }
- else if (rate == 15200)
- {
- WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
- }
- else
- {
- return -1;
- }
- _encoderParams.codecInstant.rate = rate;
-
- return 0;
+ return 0;
}
#endif
-} // namespace webrtc
+} // namespace webrtc
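A side note on the iLBC mode selection above: the wrapper only accepts the two standard iLBC operating points, 20 ms frames at 15200 bps (160 or 320 samples per packet) and 30 ms frames at 13300 bps (240 or 480 samples per packet). The stand-alone sketch below restates that mapping; the helper names are hypothetical and not part of the module.

// Hypothetical helpers illustrating the iLBC mapping used by ACMILBC above.
#include <cstdio>

static int IlbcFrameMsFromPacsize(int pacsizeSamples) {
  if (pacsizeSamples == 160 || pacsizeSamples == 320) return 20;  // 15200 bps
  if (pacsizeSamples == 240 || pacsizeSamples == 480) return 30;  // 13300 bps
  return -1;  // invalid processing block
}

static int IlbcFrameMsFromRate(int rateBps) {
  if (rateBps == 15200) return 20;
  if (rateBps == 13300) return 30;
  return -1;  // rate not supported; SetBitRateSafe() would fail here
}

int main() {
  std::printf("pacsize 320 -> %d ms frames\n", IlbcFrameMsFromPacsize(320));
  std::printf("rate 13300  -> %d ms frames\n", IlbcFrameMsFromRate(13300));
  return 0;
}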
diff --git a/modules/audio_coding/main/source/acm_ilbc.h b/modules/audio_coding/main/source/acm_ilbc.h
index 02eb6f1..c7d4466 100644
--- a/modules/audio_coding/main/source/acm_ilbc.h
+++ b/modules/audio_coding/main/source/acm_ilbc.h
@@ -17,58 +17,48 @@
struct iLBC_encinst_t_;
struct iLBC_decinst_t_;
-namespace webrtc
-{
+namespace webrtc {
-class ACMILBC : public ACMGenericCodec
-{
-public:
- ACMILBC(WebRtc_Word16 codecID);
- ~ACMILBC();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMILBC : public ACMGenericCodec {
+ public:
+ ACMILBC(WebRtc_Word16 codecID);
+ ~ACMILBC();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+ WebRtc_Word16 SetBitRateSafe(const WebRtc_Word32 rate);
- WebRtc_Word16 SetBitRateSafe(
- const WebRtc_Word32 rate);
+ void DestructEncoderSafe();
- void DestructEncoderSafe();
+ void DestructDecoderSafe();
- void DestructDecoderSafe();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateDecoder();
- WebRtc_Word16 InternalCreateDecoder();
+ void InternalDestructEncoderInst(void* ptrInst);
- void InternalDestructEncoderInst(
- void* ptrInst);
-
- iLBC_encinst_t_* _encoderInstPtr;
- iLBC_decinst_t_* _decoderInstPtr;
+ iLBC_encinst_t_* _encoderInstPtr;
+ iLBC_decinst_t_* _decoderInstPtr;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_ILBC_H_
diff --git a/modules/audio_coding/main/source/acm_isac.cc b/modules/audio_coding/main/source/acm_isac.cc
index b5ec6d2..0e88bc6 100644
--- a/modules/audio_coding/main/source/acm_isac.cc
+++ b/modules/audio_coding/main/source/acm_isac.cc
@@ -16,33 +16,29 @@
#include "webrtc_neteq.h"
#include "webrtc_neteq_help_macros.h"
-
#ifdef WEBRTC_CODEC_ISAC
- #include "acm_isac_macros.h"
- #include "isac.h"
+#include "acm_isac_macros.h"
+#include "isac.h"
#endif
#ifdef WEBRTC_CODEC_ISACFX
- #include "acm_isac_macros.h"
- #include "isacfix.h"
+#include "acm_isac_macros.h"
+#include "isacfix.h"
#endif
-namespace webrtc
-{
+namespace webrtc {
// we need this otherwise we cannot use forward declaration
// in the header file
#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
-struct ACMISACInst
-{
- ACM_ISAC_STRUCT *inst;
+struct ACMISACInst {
+ ACM_ISAC_STRUCT *inst;
};
#endif
#define ISAC_MIN_RATE 10000
#define ISAC_MAX_RATE 56000
-
// How the scaling is computed. iSAC computes a gain based on the
// bottleneck. It follows the following expression for that
//
@@ -60,24 +56,24 @@
#define ISAC_NUM_SUPPORTED_RATES 9
const WebRtc_UWord16 isacSuportedRates[ISAC_NUM_SUPPORTED_RATES] = {
32000, 30000, 26000, 23000, 21000,
- 19000, 17000, 15000, 12000};
+ 19000, 17000, 15000, 12000
+};
const float isacScale[ISAC_NUM_SUPPORTED_RATES] = {
1.0f, 0.8954f, 0.7178f, 0.6081f, 0.5445f,
- 0.4875f, 0.4365f, 0.3908f, 0.3311f};
+ 0.4875f, 0.4365f, 0.3908f, 0.3311f
+};
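// The two tables above are parallel: ACMISACFixTranscodingScale() (defined in
// the iSAC-fix branch further down in this file) returns the scale of the
// first supported rate that does not exceed the requested rate. A minimal,
// self-contained sketch of that lookup; kSupportedRates, kScales and
// ScaleForRate are illustrative names, not part of the source:
#include <stdint.h>

static const uint16_t kSupportedRates[9] = {32000, 30000, 26000, 23000, 21000,
                                            19000, 17000, 15000, 12000};
static const float kScales[9] = {1.0f, 0.8954f, 0.7178f, 0.6081f, 0.5445f,
                                 0.4875f, 0.4365f, 0.3908f, 0.3311f};

// E.g. rate == 24000 first satisfies 24000 >= 23000 (index 3), so the
// transcoding scale is 0.6081f; a rate below 12000 yields -1.
float ScaleForRate(uint16_t rate) {
  for (int n = 0; n < 9; n++) {
    if (rate >= kSupportedRates[n])
      return kScales[n];
  }
  return -1.0f;
}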
// Tables for bandwidth estimates
#define NR_ISAC_BANDWIDTHS 24
-const WebRtc_Word32 isacRatesWB[NR_ISAC_BANDWIDTHS] =
-{
+const WebRtc_Word32 isacRatesWB[NR_ISAC_BANDWIDTHS] = {
10000, 11100, 12300, 13700, 15200, 16900,
18800, 20900, 23300, 25900, 28700, 31900,
10100, 11200, 12400, 13800, 15300, 17000,
- 18900, 21000, 23400, 26000, 28800, 32000};
+ 18900, 21000, 23400, 26000, 28800, 32000
+};
-
-const WebRtc_Word32 isacRatesSWB[NR_ISAC_BANDWIDTHS] =
-{
+const WebRtc_Word32 isacRatesSWB[NR_ISAC_BANDWIDTHS] = {
10000, 11000, 12400, 13800, 15300, 17000,
18900, 21000, 23200, 25400, 27600, 29800,
32000, 34100, 36300, 38500, 40700, 42900,
@@ -100,215 +96,137 @@
return;
}
-
-ACMISAC::~ACMISAC()
-{
- return;
+ACMISAC::~ACMISAC() {
+ return;
}
-
-ACMGenericCodec*
-ACMISAC::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMISAC::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMISAC::InternalEncode(
- WebRtc_UWord8* /* bitstream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::InternalEncode(WebRtc_UWord8* /* bitstream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMISAC::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return 0;
+WebRtc_Word16 ACMISAC::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
}
-
-WebRtc_Word16
-ACMISAC::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMISAC::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMISAC::InternalCreateDecoder()
-{
- return -1;
+WebRtc_Word16 ACMISAC::InternalCreateDecoder() {
+ return -1;
}
-
-void
-ACMISAC::DestructDecoderSafe()
-{
- return;
+void ACMISAC::DestructDecoderSafe() {
+ return;
}
-
-WebRtc_Word16
-ACMISAC::InternalCreateEncoder()
-{
- return -1;
+WebRtc_Word16 ACMISAC::InternalCreateEncoder() {
+ return -1;
}
-
-void
-ACMISAC::DestructEncoderSafe()
-{
- return;
+void ACMISAC::DestructEncoderSafe() {
+ return;
}
-
-WebRtc_Word32
-ACMISAC::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
- return -1;
+WebRtc_Word32 ACMISAC::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
}
-
-void
-ACMISAC::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
+void ACMISAC::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
}
-WebRtc_Word16
-ACMISAC::DeliverCachedIsacData(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */,
- WebRtc_UWord32* /* timestamp */,
- WebRtcACMEncodingType* /* encodingType */,
- const WebRtc_UWord16 /* isacRate */,
- const WebRtc_UWord8 /* isacBWestimate */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::DeliverCachedIsacData(
+ WebRtc_UWord8* /* bitStream */, WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_UWord32* /* timestamp */, WebRtcACMEncodingType* /* encodingType */,
+ const WebRtc_UWord16 /* isacRate */,
+ const WebRtc_UWord8 /* isacBWestimate */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMISAC::Transcode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */,
- WebRtc_Word16 /* qBWE */,
- WebRtc_Word32 /* scale */,
- bool /* isRED */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::Transcode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_Word16 /* qBWE */,
+ WebRtc_Word32 /* scale */,
+ bool /* isRED */) {
+ return -1;
}
-WebRtc_Word16
-ACMISAC::SetBitRateSafe(
- WebRtc_Word32 /* bitRate */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::SetBitRateSafe(WebRtc_Word32 /* bitRate */) {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::GetEstimatedBandwidthSafe()
-{
- return -1;
+WebRtc_Word32 ACMISAC::GetEstimatedBandwidthSafe() {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::SetEstimatedBandwidthSafe(
- WebRtc_Word32 /* estimatedBandwidth */)
-{
- return -1;
+WebRtc_Word32 ACMISAC::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 /* estimatedBandwidth */) {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::GetRedPayloadSafe(
- WebRtc_UWord8* /* redPayload */,
- WebRtc_Word16* /* payloadBytes */)
-{
- return -1;
+WebRtc_Word32 ACMISAC::GetRedPayloadSafe(WebRtc_UWord8* /* redPayload */,
+ WebRtc_Word16* /* payloadBytes */) {
+ return -1;
}
-WebRtc_Word16
-ACMISAC::UpdateDecoderSampFreq(
- WebRtc_Word16 /* codecId */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::UpdateDecoderSampFreq(WebRtc_Word16 /* codecId */) {
+ return -1;
}
-
-WebRtc_Word16
-ACMISAC::UpdateEncoderSampFreq(
- WebRtc_UWord16 /* encoderSampFreqHz */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::UpdateEncoderSampFreq(
+ WebRtc_UWord16 /* encoderSampFreqHz */) {
+ return -1;
}
-WebRtc_Word16
-ACMISAC::EncoderSampFreq(
- WebRtc_UWord16& /* sampFreqHz */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::EncoderSampFreq(WebRtc_UWord16& /* sampFreqHz */) {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::ConfigISACBandwidthEstimator(
- const WebRtc_UWord8 /* initFrameSizeMsec */,
+WebRtc_Word32 ACMISAC::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 /* initFrameSizeMsec */,
const WebRtc_UWord16 /* initRateBitPerSec */,
- const bool /* enforceFrameSize */)
-{
- return -1;
+ const bool /* enforceFrameSize */) {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::SetISACMaxPayloadSize(
- const WebRtc_UWord16 /* maxPayloadLenBytes */)
-{
- return -1;
+WebRtc_Word32 ACMISAC::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 /* maxPayloadLenBytes */) {
+ return -1;
}
-WebRtc_Word32
-ACMISAC::SetISACMaxRate(
- const WebRtc_UWord32 /* maxRateBitPerSec */)
-{
- return -1;
+WebRtc_Word32 ACMISAC::SetISACMaxRate(
+ const WebRtc_UWord32 /* maxRateBitPerSec */) {
+ return -1;
}
-
-void
-ACMISAC::UpdateFrameLen()
-{
- return;
+void ACMISAC::UpdateFrameLen() {
+ return;
}
-void
-ACMISAC::CurrentRate(
- WebRtc_Word32& /*rateBitPerSec */)
-{
- return;
+void ACMISAC::CurrentRate(WebRtc_Word32& /*rateBitPerSec */) {
+ return;
}
bool
ACMISAC::DecoderParamsSafe(
- WebRtcACMCodecParams* /* decParams */,
+ WebRtcACMCodecParams* /* decParams */,
const WebRtc_UWord8 /* payloadType */)
{
return false;
@@ -321,136 +239,92 @@
return;
}
-WebRtc_Word16
-ACMISAC::REDPayloadISAC(
- const WebRtc_Word32 /* isacRate */,
- const WebRtc_Word16 /* isacBwEstimate */,
- WebRtc_UWord8* /* payload */,
- WebRtc_Word16* /* payloadLenBytes */)
-{
- return -1;
+WebRtc_Word16 ACMISAC::REDPayloadISAC(const WebRtc_Word32 /* isacRate */,
+ const WebRtc_Word16 /* isacBwEstimate */,
+ WebRtc_UWord8* /* payload */,
+ WebRtc_Word16* /* payloadLenBytes */) {
+ return -1;
}
-
#else //===================== Actual Implementation =======================
-
-
#ifdef WEBRTC_CODEC_ISACFX
-enum IsacSamplingRate
-{
- kIsacWideband = 16,
- kIsacSuperWideband = 32
+enum IsacSamplingRate {
+ kIsacWideband = 16,
+ kIsacSuperWideband = 32
};
-static float
-ACMISACFixTranscodingScale(
- WebRtc_UWord16 rate)
-{
- // find the scale for transcoding, the scale is rounded
- // downward
- float scale = -1;
- for(WebRtc_Word16 n=0; n < ISAC_NUM_SUPPORTED_RATES; n++)
- {
- if(rate >= isacSuportedRates[n])
- {
- scale = isacScale[n];
- break;
- }
+static float ACMISACFixTranscodingScale(WebRtc_UWord16 rate) {
+ // Find the scale for transcoding; the scale is rounded
+ // downward.
+ float scale = -1;
+ for (WebRtc_Word16 n = 0; n < ISAC_NUM_SUPPORTED_RATES; n++) {
+ if (rate >= isacSuportedRates[n]) {
+ scale = isacScale[n];
+ break;
}
- return scale;
+ }
+ return scale;
}
-static void
-ACMISACFixGetSendBitrate(
- ACM_ISAC_STRUCT* inst,
- WebRtc_Word32* bottleNeck)
-{
- *bottleNeck = WebRtcIsacfix_GetUplinkBw(inst);
+static void ACMISACFixGetSendBitrate(ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32* bottleNeck) {
+ *bottleNeck = WebRtcIsacfix_GetUplinkBw(inst);
}
-static WebRtc_Word16
-ACMISACFixGetNewBitstream(
- ACM_ISAC_STRUCT* inst,
- WebRtc_Word16 BWEIndex,
- WebRtc_Word16 /* jitterIndex */,
- WebRtc_Word32 rate,
- WebRtc_Word16* bitStream,
- bool isRED)
-{
- if (isRED)
- {
- // RED not supported with iSACFIX
- return -1;
- }
- float scale = ACMISACFixTranscodingScale((WebRtc_UWord16)rate);
- return WebRtcIsacfix_GetNewBitStream(inst, BWEIndex, scale, bitStream);
+static WebRtc_Word16 ACMISACFixGetNewBitstream(ACM_ISAC_STRUCT* inst,
+ WebRtc_Word16 BWEIndex,
+ WebRtc_Word16 /* jitterIndex */,
+ WebRtc_Word32 rate,
+ WebRtc_Word16* bitStream,
+ bool isRED) {
+ if (isRED) {
+ // RED not supported with iSACFIX
+ return -1;
+ }
+ float scale = ACMISACFixTranscodingScale((WebRtc_UWord16) rate);
+ return WebRtcIsacfix_GetNewBitStream(inst, BWEIndex, scale, bitStream);
}
-
-static WebRtc_Word16
-ACMISACFixGetSendBWE(
- ACM_ISAC_STRUCT* inst,
- WebRtc_Word16* rateIndex,
- WebRtc_Word16* /* dummy */)
-{
- WebRtc_Word16 localRateIndex;
- WebRtc_Word16 status = WebRtcIsacfix_GetDownLinkBwIndex(inst, &localRateIndex);
- if(status < 0)
- {
- return -1;
- }
- else
- {
- *rateIndex = localRateIndex;
- return 0;
- }
+static WebRtc_Word16 ACMISACFixGetSendBWE(ACM_ISAC_STRUCT* inst,
+ WebRtc_Word16* rateIndex,
+ WebRtc_Word16* /* dummy */) {
+ WebRtc_Word16 localRateIndex;
+ WebRtc_Word16 status = WebRtcIsacfix_GetDownLinkBwIndex(inst,
+ &localRateIndex);
+ if (status < 0) {
+ return -1;
+ } else {
+ *rateIndex = localRateIndex;
+ return 0;
+ }
}
-static WebRtc_Word16
-ACMISACFixControlBWE(
- ACM_ISAC_STRUCT* inst,
- WebRtc_Word32 rateBPS,
- WebRtc_Word16 frameSizeMs,
- WebRtc_Word16 enforceFrameSize)
-{
- return WebRtcIsacfix_ControlBwe(inst, (WebRtc_Word16)rateBPS,
- frameSizeMs, enforceFrameSize);
+static WebRtc_Word16 ACMISACFixControlBWE(ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32 rateBPS,
+ WebRtc_Word16 frameSizeMs,
+ WebRtc_Word16 enforceFrameSize) {
+ return WebRtcIsacfix_ControlBwe(inst, (WebRtc_Word16) rateBPS, frameSizeMs,
+ enforceFrameSize);
}
-static WebRtc_Word16
-ACMISACFixControl(
- ACM_ISAC_STRUCT* inst,
- WebRtc_Word32 rateBPS,
- WebRtc_Word16 frameSizeMs)
-{
- return WebRtcIsacfix_Control(inst, (WebRtc_Word16)rateBPS,
- frameSizeMs);
+static WebRtc_Word16 ACMISACFixControl(ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32 rateBPS,
+ WebRtc_Word16 frameSizeMs) {
+ return WebRtcIsacfix_Control(inst, (WebRtc_Word16) rateBPS, frameSizeMs);
}
-static IsacSamplingRate
-ACMISACFixGetEncSampRate(
- ACM_ISAC_STRUCT* /* inst */)
-{
- return kIsacWideband;
+static IsacSamplingRate ACMISACFixGetEncSampRate(ACM_ISAC_STRUCT* /* inst */) {
+ return kIsacWideband;
}
-
-static IsacSamplingRate
-ACMISACFixGetDecSampRate(
- ACM_ISAC_STRUCT* /* inst */)
-{
- return kIsacWideband;
+static IsacSamplingRate ACMISACFixGetDecSampRate(ACM_ISAC_STRUCT* /* inst */) {
+ return kIsacWideband;
}
#endif
-
-
-
-
-
ACMISAC::ACMISAC(WebRtc_Word16 codecID)
: _isEncInitialized(false),
_isacCodingMode(CHANNEL_INDEPENDENT),
@@ -476,727 +350,538 @@
_decoderParams.codecInstant.pltype = -1;
}
-
-ACMISAC::~ACMISAC()
-{
- if (_codecInstPtr != NULL)
- {
- if(_codecInstPtr->inst != NULL)
- {
- ACM_ISAC_FREE(_codecInstPtr->inst);
- _codecInstPtr->inst = NULL;
- }
- delete _codecInstPtr;
- _codecInstPtr = NULL;
+ACMISAC::~ACMISAC() {
+ if (_codecInstPtr != NULL) {
+ if (_codecInstPtr->inst != NULL) {
+ ACM_ISAC_FREE(_codecInstPtr->inst);
+ _codecInstPtr->inst = NULL;
}
- return;
+ delete _codecInstPtr;
+ _codecInstPtr = NULL;
+ }
+ return;
}
-
-ACMGenericCodec*
-ACMISAC::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMISAC::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMISAC::InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte)
-{
- // ISAC takes 10ms audio everytime we call encoder, therefor,
- // it should be treated like codecs with 'basic coding block'
- // non-zero, and the following 'while-loop' should not be necessary.
- // However, due to a mistake in the codec the frame-size might change
- // at the first 10ms pushed in to iSAC if the bit-rate is low, this is
- // sort of a bug in iSAC. to address this we treat iSAC as the
- // following.
-
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
- *bitStreamLenByte = 0;
- while((*bitStreamLenByte == 0) && (_inAudioIxRead < _frameLenSmpl))
- {
- if(_inAudioIxRead > _inAudioIxWrite)
- {
- // something is wrong.
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "The actual fram-size of iSAC appears to be larger that expected. All audio \
-pushed in but no bit-stream is generated.");
- return -1;
- }
- *bitStreamLenByte = ACM_ISAC_ENCODE(_codecInstPtr->inst,
- &_inAudio[_inAudioIxRead], (WebRtc_Word16*)bitstream);
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
- _inAudioIxRead += _samplesIn10MsAudio;
- }
- if(*bitStreamLenByte == 0)
- {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
- "ISAC Has encoded the whole frame but no bit-stream is generated.");
- }
-
- // a packet is generated iSAC, is set in adaptive mode may change
- // the frame length and we like to update the bottleneck value as
- // well, although updating bottleneck is not crucial
- if((*bitStreamLenByte > 0) && (_isacCodingMode == ADAPTIVE))
- {
- //_frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
- ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
- }
- UpdateFrameLen();
- return *bitStreamLenByte;
-}
-
-
-WebRtc_Word16
-ACMISAC::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return 0;
-}
-
-
-WebRtc_Word16
-ACMISAC::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- // if rate is set to -1 then iSAC has to be in adaptive mode
- if(codecParams->codecInstant.rate == -1)
- {
- _isacCodingMode = ADAPTIVE;
- }
-
- // sanity check that rate is in acceptable range
- else if((codecParams->codecInstant.rate >= ISAC_MIN_RATE) &&
- (codecParams->codecInstant.rate <= ISAC_MAX_RATE))
- {
- _isacCodingMode = CHANNEL_INDEPENDENT;
- _isacCurrentBN = codecParams->codecInstant.rate;
- }
- else
- {
- return -1;
- }
-
- // we need to set the encoder sampling frequency.
- if(UpdateEncoderSampFreq((WebRtc_UWord16)codecParams->codecInstant.plfreq) < 0)
- {
- return -1;
- }
- if(ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0)
- {
- return -1;
- }
-
- // apply the frame-size and rate if operating in
- // channel-independent mode
- if(_isacCodingMode == CHANNEL_INDEPENDENT)
- {
- if(ACM_ISAC_CONTROL(_codecInstPtr->inst,
- codecParams->codecInstant.rate,
- codecParams->codecInstant.pacsize /
- (codecParams->codecInstant.plfreq / 1000)) < 0)
- {
- return -1;
- }
- }
- else
- {
- // We need this for adaptive case and has to be called
- // after initialization
- ACM_ISAC_GETSENDBITRATE(
- _codecInstPtr->inst, &_isacCurrentBN);
- }
- _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
- return 0;
-}
-
-WebRtc_Word16
-ACMISAC::InternalInitDecoder(
- WebRtcACMCodecParams* codecParams)
-{
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
-
- // set decoder sampling frequency.
- if(codecParams->codecInstant.plfreq == 32000)
- {
- UpdateDecoderSampFreq(ACMCodecDB::kISACSWB);
- }
- else
- {
- UpdateDecoderSampFreq(ACMCodecDB::kISAC);
- }
-
- // in a one-way communication we may never register send-codec.
- // However we like that the BWE to work properly so it has to
- // be initialized. The BWE is initialized when iSAC encoder is initialized.
- // Therefore, we need this.
- if(!_encoderInitialized)
- {
- // Since we don't require a valid rate or a valid packet size when initializing
- // the decoder, we set valid values before initializing encoder
- codecParams->codecInstant.rate = kIsacWbDefaultRate;
- codecParams->codecInstant.pacsize = kIsacPacSize960;
- if(InternalInitEncoder(codecParams) < 0)
- {
- return -1;
- }
- _encoderInitialized = true;
- }
-
- return ACM_ISAC_DECODERINIT(_codecInstPtr->inst);
-}
-
-
-WebRtc_Word16
-ACMISAC::InternalCreateDecoder()
-{
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
- WebRtc_Word16 status = ACM_ISAC_CREATE (&(_codecInstPtr->inst));
-
- // specific to codecs with one instance for encoding and decoding
- _encoderInitialized = false;
- if(status < 0)
- {
- _encoderExist = false;
- }
- else
- {
- _encoderExist = true;
- }
- return status;
-}
-
-
-void
-ACMISAC::DestructDecoderSafe()
-{
- // codec with shared instance cannot delete.
- _decoderInitialized = false;
- return;
-}
-
-
-WebRtc_Word16
-ACMISAC::InternalCreateEncoder()
-{
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
- WebRtc_Word16 status = ACM_ISAC_CREATE(&(_codecInstPtr->inst));
-
- // specific to codecs with one instance for encoding and decoding
- _decoderInitialized = false;
- if(status < 0)
- {
- _decoderExist = false;
- }
- else
- {
- _decoderExist = true;
- }
- return status;
-}
-
-
-void
-ACMISAC::DestructEncoderSafe()
-{
- // codec with shared instance cannot delete.
- _encoderInitialized = false;
- return;
-}
-
-
-WebRtc_Word32
-ACMISAC::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- // Sanity checks
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
- if (!_decoderInitialized || !_decoderExist)
- {
- // Todo:
- // log error
- return -1;
- }
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_ISAC_FUNCTION."
- // Then call NetEQ to add the codec to it's
- // database.
- if(codecInst.plfreq == 16000)
- {
- SET_CODEC_PAR((codecDef), kDecoderISAC, codecInst.pltype,
- _codecInstPtr->inst, 16000);
-#ifdef WEBRTC_CODEC_ISAC
- SET_ISAC_FUNCTIONS((codecDef));
-#else
- SET_ISACfix_FUNCTIONS((codecDef));
-#endif
- }
- else
- {
-#ifdef WEBRTC_CODEC_ISAC
- SET_CODEC_PAR((codecDef), kDecoderISACswb, codecInst.pltype,
- _codecInstPtr->inst, 32000);
- SET_ISACSWB_FUNCTIONS((codecDef));
-#else
- return -1;
-#endif
- }
-
- return 0;
-}
-
-
-void
-ACMISAC::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- ACM_ISAC_FREE((ACM_ISAC_STRUCT *)ptrInst);
- }
- return;
-}
-
-WebRtc_Word16
-ACMISAC::Transcode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_Word16 qBWE,
- WebRtc_Word32 rate,
- bool isRED)
-{
- WebRtc_Word16 jitterInfo = 0;
- // transcode from a higher rate to lower rate
- // sanity check
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
-
- *bitStreamLenByte = ACM_ISAC_GETNEWBITSTREAM(_codecInstPtr->inst,
- qBWE, jitterInfo, rate, (WebRtc_Word16*)bitStream, (isRED)? 1:0);
-
- if(*bitStreamLenByte < 0)
- {
- // error happened
- *bitStreamLenByte = 0;
- return -1;
- }
- else
- {
- return *bitStreamLenByte;
- }
-}
-
-WebRtc_Word16
-ACMISAC::SetBitRateSafe(
- WebRtc_Word32 bitRate)
-{
- if (_codecInstPtr == NULL)
- {
- return -1;
- }
- WebRtc_UWord16 encoderSampFreq;
- EncoderSampFreq(encoderSampFreq);
- bool reinit = false;
- // change the BN of iSAC
- if(bitRate == -1)
- {
- // ADAPTIVE MODE
- // Check if it was already in adaptive mode
- if(_isacCodingMode != ADAPTIVE)
- {
- // was not in adaptive, then set the mode to adaptive
- // and flag for re-initialization
- _isacCodingMode = ADAPTIVE;
- reinit = true;
- }
- }
- // Sanity check if the rate valid
- else if((bitRate >= ISAC_MIN_RATE) &&
- (bitRate <= ISAC_MAX_RATE))
- {
- //check if it was in channel-independent mode before
- if(_isacCodingMode != CHANNEL_INDEPENDENT)
- {
- // was not in channel independent, set the mode to
- // channel-independent and flag for re-initialization
- _isacCodingMode = CHANNEL_INDEPENDENT;
- reinit = true;
- }
- // store the bottleneck
- _isacCurrentBN = (WebRtc_UWord16)bitRate;
- }
- else
- {
- // invlaid rate
- return -1;
- }
-
- WebRtc_Word16 status = 0;
- if(reinit)
- {
- // initialize and check if it is successful
- if(ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0)
- {
- // failed initialization
- return -1;
- }
- }
- if(_isacCodingMode == CHANNEL_INDEPENDENT)
- {
-
- status = ACM_ISAC_CONTROL(_codecInstPtr->inst, _isacCurrentBN,
- (encoderSampFreq == 32000)? 30:(_frameLenSmpl / 16));
- if(status < 0)
- {
- status = -1;
- }
- }
-
- // Update encoder parameters
- _encoderParams.codecInstant.rate = bitRate;
-
- UpdateFrameLen();
- return status;
-}
-
-
-WebRtc_Word32
-ACMISAC::GetEstimatedBandwidthSafe()
-{
- WebRtc_Word16 bandwidthIndex = 0;
- WebRtc_Word16 delayIndex = 0;
- IsacSamplingRate sampRate;
-
- // Get bandwidth information
- ACM_ISAC_GETSENDBWE(_codecInstPtr->inst, &bandwidthIndex, &delayIndex);
-
- // Validy check of index
- if ((bandwidthIndex < 0) || (bandwidthIndex >= NR_ISAC_BANDWIDTHS))
- {
- return -1;
- }
-
- // Check sample frequency
- sampRate = ACM_ISAC_GETDECSAMPRATE(_codecInstPtr->inst);
- if(sampRate == kIsacWideband)
- {
- return isacRatesWB[bandwidthIndex];
- }
- else
- {
- return isacRatesSWB[bandwidthIndex];
- }
-}
-
-WebRtc_Word32
-ACMISAC::SetEstimatedBandwidthSafe(
- WebRtc_Word32 estimatedBandwidth)
-{
- IsacSamplingRate sampRate;
- WebRtc_Word16 bandwidthIndex;
-
- // Check sample frequency and choose appropriate table
- sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
-
- if(sampRate == kIsacWideband)
- {
- // Search through the WB rate table to find the index
-
- bandwidthIndex = NR_ISAC_BANDWIDTHS/2 - 1;
- for (int i=0; i<(NR_ISAC_BANDWIDTHS/2); i++)
- {
- if (estimatedBandwidth == isacRatesWB[i])
- {
- bandwidthIndex = i;
- break;
- } else if (estimatedBandwidth == isacRatesWB[i+NR_ISAC_BANDWIDTHS/2])
- {
- bandwidthIndex = i + NR_ISAC_BANDWIDTHS/2;
- break;
- } else if (estimatedBandwidth < isacRatesWB[i])
- {
- bandwidthIndex = i;
- break;
- }
- }
- }
- else
- {
- // Search through the SWB rate table to find the index
- bandwidthIndex = NR_ISAC_BANDWIDTHS - 1;
- for (int i=0; i<NR_ISAC_BANDWIDTHS; i++)
- {
- if(estimatedBandwidth <= isacRatesSWB[i])
- {
- bandwidthIndex = i;
- break;
- }
- }
- }
-
- // Set iSAC Bandwidth Estimate
- ACM_ISAC_SETBWE(_codecInstPtr->inst, bandwidthIndex);
-
- return 0;
-}
-
-WebRtc_Word32
-ACMISAC::GetRedPayloadSafe(
-#if (!defined(WEBRTC_CODEC_ISAC))
- WebRtc_UWord8* /* redPayload */,
- WebRtc_Word16* /* payloadBytes */)
-{
+WebRtc_Word16 ACMISAC::InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte) {
+ // iSAC takes 10 ms of audio every time we call the encoder; therefore,
+ // it should be treated like codecs with a non-zero 'basic coding block',
+ // and the following 'while-loop' should not be necessary.
+ // However, due to a mistake in the codec the frame-size might change
+ // at the first 10 ms pushed into iSAC if the bit-rate is low. This is
+ // sort of a bug in iSAC. To address this we treat iSAC as follows.
+ if (_codecInstPtr == NULL) {
return -1;
-#else
- WebRtc_UWord8* redPayload,
- WebRtc_Word16* payloadBytes)
-{
-
- WebRtc_Word16 bytes = WebRtcIsac_GetRedPayload(_codecInstPtr->inst, (WebRtc_Word16*)redPayload);
- if (bytes < 0)
- {
- return -1;
+ }
+ *bitStreamLenByte = 0;
+ while ((*bitStreamLenByte == 0) && (_inAudioIxRead < _frameLenSmpl)) {
+ if (_inAudioIxRead > _inAudioIxWrite) {
+ // something is wrong.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "The actual fram-size of iSAC appears to be larger that expected. "
+ "All audio pushed in but no bit-stream is generated.");
+ return -1;
}
- *payloadBytes = bytes;
- return 0;
-#endif
-}
+ *bitStreamLenByte = ACM_ISAC_ENCODE(_codecInstPtr->inst,
+ &_inAudio[_inAudioIxRead],
+ (WebRtc_Word16*) bitstream);
+ // Increment the read index; this tells the caller how far we have
+ // gone forward in reading the audio buffer.
+ _inAudioIxRead += _samplesIn10MsAudio;
+ }
+ if (*bitStreamLenByte == 0) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "ISAC Has encoded the whole frame but no bit-stream is generated.");
+ }
-WebRtc_Word16
-ACMISAC::UpdateDecoderSampFreq(
-#ifdef WEBRTC_CODEC_ISAC
- WebRtc_Word16 codecId)
-{
- if(ACMCodecDB::kISAC == codecId)
- {
- return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacWideband);
- }
- else if(ACMCodecDB::kISACSWB == codecId)
- {
- return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacSuperWideband);
- }
- else
- {
- return -1;
- }
-
-#else
- WebRtc_Word16 /* codecId */)
-{
- return 0;
-#endif
-}
-
-
-WebRtc_Word16
-ACMISAC::UpdateEncoderSampFreq(
-#ifdef WEBRTC_CODEC_ISAC
- WebRtc_UWord16 encoderSampFreqHz)
-{
- WebRtc_UWord16 currentSampRateHz;
- EncoderSampFreq(currentSampRateHz);
-
- if(currentSampRateHz != encoderSampFreqHz)
- {
- if((encoderSampFreqHz != 16000) && (encoderSampFreqHz != 32000))
- {
- return -1;
- }
- else
- {
- _inAudioIxRead = 0;
- _inAudioIxWrite = 0;
- _inTimestampIxWrite = 0;
- if(encoderSampFreqHz == 16000)
- {
- if(WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacWideband) < 0)
- {
- return -1;
- }
- _samplesIn10MsAudio = 160;
- }
- else
- {
-
- if(WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacSuperWideband) < 0)
- {
- return -1;
- }
- _samplesIn10MsAudio = 320;
- }
- _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
- _encoderParams.codecInstant.pacsize = _frameLenSmpl;
- _encoderParams.codecInstant.plfreq = encoderSampFreqHz;
- return 0;
- }
- }
-#else
- WebRtc_UWord16 /* codecId */)
-{
-#endif
- return 0;
-}
-
-WebRtc_Word16
-ACMISAC::EncoderSampFreq(
- WebRtc_UWord16& sampFreqHz)
-{
- IsacSamplingRate sampRate;
- sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
- if(sampRate == kIsacSuperWideband)
- {
- sampFreqHz = 32000;
- }
- else
- {
- sampFreqHz = 16000;
- }
- return 0;
-}
-
-WebRtc_Word32
-ACMISAC::ConfigISACBandwidthEstimator(
- const WebRtc_UWord8 initFrameSizeMsec,
- const WebRtc_UWord16 initRateBitPerSec,
- const bool enforceFrameSize)
-{
- WebRtc_Word16 status;
- {
- WebRtc_UWord16 sampFreqHz;
- EncoderSampFreq(sampFreqHz);
- // @TODO: at 32kHz we hardcode calling with 30ms and enforce
- // the frame-size otherwise we might get error. Revise if
- // control-bwe is changed.
- if(sampFreqHz == 32000)
- {
- status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst,
- initRateBitPerSec, 30, 1);
- }
- else
- {
- status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst,
- initRateBitPerSec, initFrameSizeMsec, enforceFrameSize? 1:0);
- }
- }
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Coutn't config iSAC BWE.");
- return -1;
- }
- UpdateFrameLen();
+ // A packet is generated. iSAC, if set in adaptive mode, may change the
+ // frame length and we would like to update the bottleneck value as well,
+ // although updating the bottleneck is not crucial.
+ if ((*bitStreamLenByte > 0) && (_isacCodingMode == ADAPTIVE)) {
+ //_frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
- return 0;
+ }
+ UpdateFrameLen();
+ return *bitStreamLenByte;
}
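// Worked example (assumed configuration, not part of the CL): at 16 kHz with
// a 30 ms frame, _samplesIn10MsAudio is 160 and _frameLenSmpl is 480, so the
// while-loop above calls ACM_ISAC_ENCODE up to three times; the early calls
// return 0 bytes while iSAC buffers audio, and the call that completes the
// frame returns the packet length, which ends the loop.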
-WebRtc_Word32
-ACMISAC::SetISACMaxPayloadSize(
- const WebRtc_UWord16 maxPayloadLenBytes)
-{
- return ACM_ISAC_SETMAXPAYLOADSIZE(_codecInstPtr->inst, maxPayloadLenBytes);
+WebRtc_Word16 ACMISAC::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
}
-WebRtc_Word32
-ACMISAC::SetISACMaxRate(
- const WebRtc_UWord32 maxRateBitPerSec)
-{
- return ACM_ISAC_SETMAXRATE(_codecInstPtr->inst, maxRateBitPerSec);
-}
+WebRtc_Word16 ACMISAC::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+ // if rate is set to -1 then iSAC has to be in adaptive mode
+ if (codecParams->codecInstant.rate == -1) {
+ _isacCodingMode = ADAPTIVE;
+ }
+ // sanity check that rate is in acceptable range
+ else if ((codecParams->codecInstant.rate >= ISAC_MIN_RATE) &&
+ (codecParams->codecInstant.rate <= ISAC_MAX_RATE)) {
+ _isacCodingMode = CHANNEL_INDEPENDENT;
+ _isacCurrentBN = codecParams->codecInstant.rate;
+ } else {
+ return -1;
+ }
-void
-ACMISAC::UpdateFrameLen()
-{
- _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
- _encoderParams.codecInstant.pacsize = _frameLenSmpl;
-}
+ // we need to set the encoder sampling frequency.
+ if (UpdateEncoderSampFreq((WebRtc_UWord16) codecParams->codecInstant.plfreq)
+ < 0) {
+ return -1;
+ }
+ if (ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0) {
+ return -1;
+ }
-void
-ACMISAC::CurrentRate(WebRtc_Word32& rateBitPerSec)
-{
- if(_isacCodingMode == ADAPTIVE)
- {
- ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &rateBitPerSec);
+ // apply the frame-size and rate if operating in
+ // channel-independent mode
+ if (_isacCodingMode == CHANNEL_INDEPENDENT) {
+ if (ACM_ISAC_CONTROL(_codecInstPtr->inst, codecParams->codecInstant.rate,
+ codecParams->codecInstant.pacsize /
+ (codecParams->codecInstant.plfreq / 1000)) < 0) {
+ return -1;
}
+ } else {
+ // We need this for the adaptive case; it has to be called
+ // after initialization.
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
+ }
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ return 0;
}
+WebRtc_Word16 ACMISAC::InternalInitDecoder(WebRtcACMCodecParams* codecParams) {
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
-bool
-ACMISAC::DecoderParamsSafe(
- WebRtcACMCodecParams* decParams,
- const WebRtc_UWord8 payloadType)
-{
- if(_decoderInitialized)
- {
- if(payloadType == _decoderParams.codecInstant.pltype)
- {
- memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
- return true;
+ // set decoder sampling frequency.
+ if (codecParams->codecInstant.plfreq == 32000) {
+ UpdateDecoderSampFreq(ACMCodecDB::kISACSWB);
+ } else {
+ UpdateDecoderSampFreq(ACMCodecDB::kISAC);
+ }
+
+ // in a one-way communication we may never register send-codec.
+ // However we like that the BWE to work properly so it has to
+ // be initialized. The BWE is initialized when iSAC encoder is initialized.
+ // Therefore, we need this.
+ if (!_encoderInitialized) {
+ // Since we don't require a valid rate or a valid packet size when
+ // initializing the decoder, we set valid values before initializing the
+ // encoder.
+ codecParams->codecInstant.rate = kIsacWbDefaultRate;
+ codecParams->codecInstant.pacsize = kIsacPacSize960;
+ if (InternalInitEncoder(codecParams) < 0) {
+ return -1;
+ }
+ _encoderInitialized = true;
+ }
+
+ return ACM_ISAC_DECODERINIT(_codecInstPtr->inst);
+}
+
+WebRtc_Word16 ACMISAC::InternalCreateDecoder() {
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
+ WebRtc_Word16 status = ACM_ISAC_CREATE(&(_codecInstPtr->inst));
+
+ // specific to codecs with one instance for encoding and decoding
+ _encoderInitialized = false;
+ if (status < 0) {
+ _encoderExist = false;
+ } else {
+ _encoderExist = true;
+ }
+ return status;
+}
+
+void ACMISAC::DestructDecoderSafe() {
+ // A codec with a shared instance cannot delete the instance here.
+ _decoderInitialized = false;
+ return;
+}
+
+WebRtc_Word16 ACMISAC::InternalCreateEncoder() {
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
+ WebRtc_Word16 status = ACM_ISAC_CREATE(&(_codecInstPtr->inst));
+
+ // specific to codecs with one instance for encoding and decoding
+ _decoderInitialized = false;
+ if (status < 0) {
+ _decoderExist = false;
+ } else {
+ _decoderExist = true;
+ }
+ return status;
+}
+
+void ACMISAC::DestructEncoderSafe() {
+ // A codec with a shared instance cannot delete the instance here.
+ _encoderInitialized = false;
+ return;
+}
+
+WebRtc_Word32 ACMISAC::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ // Sanity checks
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
+ if (!_decoderInitialized || !_decoderExist) {
+ return -1;
+ }
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_ISAC_FUNCTION."
+ // Then call NetEQ to add the codec to its
+ // database.
+ if (codecInst.plfreq == 16000) {
+ SET_CODEC_PAR((codecDef), kDecoderISAC, codecInst.pltype,
+ _codecInstPtr->inst, 16000);
+#ifdef WEBRTC_CODEC_ISAC
+ SET_ISAC_FUNCTIONS((codecDef));
+#else
+ SET_ISACfix_FUNCTIONS((codecDef));
+#endif
+ } else {
+#ifdef WEBRTC_CODEC_ISAC
+ SET_CODEC_PAR((codecDef), kDecoderISACswb, codecInst.pltype,
+ _codecInstPtr->inst, 32000);
+ SET_ISACSWB_FUNCTIONS((codecDef));
+#else
+ return -1;
+#endif
+ }
+ return 0;
+}
+
+void ACMISAC::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ ACM_ISAC_FREE((ACM_ISAC_STRUCT *) ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16 ACMISAC::Transcode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16 qBWE, WebRtc_Word32 rate,
+ bool isRED) {
+ WebRtc_Word16 jitterInfo = 0;
+ // transcode from a higher rate to a lower rate
+ // sanity check
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
+
+ *bitStreamLenByte = ACM_ISAC_GETNEWBITSTREAM(_codecInstPtr->inst, qBWE,
+ jitterInfo, rate,
+ (WebRtc_Word16*) bitStream,
+ (isRED) ? 1 : 0);
+
+ if (*bitStreamLenByte < 0) {
+ // error happened
+ *bitStreamLenByte = 0;
+ return -1;
+ } else {
+ return *bitStreamLenByte;
+ }
+}
+
+WebRtc_Word16 ACMISAC::SetBitRateSafe(WebRtc_Word32 bitRate) {
+ if (_codecInstPtr == NULL) {
+ return -1;
+ }
+ WebRtc_UWord16 encoderSampFreq;
+ EncoderSampFreq(encoderSampFreq);
+ bool reinit = false;
+ // change the BN of iSAC
+ if (bitRate == -1) {
+ // ADAPTIVE MODE
+ // Check if it was already in adaptive mode
+ if (_isacCodingMode != ADAPTIVE) {
+ // was not in adaptive, then set the mode to adaptive
+ // and flag for re-initialization
+ _isacCodingMode = ADAPTIVE;
+ reinit = true;
+ }
+ }
+ // Sanity check if the rate is valid
+ else if ((bitRate >= ISAC_MIN_RATE) && (bitRate <= ISAC_MAX_RATE)) {
+ // check if it was in channel-independent mode before
+ if (_isacCodingMode != CHANNEL_INDEPENDENT) {
+ // was not in channel independent, set the mode to
+ // channel-independent and flag for re-initialization
+ _isacCodingMode = CHANNEL_INDEPENDENT;
+ reinit = true;
+ }
+ // store the bottleneck
+ _isacCurrentBN = (WebRtc_UWord16) bitRate;
+ } else {
+ // invalid rate
+ return -1;
+ }
+
+ WebRtc_Word16 status = 0;
+ if (reinit) {
+ // initialize and check if it is successful
+ if (ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0) {
+ // failed initialization
+ return -1;
+ }
+ }
+ if (_isacCodingMode == CHANNEL_INDEPENDENT) {
+ status = ACM_ISAC_CONTROL(
+ _codecInstPtr->inst, _isacCurrentBN,
+ (encoderSampFreq == 32000) ? 30 : (_frameLenSmpl / 16));
+ if (status < 0) {
+ status = -1;
+ }
+ }
+
+ // Update encoder parameters
+ _encoderParams.codecInstant.rate = bitRate;
+
+ UpdateFrameLen();
+ return status;
+}
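// Usage note (illustrative values, not from the CL): a bitRate of -1 switches
// the codec to ADAPTIVE mode and flags a re-init; any rate within
// [ISAC_MIN_RATE, ISAC_MAX_RATE], e.g. 32000, selects CHANNEL_INDEPENDENT
// mode with that bottleneck, after which ACM_ISAC_CONTROL is called with the
// frame size in ms (30 ms at a 32 kHz encoder rate, otherwise
// _frameLenSmpl / 16, i.e. 30 ms for a 480-sample frame at 16 kHz).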
+
+WebRtc_Word32 ACMISAC::GetEstimatedBandwidthSafe() {
+ WebRtc_Word16 bandwidthIndex = 0;
+ WebRtc_Word16 delayIndex = 0;
+ IsacSamplingRate sampRate;
+
+ // Get bandwidth information
+ ACM_ISAC_GETSENDBWE(_codecInstPtr->inst, &bandwidthIndex, &delayIndex);
+
+ // Validity check of the index
+ if ((bandwidthIndex < 0) || (bandwidthIndex >= NR_ISAC_BANDWIDTHS)) {
+ return -1;
+ }
+
+ // Check sample frequency
+ sampRate = ACM_ISAC_GETDECSAMPRATE(_codecInstPtr->inst);
+ if (sampRate == kIsacWideband) {
+ return isacRatesWB[bandwidthIndex];
+ } else {
+ return isacRatesSWB[bandwidthIndex];
+ }
+}
+
+WebRtc_Word32 ACMISAC::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 estimatedBandwidth) {
+ IsacSamplingRate sampRate;
+ WebRtc_Word16 bandwidthIndex;
+
+ // Check sample frequency and choose appropriate table
+ sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
+
+ if (sampRate == kIsacWideband) {
+ // Search through the WB rate table to find the index
+ bandwidthIndex = NR_ISAC_BANDWIDTHS / 2 - 1;
+ for (int i = 0; i < (NR_ISAC_BANDWIDTHS / 2); i++) {
+ if (estimatedBandwidth == isacRatesWB[i]) {
+ bandwidthIndex = i;
+ break;
+ } else if (estimatedBandwidth
+ == isacRatesWB[i + NR_ISAC_BANDWIDTHS / 2]) {
+ bandwidthIndex = i + NR_ISAC_BANDWIDTHS / 2;
+ break;
+ } else if (estimatedBandwidth < isacRatesWB[i]) {
+ bandwidthIndex = i;
+ break;
+ }
+ }
+ } else {
+ // Search through the SWB rate table to find the index
+ bandwidthIndex = NR_ISAC_BANDWIDTHS - 1;
+ for (int i = 0; i < NR_ISAC_BANDWIDTHS; i++) {
+ if (estimatedBandwidth <= isacRatesSWB[i]) {
+ bandwidthIndex = i;
+ break;
+ }
+ }
+ }
+
+ // Set iSAC Bandwidth Estimate
+ ACM_ISAC_SETBWE(_codecInstPtr->inst, bandwidthIndex);
+
+ return 0;
+}
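// A self-contained sketch of the wideband branch of the search above
// (kRatesWB and WbBandwidthIndex are illustrative names): an exact match in
// either half of the table wins, otherwise the first first-half entry above
// the estimate is used, with the last first-half entry as the fallback.
// E.g. an estimate of 15200 maps to index 4.
#include <stdint.h>

static const int32_t kRatesWB[24] = {
    10000, 11100, 12300, 13700, 15200, 16900, 18800, 20900, 23300, 25900,
    28700, 31900, 10100, 11200, 12400, 13800, 15300, 17000, 18900, 21000,
    23400, 26000, 28800, 32000};

int16_t WbBandwidthIndex(int32_t estimate) {
  int16_t index = 24 / 2 - 1;  // Fallback: last entry of the first half.
  for (int i = 0; i < 24 / 2; i++) {
    if (estimate == kRatesWB[i]) { index = i; break; }
    if (estimate == kRatesWB[i + 24 / 2]) { index = i + 24 / 2; break; }
    if (estimate < kRatesWB[i]) { index = i; break; }
  }
  return index;
}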
+
+WebRtc_Word32 ACMISAC::GetRedPayloadSafe(
+#if (!defined(WEBRTC_CODEC_ISAC))
+ WebRtc_UWord8* /* redPayload */, WebRtc_Word16* /* payloadBytes */) {
+ return -1;
+#else
+ WebRtc_UWord8* redPayload, WebRtc_Word16* payloadBytes) {
+ WebRtc_Word16 bytes = WebRtcIsac_GetRedPayload(_codecInstPtr->inst,
+ (WebRtc_Word16*)redPayload);
+ if (bytes < 0) {
+ return -1;
+ }
+ *payloadBytes = bytes;
+ return 0;
+#endif
+}
+
+WebRtc_Word16 ACMISAC::UpdateDecoderSampFreq(
+#ifdef WEBRTC_CODEC_ISAC
+ WebRtc_Word16 codecId) {
+ if (ACMCodecDB::kISAC == codecId) {
+ return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacWideband);
+ } else if (ACMCodecDB::kISACSWB == codecId) {
+ return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacSuperWideband);
+ } else {
+ return -1;
+ }
+#else
+ WebRtc_Word16 /* codecId */) {
+ return 0;
+#endif
+}
+
+WebRtc_Word16 ACMISAC::UpdateEncoderSampFreq(
+#ifdef WEBRTC_CODEC_ISAC
+ WebRtc_UWord16 encoderSampFreqHz) {
+ WebRtc_UWord16 currentSampRateHz;
+ EncoderSampFreq(currentSampRateHz);
+
+ if (currentSampRateHz != encoderSampFreqHz) {
+ if ((encoderSampFreqHz != 16000) && (encoderSampFreqHz != 32000)) {
+ return -1;
+ } else {
+ _inAudioIxRead = 0;
+ _inAudioIxWrite = 0;
+ _inTimestampIxWrite = 0;
+ if (encoderSampFreqHz == 16000) {
+ if (WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacWideband) < 0) {
+ return -1;
}
- if(payloadType == _decoderParams32kHz.codecInstant.pltype)
- {
- memcpy(decParams, &_decoderParams32kHz,
- sizeof(WebRtcACMCodecParams));
- return true;
+ _samplesIn10MsAudio = 160;
+ } else {
+ if (WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacSuperWideband)
+ < 0) {
+ return -1;
}
+ _samplesIn10MsAudio = 320;
+ }
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ _encoderParams.codecInstant.pacsize = _frameLenSmpl;
+ _encoderParams.codecInstant.plfreq = encoderSampFreqHz;
+ return 0;
}
- return false;
+ }
+#else
+ WebRtc_UWord16 /* encoderSampFreqHz */) {
+#endif
+ return 0;
}
-void
-ACMISAC::SaveDecoderParamSafe(
- const WebRtcACMCodecParams* codecParams)
-{
- // set decoder sampling frequency.
- if(codecParams->codecInstant.plfreq == 32000)
- {
- memcpy(&_decoderParams32kHz, codecParams, sizeof(WebRtcACMCodecParams));
- }
- else
- {
- memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
- }
+WebRtc_Word16 ACMISAC::EncoderSampFreq(WebRtc_UWord16& sampFreqHz) {
+ IsacSamplingRate sampRate;
+ sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
+ if (sampRate == kIsacSuperWideband) {
+ sampFreqHz = 32000;
+ } else {
+ sampFreqHz = 16000;
+ }
+ return 0;
}
+WebRtc_Word32 ACMISAC::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec, const bool enforceFrameSize) {
+ WebRtc_Word16 status;
+ {
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
+ // TODO(turajs): at 32 kHz we hardcode calling with 30 ms and enforce
+ // the frame-size, otherwise we might get an error. Revise if
+ // control-bwe is changed.
+ if (sampFreqHz == 32000) {
+ status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst, initRateBitPerSec, 30,
+ 1);
+ } else {
+ status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst, initRateBitPerSec,
+ initFrameSizeMsec,
+ enforceFrameSize ? 1 : 0);
+ }
+ }
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Couldn't config iSAC BWE.");
+ return -1;
+ }
+ UpdateFrameLen();
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
+ return 0;
+}
-WebRtc_Word16
-ACMISAC::REDPayloadISAC(
- const WebRtc_Word32 isacRate,
- const WebRtc_Word16 isacBwEstimate,
- WebRtc_UWord8* payload,
- WebRtc_Word16* payloadLenBytes)
-{
- WebRtc_Word16 status;
- ReadLockScoped rl(_codecWrapperLock);
- status = Transcode(payload, payloadLenBytes, isacBwEstimate, isacRate, true);
- return status;
+WebRtc_Word32 ACMISAC::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes) {
+ return ACM_ISAC_SETMAXPAYLOADSIZE(_codecInstPtr->inst, maxPayloadLenBytes);
+}
+
+WebRtc_Word32 ACMISAC::SetISACMaxRate(const WebRtc_UWord32 maxRateBitPerSec) {
+ return ACM_ISAC_SETMAXRATE(_codecInstPtr->inst, maxRateBitPerSec);
+}
+
+void ACMISAC::UpdateFrameLen() {
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ _encoderParams.codecInstant.pacsize = _frameLenSmpl;
+}
+
+void ACMISAC::CurrentRate(WebRtc_Word32& rateBitPerSec) {
+ if (_isacCodingMode == ADAPTIVE) {
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &rateBitPerSec);
+ }
+}
+
+bool ACMISAC::DecoderParamsSafe(WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType) {
+ if (_decoderInitialized) {
+ if (payloadType == _decoderParams.codecInstant.pltype) {
+ memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
+ return true;
+ }
+ if (payloadType == _decoderParams32kHz.codecInstant.pltype) {
+ memcpy(decParams, &_decoderParams32kHz, sizeof(WebRtcACMCodecParams));
+ return true;
+ }
+ }
+ return false;
+}
+
+void ACMISAC::SaveDecoderParamSafe(const WebRtcACMCodecParams* codecParams) {
+ // set decoder sampling frequency.
+ if (codecParams->codecInstant.plfreq == 32000) {
+ memcpy(&_decoderParams32kHz, codecParams, sizeof(WebRtcACMCodecParams));
+ } else {
+ memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+ }
+}
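// Usage note for the pair above (payload types 103 and 104 are hypothetical):
// SaveDecoderParamSafe() stores 32 kHz parameter sets in _decoderParams32kHz
// and everything else in _decoderParams; DecoderParamsSafe() then returns
// whichever set matches the requested RTP payload type, so wideband (e.g.
// pltype 103) and super-wideband (e.g. pltype 104) iSAC decoder parameters
// can be kept side by side.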
+
+WebRtc_Word16 ACMISAC::REDPayloadISAC(const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes) {
+ WebRtc_Word16 status;
+ ReadLockScoped rl(_codecWrapperLock);
+ status = Transcode(payload, payloadLenBytes, isacBwEstimate, isacRate, true);
+ return status;
}
#endif
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_isac.h b/modules/audio_coding/main/source/acm_isac.h
index ee56a6e..ee22637 100644
--- a/modules/audio_coding/main/source/acm_isac.h
+++ b/modules/audio_coding/main/source/acm_isac.h
@@ -13,137 +13,112 @@
#include "acm_generic_codec.h"
-namespace webrtc
-{
+namespace webrtc {
struct ACMISACInst;
-enum iSACCodingMode {ADAPTIVE, CHANNEL_INDEPENDENT};
-
-
-class ACMISAC : public ACMGenericCodec
-{
-public:
- ACMISAC(WebRtc_Word16 codecID);
- ~ACMISAC();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
-
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
-
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
-
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
-
- WebRtc_Word16 DeliverCachedIsacData(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_UWord32* timestamp,
- WebRtcACMEncodingType* encodingType,
- const WebRtc_UWord16 isacRate,
- const WebRtc_UWord8 isacBWestimate);
-
- WebRtc_Word16 DeliverCachedData(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */,
- WebRtc_UWord32* /* timestamp */,
- WebRtcACMEncodingType* /* encodingType */)
- {
- return -1;
- }
-
- WebRtc_Word16 UpdateDecoderSampFreq(
- WebRtc_Word16 codecId);
-
- WebRtc_Word16 UpdateEncoderSampFreq(
- WebRtc_UWord16 sampFreqHz);
-
- WebRtc_Word16 EncoderSampFreq(
- WebRtc_UWord16& sampFreqHz);
-
- WebRtc_Word32 ConfigISACBandwidthEstimator(
- const WebRtc_UWord8 initFrameSizeMsec,
- const WebRtc_UWord16 initRateBitPerSec,
- const bool enforceFrameSize);
-
- WebRtc_Word32 SetISACMaxPayloadSize(
- const WebRtc_UWord16 maxPayloadLenBytes);
-
- WebRtc_Word32 SetISACMaxRate(
- const WebRtc_UWord32 maxRateBitPerSec);
-
- WebRtc_Word16 REDPayloadISAC(
- const WebRtc_Word32 isacRate,
- const WebRtc_Word16 isacBwEstimate,
- WebRtc_UWord8* payload,
- WebRtc_Word16* payloadLenBytes);
-
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
-
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
-
- void DestructEncoderSafe();
-
- void DestructDecoderSafe();
-
- WebRtc_Word16 SetBitRateSafe(
- const WebRtc_Word32 bitRate);
-
- WebRtc_Word32 GetEstimatedBandwidthSafe();
-
- WebRtc_Word32 SetEstimatedBandwidthSafe(WebRtc_Word32 estimatedBandwidth);
-
- WebRtc_Word32 GetRedPayloadSafe(
- WebRtc_UWord8* redPayload,
- WebRtc_Word16* payloadBytes);
-
- WebRtc_Word16 InternalCreateEncoder();
-
- WebRtc_Word16 InternalCreateDecoder();
-
- void InternalDestructEncoderInst(
- void* ptrInst);
-
- WebRtc_Word16 Transcode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte,
- WebRtc_Word16 qBWE,
- WebRtc_Word32 rate,
- bool isRED);
-
- void CurrentRate(WebRtc_Word32& rateBitPerSec);
-
- void UpdateFrameLen();
-
- bool DecoderParamsSafe(
- WebRtcACMCodecParams *decParams,
- const WebRtc_UWord8 payloadType);
-
- void SaveDecoderParamSafe(
- const WebRtcACMCodecParams* codecParams);
-
- ACMISACInst* _codecInstPtr;
-
- bool _isEncInitialized;
- iSACCodingMode _isacCodingMode;
- bool _enforceFrameSize;
- WebRtc_Word32 _isacCurrentBN;
- WebRtc_UWord16 _samplesIn10MsAudio;
- WebRtcACMCodecParams _decoderParams32kHz;
+enum iSACCodingMode {
+ ADAPTIVE,
+ CHANNEL_INDEPENDENT
};
-} //namespace
+class ACMISAC : public ACMGenericCodec {
+ public:
+ ACMISAC(WebRtc_Word16 codecID);
+ ~ACMISAC();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 DeliverCachedIsacData(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timestamp,
+ WebRtcACMEncodingType* encodingType,
+ const WebRtc_UWord16 isacRate,
+ const WebRtc_UWord8 isacBWestimate);
+
+ WebRtc_Word16 DeliverCachedData(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_UWord32* /* timestamp */,
+ WebRtcACMEncodingType* /* encodingType */) {
+ return -1;
+ }
+
+ WebRtc_Word16 UpdateDecoderSampFreq(WebRtc_Word16 codecId);
+
+ WebRtc_Word16 UpdateEncoderSampFreq(WebRtc_UWord16 sampFreqHz);
+
+ WebRtc_Word16 EncoderSampFreq(WebRtc_UWord16& sampFreqHz);
+
+ WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec, const bool enforceFrameSize);
+
+ WebRtc_Word32 SetISACMaxPayloadSize(const WebRtc_UWord16 maxPayloadLenBytes);
+
+ WebRtc_Word32 SetISACMaxRate(const WebRtc_UWord32 maxRateBitPerSec);
+
+ WebRtc_Word16 REDPayloadISAC(const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes);
+
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte, WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 SetBitRateSafe(const WebRtc_Word32 bitRate);
+
+ WebRtc_Word32 GetEstimatedBandwidthSafe();
+
+ WebRtc_Word32 SetEstimatedBandwidthSafe(WebRtc_Word32 estimatedBandwidth);
+
+ WebRtc_Word32 GetRedPayloadSafe(WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(void* ptrInst);
+
+ WebRtc_Word16 Transcode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte, WebRtc_Word16 qBWE,
+ WebRtc_Word32 rate, bool isRED);
+
+ void CurrentRate(WebRtc_Word32& rateBitPerSec);
+
+ void UpdateFrameLen();
+
+ bool DecoderParamsSafe(WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
+
+ void SaveDecoderParamSafe(const WebRtcACMCodecParams* codecParams);
+
+ ACMISACInst* _codecInstPtr;
+ bool _isEncInitialized;
+ iSACCodingMode _isacCodingMode;
+ bool _enforceFrameSize;
+ WebRtc_Word32 _isacCurrentBN;
+ WebRtc_UWord16 _samplesIn10MsAudio;
+ WebRtcACMCodecParams _decoderParams32kHz;
+};
+
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_ISAC_H_
diff --git a/modules/audio_coding/main/source/acm_isac_macros.h b/modules/audio_coding/main/source/acm_isac_macros.h
index 4c3842a..07031d6 100644
--- a/modules/audio_coding/main/source/acm_isac_macros.h
+++ b/modules/audio_coding/main/source/acm_isac_macros.h
@@ -13,62 +13,61 @@
#include "engine_configurations.h"
-namespace webrtc
-{
+namespace webrtc {
#ifdef WEBRTC_CODEC_ISAC
-# define ACM_ISAC_CREATE WebRtcIsac_Create
-# define ACM_ISAC_FREE WebRtcIsac_Free
-# define ACM_ISAC_ENCODERINIT WebRtcIsac_EncoderInit
-# define ACM_ISAC_ENCODE WebRtcIsac_Encode
-# define ACM_ISAC_DECODERINIT WebRtcIsac_DecoderInit
-# define ACM_ISAC_DECODE_BWE WebRtcIsac_UpdateBwEstimate
-# define ACM_ISAC_DECODE_B WebRtcIsac_Decode
-# define ACM_ISAC_DECODEPLC WebRtcIsac_DecodePlc
-# define ACM_ISAC_CONTROL WebRtcIsac_Control
-# define ACM_ISAC_CONTROL_BWE WebRtcIsac_ControlBwe
-# define ACM_ISAC_GETFRAMELEN WebRtcIsac_ReadFrameLen
-# define ACM_ISAC_GETERRORCODE WebRtcIsac_GetErrorCode
-# define ACM_ISAC_GETSENDBITRATE WebRtcIsac_GetUplinkBw
-# define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsac_SetMaxPayloadSize
-# define ACM_ISAC_SETMAXRATE WebRtcIsac_SetMaxRate
-# define ACM_ISAC_GETNEWBITSTREAM WebRtcIsac_GetNewBitStream
-# define ACM_ISAC_GETSENDBWE WebRtcIsac_GetDownLinkBwIndex
-# define ACM_ISAC_SETBWE WebRtcIsac_UpdateUplinkBw
-# define ACM_ISAC_GETBWE WebRtcIsac_ReadBwIndex
-# define ACM_ISAC_GETNEWFRAMELEN WebRtcIsac_GetNewFrameLen
-# define ACM_ISAC_STRUCT ISACStruct
-# define ACM_ISAC_GETENCSAMPRATE WebRtcIsac_EncSampRate
-# define ACM_ISAC_GETDECSAMPRATE WebRtcIsac_DecSampRate
+#define ACM_ISAC_CREATE WebRtcIsac_Create
+#define ACM_ISAC_FREE WebRtcIsac_Free
+#define ACM_ISAC_ENCODERINIT WebRtcIsac_EncoderInit
+#define ACM_ISAC_ENCODE WebRtcIsac_Encode
+#define ACM_ISAC_DECODERINIT WebRtcIsac_DecoderInit
+#define ACM_ISAC_DECODE_BWE WebRtcIsac_UpdateBwEstimate
+#define ACM_ISAC_DECODE_B WebRtcIsac_Decode
+#define ACM_ISAC_DECODEPLC WebRtcIsac_DecodePlc
+#define ACM_ISAC_CONTROL WebRtcIsac_Control
+#define ACM_ISAC_CONTROL_BWE WebRtcIsac_ControlBwe
+#define ACM_ISAC_GETFRAMELEN WebRtcIsac_ReadFrameLen
+#define ACM_ISAC_GETERRORCODE WebRtcIsac_GetErrorCode
+#define ACM_ISAC_GETSENDBITRATE WebRtcIsac_GetUplinkBw
+#define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsac_SetMaxPayloadSize
+#define ACM_ISAC_SETMAXRATE WebRtcIsac_SetMaxRate
+#define ACM_ISAC_GETNEWBITSTREAM WebRtcIsac_GetNewBitStream
+#define ACM_ISAC_GETSENDBWE WebRtcIsac_GetDownLinkBwIndex
+#define ACM_ISAC_SETBWE WebRtcIsac_UpdateUplinkBw
+#define ACM_ISAC_GETBWE WebRtcIsac_ReadBwIndex
+#define ACM_ISAC_GETNEWFRAMELEN WebRtcIsac_GetNewFrameLen
+#define ACM_ISAC_STRUCT ISACStruct
+#define ACM_ISAC_GETENCSAMPRATE WebRtcIsac_EncSampRate
+#define ACM_ISAC_GETDECSAMPRATE WebRtcIsac_DecSampRate
#endif
#ifdef WEBRTC_CODEC_ISACFX
-# define ACM_ISAC_CREATE WebRtcIsacfix_Create
-# define ACM_ISAC_FREE WebRtcIsacfix_Free
-# define ACM_ISAC_ENCODERINIT WebRtcIsacfix_EncoderInit
-# define ACM_ISAC_ENCODE WebRtcIsacfix_Encode
-# define ACM_ISAC_DECODERINIT WebRtcIsacfix_DecoderInit
-# define ACM_ISAC_DECODE_BWE WebRtcIsacfix_UpdateBwEstimate
-# define ACM_ISAC_DECODE_B WebRtcIsacfix_Decode
-# define ACM_ISAC_DECODEPLC WebRtcIsacfix_DecodePlc
-# define ACM_ISAC_CONTROL ACMISACFixControl // local Impl
-# define ACM_ISAC_CONTROL_BWE ACMISACFixControlBWE // local Impl
-# define ACM_ISAC_GETFRAMELEN WebRtcIsacfix_ReadFrameLen
-# define ACM_ISAC_GETERRORCODE WebRtcIsacfix_GetErrorCode
-# define ACM_ISAC_GETSENDBITRATE ACMISACFixGetSendBitrate // local Impl
-# define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsacfix_SetMaxPayloadSize
-# define ACM_ISAC_SETMAXRATE WebRtcIsacfix_SetMaxRate
-# define ACM_ISAC_GETNEWBITSTREAM ACMISACFixGetNewBitstream // local Impl
-# define ACM_ISAC_GETSENDBWE ACMISACFixGetSendBWE // local Impl
-# define ACM_ISAC_SETBWE WebRtcIsacfix_UpdateUplinkBw
-# define ACM_ISAC_GETBWE WebRtcIsacfix_ReadBwIndex
-# define ACM_ISAC_GETNEWFRAMELEN WebRtcIsacfix_GetNewFrameLen
-# define ACM_ISAC_STRUCT ISACFIX_MainStruct
-# define ACM_ISAC_GETENCSAMPRATE ACMISACFixGetEncSampRate // local Impl
-# define ACM_ISAC_GETDECSAMPRATE ACMISACFixGetDecSampRate // local Impl
+#define ACM_ISAC_CREATE WebRtcIsacfix_Create
+#define ACM_ISAC_FREE WebRtcIsacfix_Free
+#define ACM_ISAC_ENCODERINIT WebRtcIsacfix_EncoderInit
+#define ACM_ISAC_ENCODE WebRtcIsacfix_Encode
+#define ACM_ISAC_DECODERINIT WebRtcIsacfix_DecoderInit
+#define ACM_ISAC_DECODE_BWE WebRtcIsacfix_UpdateBwEstimate
+#define ACM_ISAC_DECODE_B WebRtcIsacfix_Decode
+#define ACM_ISAC_DECODEPLC WebRtcIsacfix_DecodePlc
+#define ACM_ISAC_CONTROL ACMISACFixControl // local Impl
+#define ACM_ISAC_CONTROL_BWE ACMISACFixControlBWE // local Impl
+#define ACM_ISAC_GETFRAMELEN WebRtcIsacfix_ReadFrameLen
+#define ACM_ISAC_GETERRORCODE WebRtcIsacfix_GetErrorCode
+#define ACM_ISAC_GETSENDBITRATE ACMISACFixGetSendBitrate // local Impl
+#define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsacfix_SetMaxPayloadSize
+#define ACM_ISAC_SETMAXRATE WebRtcIsacfix_SetMaxRate
+#define ACM_ISAC_GETNEWBITSTREAM ACMISACFixGetNewBitstream // local Impl
+#define ACM_ISAC_GETSENDBWE ACMISACFixGetSendBWE // local Impl
+#define ACM_ISAC_SETBWE WebRtcIsacfix_UpdateUplinkBw
+#define ACM_ISAC_GETBWE WebRtcIsacfix_ReadBwIndex
+#define ACM_ISAC_GETNEWFRAMELEN WebRtcIsacfix_GetNewFrameLen
+#define ACM_ISAC_STRUCT ISACFIX_MainStruct
+#define ACM_ISAC_GETENCSAMPRATE ACMISACFixGetEncSampRate // local Impl
+#define ACM_ISAC_GETDECSAMPRATE ACMISACFixGetDecSampRate // local Impl
#endif
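// Sketch of how the indirection above is used (error handling trimmed; the
// surrounding setup is assumed): the same call site compiles against either
// the floating-point or the fixed-point iSAC library, depending on which
// block of defines is active.
ACM_ISAC_STRUCT* inst = NULL;
if (ACM_ISAC_CREATE(&inst) < 0) {  // WebRtcIsac_Create or WebRtcIsacfix_Create
  // Creation failed; nothing to free.
} else {
  ACM_ISAC_FREE(inst);             // Matching Free from the same build.
}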
-} //namespace
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_ISAC_MACROS_H_
diff --git a/modules/audio_coding/main/source/acm_neteq.cc b/modules/audio_coding/main/source/acm_neteq.cc
index 71e541e..24ecba1 100644
--- a/modules/audio_coding/main/source/acm_neteq.cc
+++ b/modules/audio_coding/main/source/acm_neteq.cc
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include <algorithm> // sort
#include <stdlib.h> // malloc
#include <vector>
@@ -23,40 +22,35 @@
#include "webrtc_neteq.h"
#include "webrtc_neteq_internal.h"
-namespace webrtc
-{
+namespace webrtc {
#define RTP_HEADER_SIZE 12
#define NETEQ_INIT_FREQ 8000
#define NETEQ_INIT_FREQ_KHZ (NETEQ_INIT_FREQ/1000)
#define NETEQ_ERR_MSG_LEN_BYTE (WEBRTC_NETEQ_MAX_ERROR_NAME + 1)
-
ACMNetEQ::ACMNetEQ()
-:
-_id(0),
-_currentSampFreqKHz(NETEQ_INIT_FREQ_KHZ),
-_avtPlayout(false),
-_playoutMode(voice),
-_netEqCritSect(CriticalSectionWrapper::CreateCriticalSection()),
-_vadStatus(false),
-_vadMode(VADNormal),
-_decodeLock(RWLockWrapper::CreateRWLock()),
-_numSlaves(0),
-_receivedStereo(false),
-_masterSlaveInfo(NULL),
-_previousAudioActivity(AudioFrame::kVadUnknown),
-_extraDelay(0),
-_callbackCritSect(CriticalSectionWrapper::CreateCriticalSection())
-{
- for(int n = 0; n < MAX_NUM_SLAVE_NETEQ + 1; n++)
- {
- _isInitialized[n] = false;
- _ptrVADInst[n] = NULL;
- _inst[n] = NULL;
- _instMem[n] = NULL;
- _netEqPacketBuffer[n] = NULL;
- }
+ : _id(0),
+ _currentSampFreqKHz(NETEQ_INIT_FREQ_KHZ),
+ _avtPlayout(false),
+ _playoutMode(voice),
+ _netEqCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _vadStatus(false),
+ _vadMode(VADNormal),
+ _decodeLock(RWLockWrapper::CreateRWLock()),
+ _numSlaves(0),
+ _receivedStereo(false),
+ _masterSlaveInfo(NULL),
+ _previousAudioActivity(AudioFrame::kVadUnknown),
+ _extraDelay(0),
+ _callbackCritSect(CriticalSectionWrapper::CreateCriticalSection()) {
+ for (int n = 0; n < MAX_NUM_SLAVE_NETEQ + 1; n++) {
+ _isInitialized[n] = false;
+ _ptrVADInst[n] = NULL;
+ _inst[n] = NULL;
+ _instMem[n] = NULL;
+ _netEqPacketBuffer[n] = NULL;
+ }
}
ACMNetEQ::~ACMNetEQ() {
@@ -78,1015 +72,802 @@
}
}
-WebRtc_Word32
-ACMNetEQ::Init()
-{
- CriticalSectionScoped lock(_netEqCritSect);
+WebRtc_Word32 ACMNetEQ::Init() {
+ CriticalSectionScoped lock(_netEqCritSect);
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(InitByIdxSafe(idx) < 0)
- {
- return -1;
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (InitByIdxSafe(idx) < 0) {
+ return -1;
+ }
+ // delete VAD instance and start fresh if required.
+ if (_ptrVADInst[idx] != NULL) {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
+ }
+ if (_vadStatus) {
+      // VAD has to be enabled.
+ if (EnableVADByIdxSafe(idx) < 0) {
+ // Failed to enable VAD.
+ // Delete VAD instance, if it is created
+ if (_ptrVADInst[idx] != NULL) {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
}
- // delete VAD instance and start fresh if required.
- if(_ptrVADInst[idx] != NULL)
- {
- WebRtcVad_Free(_ptrVADInst[idx]);
- _ptrVADInst[idx] = NULL;
- }
- if(_vadStatus)
- {
- // Has to enable VAD
- if(EnableVADByIdxSafe(idx) < 0)
- {
- // Failed to enable VAD.
- // Delete VAD instance, if it is created
- if(_ptrVADInst[idx] != NULL)
- {
- WebRtcVad_Free(_ptrVADInst[idx]);
- _ptrVADInst[idx] = NULL;
- }
- // We are at initialization of NetEq, if failed to
- // enable VAD, we delete the NetEq instance.
- if (_instMem[idx] != NULL) {
- free(_instMem[idx]);
- _instMem[idx] = NULL;
- _inst[idx] = NULL;
- }
- _isInitialized[idx] = false;
- return -1;
- }
- }
- _isInitialized[idx] = true;
- }
- if (EnableVAD() == -1)
- {
- return -1;
- }
- return 0;
-}
-
-WebRtc_Word16
-ACMNetEQ::InitByIdxSafe(
- const WebRtc_Word16 idx)
-{
- int memorySizeBytes;
- if (WebRtcNetEQ_AssignSize(&memorySizeBytes) != 0)
- {
- LogError("AssignSize", idx);
- return -1;
- }
-
- if(_instMem[idx] != NULL)
- {
- free(_instMem[idx]);
- _instMem[idx] = NULL;
- }
- _instMem[idx] = malloc(memorySizeBytes);
- if (_instMem[idx] == NULL)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "InitByIdxSafe: NetEq Initialization error: could not allocate memory for NetEq");
- _isInitialized[idx] = false;
- return -1;
- }
- if (WebRtcNetEQ_Assign(&_inst[idx], _instMem[idx]) != 0)
- {
+ // We are at initialization of NetEq, if failed to
+ // enable VAD, we delete the NetEq instance.
if (_instMem[idx] != NULL) {
- free(_instMem[idx]);
- _instMem[idx] = NULL;
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ _inst[idx] = NULL;
}
- LogError("Assign", idx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "InitByIdxSafe: NetEq Initialization error: could not Assign");
_isInitialized[idx] = false;
return -1;
- }
- if (WebRtcNetEQ_Init(_inst[idx], NETEQ_INIT_FREQ) != 0)
- {
- if (_instMem[idx] != NULL) {
- free(_instMem[idx]);
- _instMem[idx] = NULL;
- }
- LogError("Init", idx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "InitByIdxSafe: NetEq Initialization error: could not initialize NetEq");
- _isInitialized[idx] = false;
- return -1;
+ }
}
_isInitialized[idx] = true;
- return 0;
+ }
+ if (EnableVAD() == -1) {
+ return -1;
+ }
+ return 0;
}
-WebRtc_Word16
-ACMNetEQ::EnableVADByIdxSafe(
- const WebRtc_Word16 idx)
-{
- if(_ptrVADInst[idx] == NULL)
- {
- if(WebRtcVad_Create(&_ptrVADInst[idx]) < 0)
- {
- _ptrVADInst[idx] = NULL;
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "EnableVADByIdxSafe: NetEq Initialization error: could not create VAD");
- return -1;
- }
- }
+WebRtc_Word16 ACMNetEQ::InitByIdxSafe(const WebRtc_Word16 idx) {
+ int memorySizeBytes;
+ if (WebRtcNetEQ_AssignSize(&memorySizeBytes) != 0) {
+ LogError("AssignSize", idx);
+ return -1;
+ }
- if(WebRtcNetEQ_SetVADInstance(_inst[idx], _ptrVADInst[idx],
- (WebRtcNetEQ_VADInitFunction) WebRtcVad_Init,
- (WebRtcNetEQ_VADSetmodeFunction) WebRtcVad_set_mode,
- (WebRtcNetEQ_VADFunction) WebRtcVad_Process) < 0)
- {
- LogError("setVADinstance", idx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "EnableVADByIdxSafe: NetEq Initialization error: could not set VAD instance");
- return -1;
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ }
+ _instMem[idx] = malloc(memorySizeBytes);
+ if (_instMem[idx] == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not allocate memory "
+ "for NetEq");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ if (WebRtcNetEQ_Assign(&_inst[idx], _instMem[idx]) != 0) {
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
}
-
- if(WebRtcNetEQ_SetVADMode(_inst[idx], _vadMode) < 0)
- {
- LogError("setVADmode", idx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "EnableVADByIdxSafe: NetEq Initialization error: could not set VAD mode");
- return -1;
+ LogError("Assign", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not Assign");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ if (WebRtcNetEQ_Init(_inst[idx], NETEQ_INIT_FREQ) != 0) {
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
}
- return 0;
+ LogError("Init", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not initialize "
+ "NetEq");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ _isInitialized[idx] = true;
+ return 0;
}
+WebRtc_Word16 ACMNetEQ::EnableVADByIdxSafe(const WebRtc_Word16 idx) {
+ if (_ptrVADInst[idx] == NULL) {
+ if (WebRtcVad_Create(&_ptrVADInst[idx]) < 0) {
+ _ptrVADInst[idx] = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not "
+ "create VAD");
+ return -1;
+ }
+ }
+ if (WebRtcNetEQ_SetVADInstance(
+ _inst[idx], _ptrVADInst[idx],
+ (WebRtcNetEQ_VADInitFunction) WebRtcVad_Init,
+ (WebRtcNetEQ_VADSetmodeFunction) WebRtcVad_set_mode,
+ (WebRtcNetEQ_VADFunction) WebRtcVad_Process) < 0) {
+ LogError("setVADinstance", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not set "
+ "VAD instance");
+ return -1;
+ }
+ if (WebRtcNetEQ_SetVADMode(_inst[idx], _vadMode) < 0) {
+ LogError("setVADmode", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not set "
+ "VAD mode");
+ return -1;
+ }
+ return 0;
+}
-WebRtc_Word32
-ACMNetEQ::AllocatePacketBuffer(
+WebRtc_Word32 ACMNetEQ::AllocatePacketBuffer(
const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs)
-{
- // Due to WebRtcNetEQ_GetRecommendedBufferSize
- // the following has to be int otherwise we will have compiler error
- // if not casted
+ WebRtc_Word16 noOfCodecs) {
+ // Due to WebRtcNetEQ_GetRecommendedBufferSize
+ // the following has to be int otherwise we will have compiler error
+ // if not casted
- CriticalSectionScoped lock(_netEqCritSect);
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, idx) < 0)
- {
- return -1;
- }
+ CriticalSectionScoped lock(_netEqCritSect);
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, idx) < 0) {
+ return -1;
}
- return 0;
+ }
+ return 0;
}
-WebRtc_Word16
-ACMNetEQ::AllocatePacketBufferByIdxSafe(
- const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs,
- const WebRtc_Word16 idx)
-{
- int maxNoPackets;
- int bufferSizeInBytes;
+WebRtc_Word16 ACMNetEQ::AllocatePacketBufferByIdxSafe(
+ const WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs,
+ const WebRtc_Word16 idx) {
+ int maxNoPackets;
+ int bufferSizeInBytes;
- if(!_isInitialized[idx])
- {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AllocatePacketBufferByIdxSafe: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_GetRecommendedBufferSize(_inst[idx], usedCodecs, noOfCodecs,
+ kTCPLargeJitter, &maxNoPackets,
+ &bufferSizeInBytes) != 0) {
+ LogError("GetRecommendedBufferSize", idx);
+ return -1;
+ }
+ if (_netEqPacketBuffer[idx] != NULL) {
+ free(_netEqPacketBuffer[idx]);
+ _netEqPacketBuffer[idx] = NULL;
+ }
+
+ _netEqPacketBuffer[idx] = (WebRtc_Word16 *) malloc(bufferSizeInBytes);
+ if (_netEqPacketBuffer[idx] == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AllocatePacketBufferByIdxSafe: NetEq Initialization error: could not "
+ "allocate memory for NetEq Packet Buffer");
+ return -1;
+
+ }
+ if (WebRtcNetEQ_AssignBuffer(_inst[idx], maxNoPackets,
+ _netEqPacketBuffer[idx],
+ bufferSizeInBytes) != 0) {
+ if (_netEqPacketBuffer[idx] != NULL) {
+ free(_netEqPacketBuffer[idx]);
+ _netEqPacketBuffer[idx] = NULL;
+ }
+ LogError("AssignBuffer", idx);
+ return -1;
+ }
+ return 0;
+}
+
+WebRtc_Word32 ACMNetEQ::SetExtraDelay(const WebRtc_Word32 delayInMS) {
+ CriticalSectionScoped lock(_netEqCritSect);
+
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetExtraDelay: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_SetExtraDelay(_inst[idx], delayInMS) < 0) {
+ LogError("SetExtraDelay", idx);
+ return -1;
+ }
+ }
+ _extraDelay = delayInMS;
+ return 0;
+}
+
+WebRtc_Word32 ACMNetEQ::SetAVTPlayout(const bool enable) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (_avtPlayout != enable) {
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AllocatePacketBufferByIdxSafe: NetEq is not initialized.");
+ "SetAVTPlayout: NetEq is not initialized.");
return -1;
- }
- if (WebRtcNetEQ_GetRecommendedBufferSize(_inst[idx], usedCodecs, noOfCodecs,
- kTCPLargeJitter , &maxNoPackets, &bufferSizeInBytes)
- != 0)
- {
- LogError("GetRecommendedBufferSize", idx);
+ }
+ if (WebRtcNetEQ_SetAVTPlayout(_inst[idx], (enable) ? 1 : 0) < 0) {
+ LogError("SetAVTPlayout", idx);
return -1;
+ }
}
- if(_netEqPacketBuffer[idx] != NULL)
- {
- free(_netEqPacketBuffer[idx]);
- _netEqPacketBuffer[idx] = NULL;
- }
+ }
+ _avtPlayout = enable;
+ return 0;
+}
- _netEqPacketBuffer[idx] = (WebRtc_Word16 *)malloc(bufferSizeInBytes);
- if (_netEqPacketBuffer[idx] == NULL)
- {
+bool ACMNetEQ::AVTPlayout() const {
+ CriticalSectionScoped lock(_netEqCritSect);
+ return _avtPlayout;
+}
+
+WebRtc_Word32 ACMNetEQ::CurrentSampFreqHz() const {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "CurrentSampFreqHz: NetEq is not initialized.");
+ return -1;
+ }
+ return (WebRtc_Word32)(1000 * _currentSampFreqKHz);
+}
+
+WebRtc_Word32 ACMNetEQ::SetPlayoutMode(const AudioPlayoutMode mode) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (_playoutMode != mode) {
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AllocatePacketBufferByIdxSafe: NetEq Initialization error: could not allocate "
- "memory for NetEq Packet Buffer");
+ "SetPlayoutMode: NetEq is not initialized.");
return -1;
+ }
- }
- if (WebRtcNetEQ_AssignBuffer(_inst[idx], maxNoPackets, _netEqPacketBuffer[idx],
- bufferSizeInBytes) != 0)
- {
- if (_netEqPacketBuffer[idx] != NULL) {
- free(_netEqPacketBuffer[idx]);
- _netEqPacketBuffer[idx] = NULL;
- }
- LogError("AssignBuffer", idx);
+ enum WebRtcNetEQPlayoutMode playoutMode = kPlayoutOff;
+ switch (mode) {
+ case voice:
+ playoutMode = kPlayoutOn;
+ break;
+ case fax:
+ playoutMode = kPlayoutFax;
+ break;
+ case streaming:
+ playoutMode = kPlayoutStreaming;
+ break;
+ }
+ if (WebRtcNetEQ_SetPlayoutMode(_inst[idx], playoutMode) < 0) {
+ LogError("SetPlayoutMode", idx);
return -1;
+ }
}
- return 0;
+ _playoutMode = mode;
+ }
+
+ return 0;
}
+AudioPlayoutMode ACMNetEQ::PlayoutMode() const {
+ CriticalSectionScoped lock(_netEqCritSect);
+ return _playoutMode;
+}
-
-
-WebRtc_Word32
-ACMNetEQ::SetExtraDelay(
- const WebRtc_Word32 delayInMS)
-{
- CriticalSectionScoped lock(_netEqCritSect);
-
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetExtraDelay: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_SetExtraDelay(_inst[idx], delayInMS) < 0)
- {
- LogError("SetExtraDelay", idx);
- return -1;
- }
+WebRtc_Word32 ACMNetEQ::NetworkStatistics(
+ ACMNetworkStatistics* statistics) const {
+ WebRtcNetEQ_NetworkStatistics stats;
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "NetworkStatistics: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_GetNetworkStatistics(_inst[0], &stats) == 0) {
+ statistics->currentAccelerateRate = stats.currentAccelerateRate;
+ statistics->currentBufferSize = stats.currentBufferSize;
+ statistics->jitterPeaksFound = (stats.jitterPeaksFound > 0);
+ statistics->currentDiscardRate = stats.currentDiscardRate;
+ statistics->currentExpandRate = stats.currentExpandRate;
+ statistics->currentPacketLossRate = stats.currentPacketLossRate;
+ statistics->currentPreemptiveRate = stats.currentPreemptiveRate;
+ statistics->preferredBufferSize = stats.preferredBufferSize;
+ statistics->clockDriftPPM = stats.clockDriftPPM;
+ } else {
+ LogError("getNetworkStatistics", 0);
+ return -1;
+ }
+ const int kArrayLen = 100;
+ int waiting_times[kArrayLen];
+ int waiting_times_len = WebRtcNetEQ_GetRawFrameWaitingTimes(_inst[0],
+ kArrayLen,
+ waiting_times);
+ if (waiting_times_len > 0) {
+ std::vector<int> waiting_times_vec(waiting_times,
+ waiting_times + waiting_times_len);
+ std::sort(waiting_times_vec.begin(), waiting_times_vec.end());
+ size_t size = waiting_times_vec.size();
+ assert(size == static_cast<size_t>(waiting_times_len));
+ if (size % 2 == 0) {
+ statistics->medianWaitingTimeMs = (waiting_times_vec[size / 2 - 1]
+ + waiting_times_vec[size / 2]) / 2;
+ } else {
+ statistics->medianWaitingTimeMs = waiting_times_vec[size / 2];
}
- _extraDelay = delayInMS;
- return 0;
-}
-
-
-WebRtc_Word32
-ACMNetEQ::SetAVTPlayout(
- const bool enable)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if (_avtPlayout != enable)
- {
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetAVTPlayout: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_SetAVTPlayout(_inst[idx], (enable) ? 1 : 0) < 0)
- {
- LogError("SetAVTPlayout", idx);
- return -1;
- }
- }
+ statistics->minWaitingTimeMs = waiting_times_vec.front();
+ statistics->maxWaitingTimeMs = waiting_times_vec.back();
+ double sum = 0;
+ for (size_t i = 0; i < size; ++i) {
+ sum += waiting_times_vec[i];
}
- _avtPlayout = enable;
- return 0;
+ statistics->meanWaitingTimeMs = static_cast<int>(sum / size);
+ } else if (waiting_times_len == 0) {
+ statistics->meanWaitingTimeMs = -1;
+ statistics->medianWaitingTimeMs = -1;
+ statistics->minWaitingTimeMs = -1;
+ statistics->maxWaitingTimeMs = -1;
+ } else {
+ LogError("getRawFrameWaitingTimes", 0);
+ return -1;
+ }
+ return 0;
}
+WebRtc_Word32 ACMNetEQ::RecIn(const WebRtc_UWord8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo) {
+ WebRtc_Word16 payload_length = static_cast<WebRtc_Word16>(payloadLength);
-bool
-ACMNetEQ::AVTPlayout() const
-{
- CriticalSectionScoped lock(_netEqCritSect);
- return _avtPlayout;
+ // translate to NetEq struct
+ WebRtcNetEQ_RTPInfo netEqRTPInfo;
+ netEqRTPInfo.payloadType = rtpInfo.header.payloadType;
+ netEqRTPInfo.sequenceNumber = rtpInfo.header.sequenceNumber;
+ netEqRTPInfo.timeStamp = rtpInfo.header.timestamp;
+ netEqRTPInfo.SSRC = rtpInfo.header.ssrc;
+ netEqRTPInfo.markerBit = rtpInfo.header.markerBit;
+
+ CriticalSectionScoped lock(_netEqCritSect);
+ // Down-cast the time to (32-6)-bit since we only care about
+ // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
+  // We mask out the 6 most significant bits of the 32-bit value so that we
+  // do not lose resolution in the multiplication below.
+ const WebRtc_UWord32 nowInMs =
+ static_cast<WebRtc_UWord32>(
+ TickTime::MillisecondTimestamp() & 0x03ffffff);
+ WebRtc_UWord32 recvTimestamp = static_cast<WebRtc_UWord32>(
+ _currentSampFreqKHz * nowInMs);
+
+ int status;
+ // In case of stereo payload, first half of the data should be pushed into
+ // master, and the second half into slave.
+ if (rtpInfo.type.Audio.channel == 2) {
+ payload_length = payload_length / 2;
+ }
+
+ // Check that master is initialized.
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq is not initialized.");
+ return -1;
+ }
+ // PUSH into Master
+ status = WebRtcNetEQ_RecInRTPStruct(_inst[0], &netEqRTPInfo, incomingPayload,
+ payload_length, recvTimestamp);
+ if (status < 0) {
+ LogError("RecInRTPStruct", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq, error in pushing in Master");
+ return -1;
+ }
+
+  // If the received stream is stereo, insert second half of packet into slave.
+ if (rtpInfo.type.Audio.channel == 2) {
+ if (!_isInitialized[1]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq is not initialized.");
+ return -1;
+ }
+ // PUSH into Slave
+ status = WebRtcNetEQ_RecInRTPStruct(_inst[1], &netEqRTPInfo,
+ &incomingPayload[payload_length],
+ payload_length, recvTimestamp);
+ if (status < 0) {
+ LogError("RecInRTPStruct", 1);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq, error in pushing in Slave");
+ return -1;
+ }
+ }
+
+ return 0;
}
-WebRtc_Word32
-ACMNetEQ::CurrentSampFreqHz() const
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if(!_isInitialized[0])
+WebRtc_Word32 ACMNetEQ::RecOut(AudioFrame& audioFrame) {
+ enum WebRtcNetEQOutputType type;
+ WebRtc_Word16 payloadLenSample;
+ enum WebRtcNetEQOutputType typeMaster;
+ enum WebRtcNetEQOutputType typeSlave;
+
+ WebRtc_Word16 payloadLenSampleSlave;
+
+ CriticalSectionScoped lockNetEq(_netEqCritSect);
+
+ if (!_receivedStereo) {
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq is not initialized.");
+ return -1;
+ }
{
+ WriteLockScoped lockCodec(*_decodeLock);
+ if (WebRtcNetEQ_RecOut(_inst[0], &(audioFrame.data_[0]),
+ &payloadLenSample) != 0) {
+ LogError("RecOut", 0);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "CurrentSampFreqHz: NetEq is not initialized.");
- return -1;
- }
- return (WebRtc_Word32)(1000*_currentSampFreqKHz);
-}
+ "RecOut: NetEq, error in pulling out for mono case");
-
-WebRtc_Word32
-ACMNetEQ::SetPlayoutMode(
- const AudioPlayoutMode mode)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if(_playoutMode != mode)
- {
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetPlayoutMode: NetEq is not initialized.");
- return -1;
- }
-
- enum WebRtcNetEQPlayoutMode playoutMode = kPlayoutOff;
- switch(mode)
- {
- case voice:
- playoutMode = kPlayoutOn;
- break;
- case fax:
- playoutMode = kPlayoutFax;
- break;
- case streaming:
- playoutMode = kPlayoutStreaming;
- break;
- }
- if(WebRtcNetEQ_SetPlayoutMode(_inst[idx], playoutMode) < 0)
- {
- LogError("SetPlayoutMode", idx);
- return -1;
- }
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
+ if (errorCode != 2003) {
+ // Cannot recover; return an error
+ return -1;
}
- _playoutMode = mode;
+ }
}
-
- return 0;
-}
-
-AudioPlayoutMode
-ACMNetEQ::PlayoutMode() const
-{
- CriticalSectionScoped lock(_netEqCritSect);
- return _playoutMode;
-}
-
-
-WebRtc_Word32
-ACMNetEQ::NetworkStatistics(
- ACMNetworkStatistics* statistics) const
-{
- WebRtcNetEQ_NetworkStatistics stats;
- CriticalSectionScoped lock(_netEqCritSect);
- if(!_isInitialized[0])
+ WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
+ audioFrame.num_channels_ = 1;
+ } else {
+ if (!_isInitialized[0] || !_isInitialized[1]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq is not initialized.");
+ return -1;
+ }
+ WebRtc_Word16 payloadMaster[480];
+ WebRtc_Word16 payloadSlave[480];
{
+ WriteLockScoped lockCodec(*_decodeLock);
+ if (WebRtcNetEQ_RecOutMasterSlave(_inst[0], payloadMaster,
+ &payloadLenSample, _masterSlaveInfo, 1)
+ != 0) {
+ LogError("RecOutMasterSlave", 0);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "NetworkStatistics: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_GetNetworkStatistics(_inst[0], &stats) == 0)
- {
- statistics->currentAccelerateRate = stats.currentAccelerateRate;
- statistics->currentBufferSize = stats.currentBufferSize;
- statistics->jitterPeaksFound = (stats.jitterPeaksFound > 0);
- statistics->currentDiscardRate = stats.currentDiscardRate;
- statistics->currentExpandRate = stats.currentExpandRate;
- statistics->currentPacketLossRate = stats.currentPacketLossRate;
- statistics->currentPreemptiveRate = stats.currentPreemptiveRate;
- statistics->preferredBufferSize = stats.preferredBufferSize;
- statistics->clockDriftPPM = stats.clockDriftPPM;
- }
- else
- {
- LogError("getNetworkStatistics", 0);
- return -1;
- }
- const int kArrayLen = 100;
- int waiting_times[kArrayLen];
- int waiting_times_len = WebRtcNetEQ_GetRawFrameWaitingTimes(
- _inst[0], kArrayLen, waiting_times);
- if (waiting_times_len > 0)
- {
- std::vector<int> waiting_times_vec(waiting_times,
- waiting_times + waiting_times_len);
- std::sort(waiting_times_vec.begin(), waiting_times_vec.end());
- size_t size = waiting_times_vec.size();
- assert(size == static_cast<size_t>(waiting_times_len));
- if (size % 2 == 0)
- {
- statistics->medianWaitingTimeMs =
- (waiting_times_vec[size / 2 - 1] +
- waiting_times_vec[size / 2]) / 2;
+ "RecOut: NetEq, error in pulling out for master");
+
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
+ if (errorCode != 2003) {
+ // Cannot recover; return an error
+ return -1;
}
- else
- {
- statistics->medianWaitingTimeMs = waiting_times_vec[size / 2];
- }
- statistics->minWaitingTimeMs = waiting_times_vec.front();
- statistics->maxWaitingTimeMs = waiting_times_vec.back();
- double sum = 0;
- for (size_t i = 0; i < size; ++i) {
- sum += waiting_times_vec[i];
- }
- statistics->meanWaitingTimeMs = static_cast<int>(sum / size);
- }
- else if (waiting_times_len == 0)
- {
- statistics->meanWaitingTimeMs = -1;
- statistics->medianWaitingTimeMs = -1;
- statistics->minWaitingTimeMs = -1;
- statistics->maxWaitingTimeMs = -1;
- }
- else
- {
- LogError("getRawFrameWaitingTimes", 0);
- return -1;
- }
- return 0;
-}
-
-WebRtc_Word32
-ACMNetEQ::RecIn(
- const WebRtc_UWord8* incomingPayload,
- const WebRtc_Word32 payloadLength,
- const WebRtcRTPHeader& rtpInfo)
-{
- WebRtc_Word16 payload_length = static_cast<WebRtc_Word16>(payloadLength);
-
- // translate to NetEq struct
- WebRtcNetEQ_RTPInfo netEqRTPInfo;
- netEqRTPInfo.payloadType = rtpInfo.header.payloadType;
- netEqRTPInfo.sequenceNumber = rtpInfo.header.sequenceNumber;
- netEqRTPInfo.timeStamp = rtpInfo.header.timestamp;
- netEqRTPInfo.SSRC = rtpInfo.header.ssrc;
- netEqRTPInfo.markerBit = rtpInfo.header.markerBit;
-
- CriticalSectionScoped lock(_netEqCritSect);
- // Down-cast the time to (32-6)-bit since we only care about
- // the least significant bits. (32-6) bits cover 2^(32-6) = 67108864 ms.
- // we masked 6 most significant bits of 32-bit so we don't loose resolution
- // when do the following multiplication.
- const WebRtc_UWord32 nowInMs = static_cast<WebRtc_UWord32>(
- TickTime::MillisecondTimestamp() & 0x03ffffff);
- WebRtc_UWord32 recvTimestamp = static_cast<WebRtc_UWord32>
- (_currentSampFreqKHz * nowInMs);
-
- int status;
- // In case of stereo payload, first half of the data should be pushed into
- // master, and the second half into slave.
- if (rtpInfo.type.Audio.channel == 2) {
- payload_length = payload_length / 2;
- }
-
- // Check that master is initialized.
- if(!_isInitialized[0])
- {
+ }
+ if (WebRtcNetEQ_RecOutMasterSlave(_inst[1], payloadSlave,
+ &payloadLenSampleSlave,
+ _masterSlaveInfo, 0) != 0) {
+ LogError("RecOutMasterSlave", 1);
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecIn: NetEq is not initialized.");
- return -1;
- }
- // PUSH into Master
- status = WebRtcNetEQ_RecInRTPStruct(_inst[0], &netEqRTPInfo,
- incomingPayload, payload_length, recvTimestamp);
- if(status < 0)
- {
- LogError("RecInRTPStruct", 0);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecIn: NetEq, error in pushing in Master");
- return -1;
+ "RecOut: NetEq, error in pulling out for slave");
+
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[1]);
+ if (errorCode != 2003) {
+ // Cannot recover; return an error
+ return -1;
+ }
+ }
}
- // If the received stream is stereo, insert second half of paket into slave.
- if(rtpInfo.type.Audio.channel == 2)
- {
- if(!_isInitialized[1])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecIn: NetEq is not initialized.");
- return -1;
- }
- // PUSH into Slave
- status = WebRtcNetEQ_RecInRTPStruct(_inst[1], &netEqRTPInfo,
- &incomingPayload[payload_length],
- payload_length,
- recvTimestamp);
- if(status < 0)
- {
- LogError("RecInRTPStruct", 1);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecIn: NetEq, error in pushing in Slave");
- return -1;
- }
+ if (payloadLenSample != payloadLenSampleSlave) {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+        "RecOut: mismatch between the length of the audio decoded by Master "
+ "(%d samples) and Slave (%d samples).",
+ payloadLenSample, payloadLenSampleSlave);
+ if (payloadLenSample > payloadLenSampleSlave) {
+ memset(&payloadSlave[payloadLenSampleSlave], 0,
+ (payloadLenSample - payloadLenSampleSlave) * sizeof(WebRtc_Word16));
+ }
}
- return 0;
-}
-
-WebRtc_Word32
-ACMNetEQ::RecOut(
- AudioFrame& audioFrame)
-{
- enum WebRtcNetEQOutputType type;
- WebRtc_Word16 payloadLenSample;
- enum WebRtcNetEQOutputType typeMaster;
- enum WebRtcNetEQOutputType typeSlave;
-
- WebRtc_Word16 payloadLenSampleSlave;
-
- CriticalSectionScoped lockNetEq(_netEqCritSect);
-
- if(!_receivedStereo)
- {
- if(!_isInitialized[0])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq is not initialized.");
- return -1;
- }
- {
- WriteLockScoped lockCodec(*_decodeLock);
- if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame.data_[0]),
- &payloadLenSample) != 0)
- {
- LogError("RecOut", 0);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq, error in pulling out for mono case");
-
- // Check for errors that can be recovered from:
- // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
- int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
- if(errorCode != 2003)
- {
- // Cannot recover; return an error
- return -1;
- }
- }
- }
- WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
- audioFrame.num_channels_ = 1;
+ for (WebRtc_Word16 n = 0; n < payloadLenSample; n++) {
+ audioFrame.data_[n << 1] = payloadMaster[n];
+ audioFrame.data_[(n << 1) + 1] = payloadSlave[n];
}
- else
- {
- if(!_isInitialized[0] || !_isInitialized[1])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq is not initialized.");
- return -1;
- }
- WebRtc_Word16 payloadMaster[480];
- WebRtc_Word16 payloadSlave[480];
- {
- WriteLockScoped lockCodec(*_decodeLock);
- if(WebRtcNetEQ_RecOutMasterSlave(_inst[0], payloadMaster,
- &payloadLenSample, _masterSlaveInfo, 1) != 0)
- {
- LogError("RecOutMasterSlave", 0);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq, error in pulling out for master");
+ audioFrame.num_channels_ = 2;
- // Check for errors that can be recovered from:
- // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
- int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
- if(errorCode != 2003)
- {
- // Cannot recover; return an error
- return -1;
- }
- }
- if(WebRtcNetEQ_RecOutMasterSlave(_inst[1], payloadSlave,
- &payloadLenSampleSlave, _masterSlaveInfo, 0) != 0)
- {
- LogError("RecOutMasterSlave", 1);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq, error in pulling out for slave");
-
- // Check for errors that can be recovered from:
- // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
- int errorCode = WebRtcNetEQ_GetErrorCode(_inst[1]);
- if(errorCode != 2003)
- {
- // Cannot recover; return an error
- return -1;
- }
- }
- }
-
- if(payloadLenSample != payloadLenSampleSlave)
- {
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
- "RecOut: mismatch between the lenght of the decoded \
-audio by Master (%d samples) and Slave (%d samples).",
- payloadLenSample, payloadLenSampleSlave);
- if(payloadLenSample > payloadLenSampleSlave)
- {
- memset(&payloadSlave[payloadLenSampleSlave], 0,
- (payloadLenSample - payloadLenSampleSlave) * sizeof(WebRtc_Word16));
- }
- }
-
- for(WebRtc_Word16 n = 0; n < payloadLenSample; n++)
- {
- audioFrame.data_[n<<1] = payloadMaster[n];
- audioFrame.data_[(n<<1)+1] = payloadSlave[n];
- }
- audioFrame.num_channels_ = 2;
-
- WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
- WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
- if((typeMaster == kOutputNormal) ||
- (typeSlave == kOutputNormal))
- {
- type = kOutputNormal;
- }
- else
- {
- type = typeMaster;
- }
+ WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
+ WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
+ if ((typeMaster == kOutputNormal) || (typeSlave == kOutputNormal)) {
+ type = kOutputNormal;
+ } else {
+ type = typeMaster;
}
+ }
- audioFrame.samples_per_channel_ = static_cast<WebRtc_UWord16>(payloadLenSample);
- // NetEq always returns 10 ms of audio.
- _currentSampFreqKHz = static_cast<float>(audioFrame.samples_per_channel_) / 10.0f;
- audioFrame.sample_rate_hz_ = audioFrame.samples_per_channel_ * 100;
- if(_vadStatus)
- {
- if(type == kOutputVADPassive)
- {
- audioFrame.vad_activity_ = AudioFrame::kVadPassive;
- audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- }
- else if(type == kOutputNormal)
- {
- audioFrame.vad_activity_ = AudioFrame::kVadActive;
- audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- }
- else if(type == kOutputPLC)
- {
- audioFrame.vad_activity_ = _previousAudioActivity;
- audioFrame.speech_type_ = AudioFrame::kPLC;
- }
- else if(type == kOutputCNG)
- {
- audioFrame.vad_activity_ = AudioFrame::kVadPassive;
- audioFrame.speech_type_ = AudioFrame::kCNG;
- }
- else
- {
- audioFrame.vad_activity_ = AudioFrame::kVadPassive;
- audioFrame.speech_type_ = AudioFrame::kPLCCNG;
- }
+ audioFrame.samples_per_channel_ =
+ static_cast<WebRtc_UWord16>(payloadLenSample);
+ // NetEq always returns 10 ms of audio.
+ _currentSampFreqKHz =
+ static_cast<float>(audioFrame.samples_per_channel_) / 10.0f;
+ audioFrame.sample_rate_hz_ = audioFrame.samples_per_channel_ * 100;
+ if (_vadStatus) {
+ if (type == kOutputVADPassive) {
+ audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+ audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
+ } else if (type == kOutputNormal) {
+ audioFrame.vad_activity_ = AudioFrame::kVadActive;
+ audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
+ } else if (type == kOutputPLC) {
+ audioFrame.vad_activity_ = _previousAudioActivity;
+ audioFrame.speech_type_ = AudioFrame::kPLC;
+ } else if (type == kOutputCNG) {
+ audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+ audioFrame.speech_type_ = AudioFrame::kCNG;
+ } else {
+ audioFrame.vad_activity_ = AudioFrame::kVadPassive;
+ audioFrame.speech_type_ = AudioFrame::kPLCCNG;
}
- else
- {
- // Always return kVadUnknown when receive VAD is inactive
- audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
-
- if(type == kOutputNormal)
- {
- audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- }
- else if(type == kOutputPLC)
- {
- audioFrame.speech_type_ = AudioFrame::kPLC;
- }
- else if(type == kOutputPLCtoCNG)
- {
- audioFrame.speech_type_ = AudioFrame::kPLCCNG;
- }
- else if(type == kOutputCNG)
- {
- audioFrame.speech_type_ = AudioFrame::kCNG;
- }
- else
- {
- // type is kOutputVADPassive which
- // we don't expect to get if _vadStatus is false
- WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
- "RecOut: NetEq returned kVadPassive while _vadStatus is false.");
- audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
- audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
- }
+ } else {
+    // Always return kVadUnknown when the receive-side VAD is inactive.
+ audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
+ if (type == kOutputNormal) {
+ audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
+ } else if (type == kOutputPLC) {
+ audioFrame.speech_type_ = AudioFrame::kPLC;
+ } else if (type == kOutputPLCtoCNG) {
+ audioFrame.speech_type_ = AudioFrame::kPLCCNG;
+ } else if (type == kOutputCNG) {
+ audioFrame.speech_type_ = AudioFrame::kCNG;
+ } else {
+      // type is kOutputVADPassive, which we do not expect to get if
+      // _vadStatus is false.
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq returned kVadPassive while _vadStatus is false.");
+ audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
+ audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
}
- _previousAudioActivity = audioFrame.vad_activity_;
+ }
+ _previousAudioActivity = audioFrame.vad_activity_;
- return 0;
+ return 0;
}
// When ACMGenericCodec has set the codec specific parameters in codecDef
// it calls AddCodec() to add the new codec to the NetEQ database.
-WebRtc_Word32
-ACMNetEQ::AddCodec(
- WebRtcNetEQ_CodecDef* codecDef,
- bool toMaster)
-{
- if (codecDef == NULL)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "ACMNetEQ::AddCodec: error, codecDef is NULL");
- return -1;
- }
- CriticalSectionScoped lock(_netEqCritSect);
+WebRtc_Word32 ACMNetEQ::AddCodec(WebRtcNetEQ_CodecDef* codecDef,
+ bool toMaster) {
+ if (codecDef == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: error, codecDef is NULL");
+ return -1;
+ }
+ CriticalSectionScoped lock(_netEqCritSect);
- WebRtc_Word16 idx;
- if(toMaster)
- {
- idx = 0;
- }
- else
- {
- idx = 1;
- }
+ WebRtc_Word16 idx;
+ if (toMaster) {
+ idx = 0;
+ } else {
+ idx = 1;
+ }
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "ACMNetEQ::AddCodec: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_CodecDbAdd(_inst[idx], codecDef) < 0)
- {
- LogError("CodecDB_Add", idx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "ACMNetEQ::AddCodec: NetEq, error in adding codec");
- return -1;
- }
- else
- {
- return 0;
- }
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_CodecDbAdd(_inst[idx], codecDef) < 0) {
+ LogError("CodecDB_Add", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: NetEq, error in adding codec");
+ return -1;
+ } else {
+ return 0;
+ }
}
// Creates a Word16 RTP packet out of a Word8 payload and an rtp info struct.
// Must be byte order safe.
-void
-ACMNetEQ::RTPPack(
- WebRtc_Word16* rtpPacket,
- const WebRtc_Word8* payload,
- const WebRtc_Word32 payloadLengthW8,
- const WebRtcRTPHeader& rtpInfo)
-{
- WebRtc_Word32 idx = 0;
- WEBRTC_SPL_SET_BYTE(rtpPacket, (WebRtc_Word8)0x80, idx);
+void ACMNetEQ::RTPPack(WebRtc_Word16* rtpPacket, const WebRtc_Word8* payload,
+ const WebRtc_Word32 payloadLengthW8,
+ const WebRtcRTPHeader& rtpInfo) {
+ WebRtc_Word32 idx = 0;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, (WebRtc_Word8) 0x80, idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, rtpInfo.header.payloadType, idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.sequenceNumber), 1),
+ idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.sequenceNumber), 0),
+ idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.timestamp), 3), idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.timestamp), 2), idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.timestamp), 1), idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket,
+ WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.timestamp), 0), idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.ssrc), 3),
+ idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.ssrc), 2),
+ idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.ssrc), 1),
+ idx);
+ idx++;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(&(rtpInfo.header.ssrc), 0),
+ idx);
+ idx++;
+ for (WebRtc_Word16 i = 0; i < payloadLengthW8; i++) {
+ WEBRTC_SPL_SET_BYTE(rtpPacket, payload[i], idx);
idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, rtpInfo.header.payloadType, idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.sequenceNumber), 1), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.sequenceNumber), 0), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.timestamp), 3), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.timestamp), 2), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.timestamp), 1), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.timestamp), 0), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.ssrc), 3), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.ssrc), 2), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.ssrc), 1), idx);
- idx++;
-
- WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
- &(rtpInfo.header.ssrc), 0), idx);
- idx++;
-
- for (WebRtc_Word16 i=0; i < payloadLengthW8; i++)
- {
- WEBRTC_SPL_SET_BYTE(rtpPacket, payload[i], idx);
- idx++;
- }
- if (payloadLengthW8 & 1)
- {
- // Our 16 bits buffer is one byte too large, set that
- // last byte to zero.
- WEBRTC_SPL_SET_BYTE(rtpPacket, 0x0, idx);
- }
+ }
+ if (payloadLengthW8 & 1) {
+    // Our 16-bit buffer is one byte too large; set that last byte to zero.
+ WEBRTC_SPL_SET_BYTE(rtpPacket, 0x0, idx);
+ }
}
-WebRtc_Word16
-ACMNetEQ::EnableVAD()
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if (_vadStatus)
- {
- return 0;
- }
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetVADStatus: NetEq is not initialized.");
- return -1;
- }
- // VAD was off and we have to turn it on
- if(EnableVADByIdxSafe(idx) < 0)
- {
- return -1;
- }
-
- // Set previous VAD status to PASSIVE
- _previousAudioActivity = AudioFrame::kVadPassive;
- }
- _vadStatus = true;
+WebRtc_Word16 ACMNetEQ::EnableVAD() {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (_vadStatus) {
return 0;
+ }
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVADStatus: NetEq is not initialized.");
+ return -1;
+ }
+ // VAD was off and we have to turn it on
+ if (EnableVADByIdxSafe(idx) < 0) {
+ return -1;
+ }
+
+ // Set previous VAD status to PASSIVE
+ _previousAudioActivity = AudioFrame::kVadPassive;
+ }
+ _vadStatus = true;
+ return 0;
}
-
-ACMVADMode
-ACMNetEQ::VADMode() const
-{
- CriticalSectionScoped lock(_netEqCritSect);
- return _vadMode;
+ACMVADMode ACMNetEQ::VADMode() const {
+ CriticalSectionScoped lock(_netEqCritSect);
+ return _vadMode;
}
-
-WebRtc_Word16
-ACMNetEQ::SetVADMode(
- const ACMVADMode mode)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if((mode < VADNormal) || (mode > VADVeryAggr))
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetVADMode: NetEq error: could not set VAD mode, mode is not supported");
- return -1;
- }
- else
- {
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetVADMode: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_SetVADMode(_inst[idx], mode) < 0)
- {
- LogError("SetVADmode", idx);
- return -1;
- }
- }
- _vadMode = mode;
- return 0;
- }
-}
-
-
-WebRtc_Word32
-ACMNetEQ::FlushBuffers()
-{
- CriticalSectionScoped lock(_netEqCritSect);
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "FlushBuffers: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_FlushBuffers(_inst[idx]) < 0)
- {
- LogError("FlushBuffers", idx);
- return -1;
- }
- }
- return 0;
-}
-
-WebRtc_Word16
-ACMNetEQ::RemoveCodec(
- WebRtcNetEQDecoder codecIdx,
- bool isStereo)
-{
- // sanity check
- if((codecIdx <= kDecoderReservedStart) ||
- (codecIdx >= kDecoderReservedEnd))
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RemoveCodec: NetEq error: could not Remove Codec, codec index out of range");
- return -1;
- }
- CriticalSectionScoped lock(_netEqCritSect);
- if(!_isInitialized[0])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "RemoveCodec: NetEq is not initialized.");
- return -1;
- }
-
- if(WebRtcNetEQ_CodecDbRemove(_inst[0], codecIdx) < 0)
- {
- LogError("CodecDB_Remove", 0);
- return -1;
- }
-
- if(isStereo)
- {
- if(WebRtcNetEQ_CodecDbRemove(_inst[1], codecIdx) < 0)
- {
- LogError("CodecDB_Remove", 1);
- return -1;
- }
- }
-
- return 0;
-}
-
-WebRtc_Word16
-ACMNetEQ::SetBackgroundNoiseMode(
- const ACMBackgroundNoiseMode mode)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
- {
- if(!_isInitialized[idx])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "SetBackgroundNoiseMode: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_SetBGNMode(_inst[idx], (WebRtcNetEQBGNMode)mode) < 0)
- {
- LogError("SetBGNMode", idx);
- return -1;
- }
- }
- return 0;
-}
-
-WebRtc_Word16
-ACMNetEQ::BackgroundNoiseMode(
- ACMBackgroundNoiseMode& mode)
-{
- WebRtcNetEQBGNMode myMode;
- CriticalSectionScoped lock(_netEqCritSect);
- if(!_isInitialized[0])
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "BackgroundNoiseMode: NetEq is not initialized.");
- return -1;
- }
- if(WebRtcNetEQ_GetBGNMode(_inst[0], &myMode) < 0)
- {
- LogError("WebRtcNetEQ_GetBGNMode", 0);
- return -1;
- }
- else
- {
- mode = (ACMBackgroundNoiseMode)myMode;
- }
- return 0;
-}
-
-void
-ACMNetEQ::SetUniqueId(
- WebRtc_Word32 id)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- _id = id;
-}
-
-
-void
-ACMNetEQ::LogError(
- const char* neteqFuncName,
- const WebRtc_Word16 idx) const
-{
- char errorName[NETEQ_ERR_MSG_LEN_BYTE];
- char myFuncName[50];
- int neteqErrorCode = WebRtcNetEQ_GetErrorCode(_inst[idx]);
- WebRtcNetEQ_GetErrorName(neteqErrorCode, errorName, NETEQ_ERR_MSG_LEN_BYTE - 1);
- strncpy(myFuncName, neteqFuncName, 49);
- errorName[NETEQ_ERR_MSG_LEN_BYTE - 1] = '\0';
- myFuncName[49] = '\0';
+WebRtc_Word16 ACMNetEQ::SetVADMode(const ACMVADMode mode) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if ((mode < VADNormal) || (mode > VADVeryAggr)) {
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "NetEq-%d Error in function %s, error-code: %d, error-string: %s",
- idx,
- myFuncName,
- neteqErrorCode,
- errorName);
+ "SetVADMode: NetEq error: could not set VAD mode, mode is not "
+ "supported");
+ return -1;
+ } else {
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVADMode: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_SetVADMode(_inst[idx], mode) < 0) {
+ LogError("SetVADmode", idx);
+ return -1;
+ }
+ }
+ _vadMode = mode;
+ return 0;
+ }
}
+WebRtc_Word32 ACMNetEQ::FlushBuffers() {
+ CriticalSectionScoped lock(_netEqCritSect);
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "FlushBuffers: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_FlushBuffers(_inst[idx]) < 0) {
+ LogError("FlushBuffers", idx);
+ return -1;
+ }
+ }
+ return 0;
+}
-WebRtc_Word32
-ACMNetEQ::PlayoutTimestamp(
- WebRtc_UWord32& timestamp)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- if(WebRtcNetEQ_GetSpeechTimeStamp(_inst[0], ×tamp) < 0)
- {
- LogError("GetSpeechTimeStamp", 0);
- return -1;
+WebRtc_Word16 ACMNetEQ::RemoveCodec(WebRtcNetEQDecoder codecIdx,
+ bool isStereo) {
+ // sanity check
+ if ((codecIdx <= kDecoderReservedStart) ||
+ (codecIdx >= kDecoderReservedEnd)) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RemoveCodec: NetEq error: could not Remove Codec, codec index out "
+ "of range");
+ return -1;
+ }
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RemoveCodec: NetEq is not initialized.");
+ return -1;
+ }
+
+ if (WebRtcNetEQ_CodecDbRemove(_inst[0], codecIdx) < 0) {
+ LogError("CodecDB_Remove", 0);
+ return -1;
+ }
+
+ if (isStereo) {
+ if (WebRtcNetEQ_CodecDbRemove(_inst[1], codecIdx) < 0) {
+ LogError("CodecDB_Remove", 1);
+ return -1;
}
- else
- {
- return 0;
+ }
+
+ return 0;
+}
+
+WebRtc_Word16 ACMNetEQ::SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ for (WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++) {
+ if (!_isInitialized[idx]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetBackgroundNoiseMode: NetEq is not initialized.");
+ return -1;
}
+ if (WebRtcNetEQ_SetBGNMode(_inst[idx], (WebRtcNetEQBGNMode) mode) < 0) {
+ LogError("SetBGNMode", idx);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+WebRtc_Word16 ACMNetEQ::BackgroundNoiseMode(ACMBackgroundNoiseMode& mode) {
+ WebRtcNetEQBGNMode myMode;
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (!_isInitialized[0]) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "BackgroundNoiseMode: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_GetBGNMode(_inst[0], &myMode) < 0) {
+ LogError("WebRtcNetEQ_GetBGNMode", 0);
+ return -1;
+ } else {
+ mode = (ACMBackgroundNoiseMode) myMode;
+ }
+ return 0;
+}
+
+void ACMNetEQ::SetUniqueId(WebRtc_Word32 id) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ _id = id;
+}
+
+void ACMNetEQ::LogError(const char* neteqFuncName,
+ const WebRtc_Word16 idx) const {
+ char errorName[NETEQ_ERR_MSG_LEN_BYTE];
+ char myFuncName[50];
+ int neteqErrorCode = WebRtcNetEQ_GetErrorCode(_inst[idx]);
+ WebRtcNetEQ_GetErrorName(neteqErrorCode, errorName,
+ NETEQ_ERR_MSG_LEN_BYTE - 1);
+ strncpy(myFuncName, neteqFuncName, 49);
+ errorName[NETEQ_ERR_MSG_LEN_BYTE - 1] = '\0';
+ myFuncName[49] = '\0';
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "NetEq-%d Error in function %s, error-code: %d, error-string: %s", idx,
+ myFuncName, neteqErrorCode, errorName);
+}
+
+WebRtc_Word32 ACMNetEQ::PlayoutTimestamp(WebRtc_UWord32& timestamp) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ if (WebRtcNetEQ_GetSpeechTimeStamp(_inst[0], ×tamp) < 0) {
+ LogError("GetSpeechTimeStamp", 0);
+ return -1;
+ } else {
+ return 0;
+ }
}
void ACMNetEQ::RemoveSlaves() {
@@ -1121,125 +902,108 @@
}
}
-WebRtc_Word16
-ACMNetEQ::AddSlave(
- const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- const WebRtc_Word16 slaveIdx = 1;
- if(_numSlaves < 1)
- {
- // initialize the receiver, this also sets up VAD.
- if(InitByIdxSafe(slaveIdx) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not Initialize");
- return -1;
- }
-
- // Allocate buffer.
- if(AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, slaveIdx) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not Allocate Packet Buffer");
- return -1;
- }
-
- if(_masterSlaveInfo != NULL)
- {
- free(_masterSlaveInfo);
- _masterSlaveInfo = NULL;
- }
- int msInfoSize = WebRtcNetEQ_GetMasterSlaveInfoSize();
- _masterSlaveInfo = malloc(msInfoSize);
-
- if(_masterSlaveInfo == NULL)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not Allocate memory for Master-Slave Info");
- return -1;
- }
-
- // We accept this as initialized NetEQ, the rest is to synchronize
- // Slave with Master.
- _numSlaves = 1;
- _isInitialized[slaveIdx] = true;
-
- // Set Slave delay as all other instances.
- if(WebRtcNetEQ_SetExtraDelay(_inst[slaveIdx], _extraDelay) < 0)
- {
- LogError("SetExtraDelay", slaveIdx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not set delay");
- return -1;
- }
-
- // Set AVT
- if(WebRtcNetEQ_SetAVTPlayout(_inst[slaveIdx], (_avtPlayout) ? 1 : 0) < 0)
- {
- LogError("SetAVTPlayout", slaveIdx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not set AVT playout.");
- return -1;
- }
-
- // Set Background Noise
- WebRtcNetEQBGNMode currentMode;
- if(WebRtcNetEQ_GetBGNMode(_inst[0], ¤tMode) < 0)
- {
- LogError("GetBGNMode", 0);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AAddSlave: AddSlave Failed, Could not Get BGN form Master.");
- return -1;
- }
-
- if(WebRtcNetEQ_SetBGNMode(_inst[slaveIdx], (WebRtcNetEQBGNMode)currentMode) < 0)
- {
- LogError("SetBGNMode", slaveIdx);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not set BGN mode.");
- return -1;
- }
-
- enum WebRtcNetEQPlayoutMode playoutMode = kPlayoutOff;
- switch(_playoutMode)
- {
- case voice:
- playoutMode = kPlayoutOn;
- break;
- case fax:
- playoutMode = kPlayoutFax;
- break;
- case streaming:
- playoutMode = kPlayoutStreaming;
- break;
- }
- if(WebRtcNetEQ_SetPlayoutMode(_inst[slaveIdx], playoutMode) < 0)
- {
- LogError("SetPlayoutMode", 1);
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
- "AddSlave: AddSlave Failed, Could not Set Playout Mode.");
- return -1;
- }
+WebRtc_Word16 ACMNetEQ::AddSlave(const WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ const WebRtc_Word16 slaveIdx = 1;
+ if (_numSlaves < 1) {
+    // Initialize the receiver; this also sets up VAD.
+ if (InitByIdxSafe(slaveIdx) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Initialize");
+ return -1;
}
- return 0;
+ // Allocate buffer.
+ if (AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, slaveIdx) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Allocate Packet Buffer");
+ return -1;
+ }
+
+ if (_masterSlaveInfo != NULL) {
+ free(_masterSlaveInfo);
+ _masterSlaveInfo = NULL;
+ }
+ int msInfoSize = WebRtcNetEQ_GetMasterSlaveInfoSize();
+ _masterSlaveInfo = malloc(msInfoSize);
+
+ if (_masterSlaveInfo == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Allocate memory for "
+ "Master-Slave Info");
+ return -1;
+ }
+
+    // At this point the NetEQ instance counts as initialized; what remains
+    // is to synchronize the Slave with the Master.
+ _numSlaves = 1;
+ _isInitialized[slaveIdx] = true;
+
+    // Set the Slave delay to match the other instances.
+ if (WebRtcNetEQ_SetExtraDelay(_inst[slaveIdx], _extraDelay) < 0) {
+ LogError("SetExtraDelay", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set delay");
+ return -1;
+ }
+
+ // Set AVT
+ if (WebRtcNetEQ_SetAVTPlayout(_inst[slaveIdx], (_avtPlayout) ? 1 : 0) < 0) {
+ LogError("SetAVTPlayout", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set AVT playout.");
+ return -1;
+ }
+
+ // Set Background Noise
+ WebRtcNetEQBGNMode currentMode;
+ if (WebRtcNetEQ_GetBGNMode(_inst[0], ¤tMode) < 0) {
+ LogError("GetBGNMode", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+          "AddSlave: AddSlave Failed, Could not Get BGN from Master.");
+ return -1;
+ }
+
+ if (WebRtcNetEQ_SetBGNMode(_inst[slaveIdx],
+ (WebRtcNetEQBGNMode) currentMode) < 0) {
+ LogError("SetBGNMode", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set BGN mode.");
+ return -1;
+ }
+
+ enum WebRtcNetEQPlayoutMode playoutMode = kPlayoutOff;
+ switch (_playoutMode) {
+ case voice:
+ playoutMode = kPlayoutOn;
+ break;
+ case fax:
+ playoutMode = kPlayoutFax;
+ break;
+ case streaming:
+ playoutMode = kPlayoutStreaming;
+ break;
+ }
+ if (WebRtcNetEQ_SetPlayoutMode(_inst[slaveIdx], playoutMode) < 0) {
+ LogError("SetPlayoutMode", 1);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Set Playout Mode.");
+ return -1;
+ }
+ }
+
+ return 0;
}
-void
-ACMNetEQ::SetReceivedStereo(
- bool receivedStereo)
-{
- CriticalSectionScoped lock(_netEqCritSect);
- _receivedStereo = receivedStereo;
+void ACMNetEQ::SetReceivedStereo(bool receivedStereo) {
+ CriticalSectionScoped lock(_netEqCritSect);
+ _receivedStereo = receivedStereo;
}
-WebRtc_UWord8
-ACMNetEQ::NumSlaves()
-{
- CriticalSectionScoped lock(_netEqCritSect);
- return _numSlaves;
+WebRtc_UWord8 ACMNetEQ::NumSlaves() {
+ CriticalSectionScoped lock(_netEqCritSect);
+ return _numSlaves;
}
-} // namespace webrtc
+} // namespace webrtc
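
One detail of RecIn() above that deserves a standalone note is the receive-timestamp arithmetic: the wall-clock time in milliseconds is masked down to its 26 least significant bits before being scaled by the sampling rate in kHz. 2^26 ms is roughly 18.6 hours of range, and dropping the top 6 bits keeps the scaled product inside an unsigned 32-bit value for any sampling rate below 64 kHz, which comfortably covers the rates NetEQ handles. The following is a self-contained sketch of the same computation; it is not part of this CL and uses plain stdint types instead of the WebRtc_* typedefs.

    #include <stdint.h>

    // Mirrors the recvTimestamp computation in ACMNetEQ::RecIn(): mask the
    // wall-clock milliseconds to 26 bits, then scale by the sampling rate in
    // kHz. With samp_freq_khz at most 48 (and 48 < 2^6) the product stays
    // below 2^32, so the cast back to uint32_t cannot wrap.
    static uint32_t ReceiveTimestamp(int64_t now_ms, float samp_freq_khz) {
      const uint32_t masked_ms = static_cast<uint32_t>(now_ms & 0x03ffffff);
      return static_cast<uint32_t>(samp_freq_khz * masked_ms);
    }

    // Example: at 32 kHz, two packets arriving 10 ms apart get timestamps
    // that differ by 320, i.e. one 10 ms NetEQ output frame at that rate.
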
diff --git a/modules/audio_coding/main/source/acm_neteq.h b/modules/audio_coding/main/source/acm_neteq.h
index db6c0fd..0086460 100644
--- a/modules/audio_coding/main/source/acm_neteq.h
+++ b/modules/audio_coding/main/source/acm_neteq.h
@@ -29,341 +29,317 @@
#define MAX_NUM_SLAVE_NETEQ 1
-class ACMNetEQ
-{
-public:
- // Constructor of the class
- ACMNetEQ();
+class ACMNetEQ {
+ public:
+ // Constructor of the class
+ ACMNetEQ();
- // Destructor of the class.
- ~ACMNetEQ();
+ // Destructor of the class.
+ ~ACMNetEQ();
- //
- // Init()
- // Allocates memory for NetEQ and VAD and initializes them.
- //
- // Return value : 0 if ok.
- // -1 if NetEQ or VAD returned an error or
- // if out of memory.
- //
- WebRtc_Word32 Init();
+ //
+ // Init()
+ // Allocates memory for NetEQ and VAD and initializes them.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ or VAD returned an error or
+ // if out of memory.
+ //
+ WebRtc_Word32 Init();
- //
- // RecIn()
- // Gives the payload to NetEQ.
- //
- // Input:
- // - incomingPayload : Incoming audio payload.
- // - payloadLength : Length of incoming audio payload.
- // - rtpInfo : RTP header for the incoming payload containing
- // information about payload type, sequence number,
- // timestamp, ssrc and marker bit.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 RecIn(
- const WebRtc_UWord8* incomingPayload,
- const WebRtc_Word32 payloadLength,
- const WebRtcRTPHeader& rtpInfo);
+ //
+ // RecIn()
+ // Gives the payload to NetEQ.
+ //
+ // Input:
+ // - incomingPayload : Incoming audio payload.
+ // - payloadLength : Length of incoming audio payload.
+ // - rtpInfo : RTP header for the incoming payload containing
+ // information about payload type, sequence number,
+ // timestamp, ssrc and marker bit.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 RecIn(const WebRtc_UWord8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo);
- //
- // RecOut()
- // Asks NetEQ for 10 ms of decoded audio.
- //
- // Input:
- // -audioFrame : an audio frame were output data and
- // associated parameters are written to.
- //
- // Return value : 0 if ok.
- // -1 if NetEQ returned an error.
- //
- WebRtc_Word32 RecOut(
- AudioFrame& audioFrame);
+ //
+ // RecOut()
+ // Asks NetEQ for 10 ms of decoded audio.
+ //
+ // Input:
+  //   -audioFrame       : an audio frame where output data and
+ // associated parameters are written to.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ returned an error.
+ //
+ WebRtc_Word32 RecOut(AudioFrame& audioFrame);
- //
- // AddCodec()
- // Adds a new codec to the NetEQ codec database.
- //
- // Input:
- // - codecDef : The codec to be added.
- // - toMaster : true if the codec has to be added to Master
- // NetEq, otherwise will be added to the Slave
- // NetEQ.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 AddCodec(
- WebRtcNetEQ_CodecDef *codecDef,
- bool toMaster = true);
+ //
+ // AddCodec()
+ // Adds a new codec to the NetEQ codec database.
+ //
+ // Input:
+ // - codecDef : The codec to be added.
+ // - toMaster : true if the codec has to be added to Master
+ // NetEq, otherwise will be added to the Slave
+ // NetEQ.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 AddCodec(WebRtcNetEQ_CodecDef *codecDef, bool toMaster = true);
- //
- // AllocatePacketBuffer()
- // Allocates the NetEQ packet buffer.
- //
- // Input:
- // - usedCodecs : An array of the codecs to be used by NetEQ.
- // - noOfCodecs : Number of codecs in usedCodecs.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 AllocatePacketBuffer(
- const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs);
+ //
+ // AllocatePacketBuffer()
+ // Allocates the NetEQ packet buffer.
+ //
+ // Input:
+ // - usedCodecs : An array of the codecs to be used by NetEQ.
+ // - noOfCodecs : Number of codecs in usedCodecs.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 AllocatePacketBuffer(const WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs);
- //
- // SetExtraDelay()
- // Sets an delayInMS milliseconds extra delay in NetEQ.
- //
- // Input:
- // - delayInMS : Extra delay in milliseconds.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 SetExtraDelay(
- const WebRtc_Word32 delayInMS);
+ //
+ // SetExtraDelay()
+  // Sets an extra delay of delayInMS milliseconds in NetEQ.
+ //
+ // Input:
+ // - delayInMS : Extra delay in milliseconds.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetExtraDelay(const WebRtc_Word32 delayInMS);
- //
- // SetAVTPlayout()
- // Enable/disable playout of AVT payloads.
- //
- // Input:
- // - enable : Enable if true, disable if false.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 SetAVTPlayout(
- const bool enable);
+ //
+ // SetAVTPlayout()
+ // Enable/disable playout of AVT payloads.
+ //
+ // Input:
+ // - enable : Enable if true, disable if false.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetAVTPlayout(const bool enable);
- //
- // AVTPlayout()
- // Get the current AVT playout state.
- //
- // Return value : True if AVT playout is enabled.
- // False if AVT playout is disabled.
- //
- bool AVTPlayout() const;
+ //
+ // AVTPlayout()
+ // Get the current AVT playout state.
+ //
+ // Return value : True if AVT playout is enabled.
+ // False if AVT playout is disabled.
+ //
+ bool AVTPlayout() const;
- //
- // CurrentSampFreqHz()
- // Get the current sampling frequency in Hz.
- //
- // Return value : Sampling frequency in Hz.
- //
- WebRtc_Word32 CurrentSampFreqHz() const;
+ //
+ // CurrentSampFreqHz()
+ // Get the current sampling frequency in Hz.
+ //
+ // Return value : Sampling frequency in Hz.
+ //
+ WebRtc_Word32 CurrentSampFreqHz() const;
- //
- // SetPlayoutMode()
- // Sets the playout mode to voice or fax.
- //
- // Input:
- // - mode : The playout mode to be used, voice,
- // fax, or streaming.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 SetPlayoutMode(
- const AudioPlayoutMode mode);
+ //
+ // SetPlayoutMode()
+ // Sets the playout mode to voice or fax.
+ //
+ // Input:
+ // - mode : The playout mode to be used, voice,
+ // fax, or streaming.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetPlayoutMode(const AudioPlayoutMode mode);
- //
- // PlayoutMode()
- // Get the current playout mode.
- //
- // Return value : The current playout mode.
- //
- AudioPlayoutMode PlayoutMode() const;
+ //
+ // PlayoutMode()
+ // Get the current playout mode.
+ //
+ // Return value : The current playout mode.
+ //
+ AudioPlayoutMode PlayoutMode() const;
- //
- // NetworkStatistics()
- // Get the current network statistics from NetEQ.
- //
- // Output:
- // - statistics : The current network statistics.
- //
- // Return value : 0 if ok.
- // <0 if NetEQ returned an error.
- //
- WebRtc_Word32 NetworkStatistics(
- ACMNetworkStatistics* statistics) const;
+ //
+ // NetworkStatistics()
+ // Get the current network statistics from NetEQ.
+ //
+ // Output:
+ // - statistics : The current network statistics.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 NetworkStatistics(ACMNetworkStatistics* statistics) const;
- //
- // VADMode()
- // Get the current VAD Mode.
- //
- // Return value : The current VAD mode.
- //
- ACMVADMode VADMode() const;
+ //
+ // VADMode()
+ // Get the current VAD Mode.
+ //
+ // Return value : The current VAD mode.
+ //
+ ACMVADMode VADMode() const;
- //
- // SetVADMode()
- // Set the VAD mode.
- //
- // Input:
- // - mode : The new VAD mode.
- //
- // Return value : 0 if ok.
- // -1 if an error occurred.
- //
- WebRtc_Word16 SetVADMode(
- const ACMVADMode mode);
+ //
+ // SetVADMode()
+ // Set the VAD mode.
+ //
+ // Input:
+ // - mode : The new VAD mode.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 SetVADMode(const ACMVADMode mode);
- //
- // DecodeLock()
- // Get the decode lock used to protect decoder instances while decoding.
- //
- // Return value : Pointer to the decode lock.
- //
- RWLockWrapper* DecodeLock() const
- {
- return _decodeLock;
- }
+ //
+ // DecodeLock()
+ // Get the decode lock used to protect decoder instances while decoding.
+ //
+ // Return value : Pointer to the decode lock.
+ //
+ RWLockWrapper* DecodeLock() const {
+ return _decodeLock;
+ }
- //
- // FlushBuffers()
- // Flushes the NetEQ packet and speech buffers.
- //
- // Return value : 0 if ok.
- // -1 if NetEQ returned an error.
- //
- WebRtc_Word32 FlushBuffers();
+ //
+ // FlushBuffers()
+ // Flushes the NetEQ packet and speech buffers.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ returned an error.
+ //
+ WebRtc_Word32 FlushBuffers();
- //
- // RemoveCodec()
- // Removes a codec from the NetEQ codec database.
- //
- // Input:
- // - codecIdx : Codec to be removed.
- //
- // Return value : 0 if ok.
- // -1 if an error occurred.
- //
- WebRtc_Word16 RemoveCodec(
- WebRtcNetEQDecoder codecIdx,
- bool isStereo = false);
+ //
+ // RemoveCodec()
+ // Removes a codec from the NetEQ codec database.
+ //
+ // Input:
+ // - codecIdx : Codec to be removed.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 RemoveCodec(WebRtcNetEQDecoder codecIdx, bool isStereo = false);
+ //
+ // SetBackgroundNoiseMode()
+ // Set the mode of the background noise.
+ //
+ // Input:
+ // - mode : an enumerator specifying the mode of the
+ // background noise.
+ //
+ // Return value : 0 if succeeded,
+ // -1 if failed to set the mode.
+ //
+ WebRtc_Word16 SetBackgroundNoiseMode(const ACMBackgroundNoiseMode mode);
- //
- // SetBackgroundNoiseMode()
- // Set the mode of the background noise.
- //
- // Input:
- // - mode : an enumerator specifying the mode of the
- // background noise.
- //
- // Return value : 0 if succeeded,
- // -1 if failed to set the mode.
- //
- WebRtc_Word16 SetBackgroundNoiseMode(
- const ACMBackgroundNoiseMode mode);
+ //
+ // BackgroundNoiseMode()
+ // return the mode of the background noise.
+ //
+ // Return value : The mode of background noise.
+ //
+ WebRtc_Word16 BackgroundNoiseMode(ACMBackgroundNoiseMode& mode);
- //
- // BackgroundNoiseMode()
- // return the mode of the background noise.
- //
- // Return value : The mode of background noise.
- //
- WebRtc_Word16 BackgroundNoiseMode(
- ACMBackgroundNoiseMode& mode);
+ void SetUniqueId(WebRtc_Word32 id);
- void SetUniqueId(
- WebRtc_Word32 id);
+ WebRtc_Word32 PlayoutTimestamp(WebRtc_UWord32& timestamp);
- WebRtc_Word32 PlayoutTimestamp(
- WebRtc_UWord32& timestamp);
+ void SetReceivedStereo(bool receivedStereo);
- void SetReceivedStereo(
- bool receivedStereo);
+ WebRtc_UWord8 NumSlaves();
- WebRtc_UWord8 NumSlaves();
+ enum JB {
+ masterJB = 0,
+ slaveJB = 1
+ };
- enum JB {masterJB = 0, slaveJB = 1};
+ // Delete all slaves.
+ void RemoveSlaves();
- // Delete all slaves.
- void RemoveSlaves();
+ WebRtc_Word16 AddSlave(const WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs);
- WebRtc_Word16 AddSlave(
- const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs);
+ private:
+ //
+ // RTPPack()
+ // Creates a Word16 RTP packet out of the payload data in Word16 and
+ // a WebRtcRTPHeader.
+ //
+ // Input:
+ // - payload : Payload to be packetized.
+ // - payloadLengthW8 : Length of the payload in bytes.
+ // - rtpInfo : RTP header struct.
+ //
+ // Output:
+ // - rtpPacket : The RTP packet.
+ //
+ static void RTPPack(WebRtc_Word16* rtpPacket, const WebRtc_Word8* payload,
+ const WebRtc_Word32 payloadLengthW8,
+ const WebRtcRTPHeader& rtpInfo);
-private:
- //
- // RTPPack()
- // Creates a Word16 RTP packet out of the payload data in Word16 and
- // a WebRtcRTPHeader.
- //
- // Input:
- // - payload : Payload to be packetized.
- // - payloadLengthW8 : Length of the payload in bytes.
- // - rtpInfo : RTP header struct.
- //
- // Output:
- // - rtpPacket : The RTP packet.
- //
- static void RTPPack(
- WebRtc_Word16* rtpPacket,
- const WebRtc_Word8* payload,
- const WebRtc_Word32 payloadLengthW8,
- const WebRtcRTPHeader& rtpInfo);
+ void LogError(const char* neteqFuncName, const WebRtc_Word16 idx) const;
- void LogError(
- const char* neteqFuncName,
- const WebRtc_Word16 idx) const;
+ WebRtc_Word16 InitByIdxSafe(const WebRtc_Word16 idx);
- WebRtc_Word16 InitByIdxSafe(
- const WebRtc_Word16 idx);
+ // EnableVAD()
+ // Enable VAD.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 EnableVAD();
- // EnableVAD()
- // Enable VAD.
- //
- // Return value : 0 if ok.
- // -1 if an error occurred.
- //
- WebRtc_Word16 EnableVAD();
+ WebRtc_Word16 EnableVADByIdxSafe(const WebRtc_Word16 idx);
- WebRtc_Word16 EnableVADByIdxSafe(
- const WebRtc_Word16 idx);
+ WebRtc_Word16 AllocatePacketBufferByIdxSafe(
+ const WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs,
+ const WebRtc_Word16 idx);
- WebRtc_Word16 AllocatePacketBufferByIdxSafe(
- const WebRtcNetEQDecoder* usedCodecs,
- WebRtc_Word16 noOfCodecs,
- const WebRtc_Word16 idx);
+ // Delete the NetEQ corresponding to |index|.
+ void RemoveNetEQSafe(int index);
- // Delete the NetEQ corresponding to |index|.
- void RemoveNetEQSafe(int index);
+ void RemoveSlavesSafe();
- void RemoveSlavesSafe();
+ void* _inst[MAX_NUM_SLAVE_NETEQ + 1];
+ void* _instMem[MAX_NUM_SLAVE_NETEQ + 1];
- void* _inst[MAX_NUM_SLAVE_NETEQ + 1];
- void* _instMem[MAX_NUM_SLAVE_NETEQ + 1];
+ WebRtc_Word16* _netEqPacketBuffer[MAX_NUM_SLAVE_NETEQ + 1];
- WebRtc_Word16* _netEqPacketBuffer[MAX_NUM_SLAVE_NETEQ + 1];
+ WebRtc_Word32 _id;
+ float _currentSampFreqKHz;
+ bool _avtPlayout;
+ AudioPlayoutMode _playoutMode;
+ CriticalSectionWrapper* _netEqCritSect;
- WebRtc_Word32 _id;
- float _currentSampFreqKHz;
- bool _avtPlayout;
- AudioPlayoutMode _playoutMode;
- CriticalSectionWrapper* _netEqCritSect;
+ WebRtcVadInst* _ptrVADInst[MAX_NUM_SLAVE_NETEQ + 1];
- WebRtcVadInst* _ptrVADInst[MAX_NUM_SLAVE_NETEQ + 1];
+ bool _vadStatus;
+ ACMVADMode _vadMode;
+ RWLockWrapper* _decodeLock;
+ bool _isInitialized[MAX_NUM_SLAVE_NETEQ + 1];
+ WebRtc_UWord8 _numSlaves;
+ bool _receivedStereo;
+ void* _masterSlaveInfo;
+ AudioFrame::VADActivity _previousAudioActivity;
+ WebRtc_Word32 _extraDelay;
- bool _vadStatus;
- ACMVADMode _vadMode;
- RWLockWrapper* _decodeLock;
- bool _isInitialized[MAX_NUM_SLAVE_NETEQ + 1];
- WebRtc_UWord8 _numSlaves;
- bool _receivedStereo;
- void* _masterSlaveInfo;
- AudioFrame::VADActivity _previousAudioActivity;
- WebRtc_Word32 _extraDelay;
-
- CriticalSectionWrapper* _callbackCritSect;
+ CriticalSectionWrapper* _callbackCritSect;
};
-} //namespace webrtc
+} //namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_NETEQ_H_
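
Taken together, the header above describes the receive-side call sequence: Init(), AllocatePacketBuffer(), AddCodec() for each decoder, then RecIn() for every incoming packet and RecOut() every 10 ms. A reduced sketch under those assumptions (the codec list, the filled WebRtcNetEQ_CodecDef and the RTP handling are left to the caller; names are illustrative):

  int SetUpReceiver(ACMNetEQ& neteq,
                    const WebRtcNetEQDecoder* used_codecs,
                    WebRtc_Word16 num_codecs,
                    WebRtcNetEQ_CodecDef* codec_def) {
    if (neteq.Init() < 0 ||
        neteq.AllocatePacketBuffer(used_codecs, num_codecs) < 0 ||
        neteq.AddCodec(codec_def) < 0) {  // toMaster defaults to true.
      return -1;
    }
    return 0;
  }

  // Per received packet:    neteq.RecIn(payload, payload_length, rtp_info);
  // Every 10 ms of playout: AudioFrame frame; neteq.RecOut(frame);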
diff --git a/modules/audio_coding/main/source/acm_pcm16b.cc b/modules/audio_coding/main/source/acm_pcm16b.cc
index e44be38..4e88743 100644
--- a/modules/audio_coding/main/source/acm_pcm16b.cc
+++ b/modules/audio_coding/main/source/acm_pcm16b.cc
@@ -18,7 +18,7 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_PCM16
- #include "pcm16b.h"
+#include "pcm16b.h"
#endif
namespace webrtc {
@@ -90,7 +90,6 @@
}
#else //===================== Actual Implementation =======================
-
ACMPCM16B::ACMPCM16B(WebRtc_Word16 codecID) {
_codecID = codecID;
_samplingFreqHz = ACMCodecDB::CodecFreq(_codecID);
@@ -103,8 +102,8 @@
WebRtc_Word16 ACMPCM16B::InternalEncode(WebRtc_UWord8* bitStream,
WebRtc_Word16* bitStreamLenByte) {
*bitStreamLenByte = WebRtcPcm16b_Encode(&_inAudio[_inAudioIxRead],
- _frameLenSmpl * _noChannels,
- bitStream);
+ _frameLenSmpl * _noChannels,
+ bitStream);
  // Increment the read index to tell the caller how far
// we have gone forward in reading the audio buffer.
_inAudioIxRead += _frameLenSmpl * _noChannels;
@@ -144,13 +143,13 @@
}
case 16000: {
SET_CODEC_PAR(codecDef, kDecoderPCM16Bwb, codecInst.pltype, NULL,
- 16000);
+ 16000);
SET_PCM16B_WB_FUNCTIONS(codecDef);
break;
}
case 32000: {
SET_CODEC_PAR(codecDef, kDecoderPCM16Bswb32kHz, codecInst.pltype,
- NULL, 32000);
+ NULL, 32000);
SET_PCM16B_SWB32_FUNCTIONS(codecDef);
break;
}
@@ -162,19 +161,19 @@
switch(_samplingFreqHz) {
case 8000: {
SET_CODEC_PAR(codecDef, kDecoderPCM16B_2ch, codecInst.pltype, NULL,
- 8000);
+ 8000);
SET_PCM16B_FUNCTIONS(codecDef);
break;
}
case 16000: {
SET_CODEC_PAR(codecDef, kDecoderPCM16Bwb_2ch, codecInst.pltype,
- NULL, 16000);
+ NULL, 16000);
SET_PCM16B_WB_FUNCTIONS(codecDef);
break;
}
case 32000: {
SET_CODEC_PAR(codecDef, kDecoderPCM16Bswb32kHz_2ch, codecInst.pltype,
- NULL, 32000);
+ NULL, 32000);
SET_PCM16B_SWB32_FUNCTIONS(codecDef);
break;
}
@@ -244,4 +243,4 @@
}
#endif
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_pcm16b.h b/modules/audio_coding/main/source/acm_pcm16b.h
index 09fbb05..cd81070 100644
--- a/modules/audio_coding/main/source/acm_pcm16b.h
+++ b/modules/audio_coding/main/source/acm_pcm16b.h
@@ -13,55 +13,47 @@
#include "acm_generic_codec.h"
-namespace webrtc
-{
+namespace webrtc {
-class ACMPCM16B : public ACMGenericCodec
-{
-public:
- ACMPCM16B(WebRtc_Word16 codecID);
- ~ACMPCM16B();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMPCM16B : public ACMGenericCodec {
+ public:
+ ACMPCM16B(WebRtc_Word16 codecID);
+ ~ACMPCM16B();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
+ void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
- WebRtc_Word32 _samplingFreqHz;
+ WebRtc_Word32 _samplingFreqHz;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_PCM16B_H_
diff --git a/modules/audio_coding/main/source/acm_pcma.cc b/modules/audio_coding/main/source/acm_pcma.cc
index c459d25..867f543 100644
--- a/modules/audio_coding/main/source/acm_pcma.cc
+++ b/modules/audio_coding/main/source/acm_pcma.cc
@@ -120,11 +120,11 @@
// end of the bytestream vector. After looping the data is reordered to:
// l1 l2 l3 l4 ... l(N-1) lN r1 r2 r3 r4 ... r(N-1) r(N),
// where N is the total number of samples.
- for (int i = 0; i < *payload_length / 2; i ++) {
+ for (int i = 0; i < *payload_length / 2; i++) {
right_byte = payload[i + 1];
memmove(&payload[i + 1], &payload[i + 2], *payload_length - i - 2);
payload[*payload_length - 1] = right_byte;
}
}
-} // namespace webrtc
+} // namespace webrtc
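
The reordering loop above (shared by the PCMA and PCMU wrappers) moves one right-channel byte to the tail of the buffer per iteration, so an interleaved G.711 payload l1 r1 l2 r2 l3 r3 ends up as l1 l2 l3 r1 r2 r3. A standalone sketch of the same loop with a tiny self-check (the function and test names are illustrative, not part of the module):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  void SplitInterleavedBytes(uint8_t* payload, int32_t length) {
    for (int i = 0; i < length / 2; i++) {
      uint8_t right_byte = payload[i + 1];
      memmove(&payload[i + 1], &payload[i + 2], length - i - 2);
      payload[length - 1] = right_byte;
    }
  }

  void TestSplitInterleavedBytes() {
    // 'L', 'l', '1' stand for left samples; 'R', 'r', '2' for right samples.
    uint8_t data[6] = {'L', 'R', 'l', 'r', '1', '2'};
    SplitInterleavedBytes(data, 6);
    assert(memcmp(data, "Ll1Rr2", 6) == 0);
  }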
diff --git a/modules/audio_coding/main/source/acm_pcma.h b/modules/audio_coding/main/source/acm_pcma.h
index a3cf220..56dac16 100644
--- a/modules/audio_coding/main/source/acm_pcma.h
+++ b/modules/audio_coding/main/source/acm_pcma.h
@@ -13,53 +13,45 @@
#include "acm_generic_codec.h"
-namespace webrtc
-{
+namespace webrtc {
-class ACMPCMA : public ACMGenericCodec
-{
-public:
- ACMPCMA(WebRtc_Word16 codecID);
- ~ACMPCMA();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMPCMA : public ACMGenericCodec {
+ public:
+ ACMPCMA(WebRtc_Word16 codecID);
+ ~ACMPCMA();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
+ void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_PCMA_H_
diff --git a/modules/audio_coding/main/source/acm_pcmu.cc b/modules/audio_coding/main/source/acm_pcmu.cc
index 83240d4..92f7f5c 100644
--- a/modules/audio_coding/main/source/acm_pcmu.cc
+++ b/modules/audio_coding/main/source/acm_pcmu.cc
@@ -56,8 +56,8 @@
WebRtc_Word16 ACMPCMU::InternalInitDecoder(
WebRtcACMCodecParams* /* codecParams */) {
- // This codec does not need initialization, PCM has no instance.
- return 0;
+ // This codec does not need initialization, PCM has no instance.
+ return 0;
}
WebRtc_Word32 ACMPCMU::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
@@ -122,11 +122,11 @@
// end of the bytestream vector. After looping the data is reordered to:
// l1 l2 l3 l4 ... l(N-1) lN r1 r2 r3 r4 ... r(N-1) r(N),
// where N is the total number of samples.
- for (int i = 0; i < *payload_length / 2; i ++) {
+ for (int i = 0; i < *payload_length / 2; i++) {
right_byte = payload[i + 1];
memmove(&payload[i + 1], &payload[i + 2], *payload_length - i - 2);
payload[*payload_length - 1] = right_byte;
}
}
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_pcmu.h b/modules/audio_coding/main/source/acm_pcmu.h
index 716ac40..c8b999a 100644
--- a/modules/audio_coding/main/source/acm_pcmu.h
+++ b/modules/audio_coding/main/source/acm_pcmu.h
@@ -13,53 +13,45 @@
#include "acm_generic_codec.h"
-namespace webrtc
-{
+namespace webrtc {
-class ACMPCMU : public ACMGenericCodec
-{
-public:
- ACMPCMU(WebRtc_Word16 codecID);
- ~ACMPCMU();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMPCMU : public ACMGenericCodec {
+ public:
+ ACMPCMU(WebRtc_Word16 codecID);
+ ~ACMPCMU();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
+ void SplitStereoPacket(uint8_t* payload, int32_t* payload_length);
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_PCMU_H_
diff --git a/modules/audio_coding/main/source/acm_red.cc b/modules/audio_coding/main/source/acm_red.cc
index 232142d..cad985e 100644
--- a/modules/audio_coding/main/source/acm_red.cc
+++ b/modules/audio_coding/main/source/acm_red.cc
@@ -15,129 +15,89 @@
#include "webrtc_neteq.h"
#include "webrtc_neteq_help_macros.h"
-namespace webrtc
-{
+namespace webrtc {
-ACMRED::ACMRED(WebRtc_Word16 codecID)
-{
- _codecID = codecID;
+ACMRED::ACMRED(WebRtc_Word16 codecID) {
+ _codecID = codecID;
}
-
-ACMRED::~ACMRED()
-{
- return;
+ACMRED::~ACMRED() {
+ return;
}
-
-WebRtc_Word16
-ACMRED::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- // RED is never used as an encoder
- // RED has no instance
- return 0;
+WebRtc_Word16 ACMRED::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ // RED is never used as an encoder
+ // RED has no instance
+ return 0;
}
-
-WebRtc_Word16
-ACMRED::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return 0;
+WebRtc_Word16 ACMRED::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
}
-
-WebRtc_Word16
-ACMRED::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- // This codec does not need initialization,
- // RED has no instance
- return 0;
+WebRtc_Word16 ACMRED::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ // This codec does not need initialization,
+ // RED has no instance
+ return 0;
}
-
-WebRtc_Word16
-ACMRED::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- // This codec does not need initialization,
- // RED has no instance
- return 0;
+WebRtc_Word16 ACMRED::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ // This codec does not need initialization,
+ // RED has no instance
+ return 0;
}
+WebRtc_Word32 ACMRED::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ // Todo:
+ // log error
+ return -1;
+ }
-WebRtc_Word32
-ACMRED::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- // Todo:
- // log error
- return -1;
- }
-
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_PCMU_FUNCTION."
- // Then call NetEQ to add the codec to it's
- // database.
- SET_CODEC_PAR((codecDef), kDecoderRED, codecInst.pltype, NULL, 8000);
- SET_RED_FUNCTIONS((codecDef));
- return 0;
+ // Fill up the structure by calling
+  // "SET_CODEC_PAR" & "SET_RED_FUNCTIONS".
+  // Then call NetEQ to add the codec to its
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderRED, codecInst.pltype, NULL, 8000);
+ SET_RED_FUNCTIONS((codecDef));
+ return 0;
}
-
-ACMGenericCodec*
-ACMRED::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMRED::CreateInstance(void) {
+ return NULL;
}
-
-WebRtc_Word16
-ACMRED::InternalCreateEncoder()
-{
- // RED has no instance
- return 0;
+WebRtc_Word16 ACMRED::InternalCreateEncoder() {
+ // RED has no instance
+ return 0;
}
-
-WebRtc_Word16
-ACMRED::InternalCreateDecoder()
-{
- // RED has no instance
- return 0;
+WebRtc_Word16 ACMRED::InternalCreateDecoder() {
+ // RED has no instance
+ return 0;
}
-
-void
-ACMRED::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- // RED has no instance
- return;
+void ACMRED::InternalDestructEncoderInst(void* /* ptrInst */) {
+ // RED has no instance
+ return;
}
-
-void
-ACMRED::DestructEncoderSafe()
-{
- // RED has no instance
- return;
+void ACMRED::DestructEncoderSafe() {
+ // RED has no instance
+ return;
}
-void ACMRED::DestructDecoderSafe()
-{
- // RED has no instance
- return;
+void ACMRED::DestructDecoderSafe() {
+ // RED has no instance
+ return;
}
-} // namespace webrtc
+} // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_red.h b/modules/audio_coding/main/source/acm_red.h
index 3aaae47..1e07823 100644
--- a/modules/audio_coding/main/source/acm_red.h
+++ b/modules/audio_coding/main/source/acm_red.h
@@ -13,51 +13,43 @@
#include "acm_generic_codec.h"
-namespace webrtc
-{
+namespace webrtc {
-class ACMRED : public ACMGenericCodec
-{
-public:
- ACMRED(WebRtc_Word16 codecID);
- ~ACMRED();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMRED : public ACMGenericCodec {
+ public:
+ ACMRED(WebRtc_Word16 codecID);
+ ~ACMRED();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_RED_H_
diff --git a/modules/audio_coding/main/source/acm_speex.cc b/modules/audio_coding/main/source/acm_speex.cc
index 14554d0..ae5f366 100644
--- a/modules/audio_coding/main/source/acm_speex.cc
+++ b/modules/audio_coding/main/source/acm_speex.cc
@@ -17,35 +17,9 @@
#include "webrtc_neteq_help_macros.h"
#ifdef WEBRTC_CODEC_SPEEX
- // NOTE! Speex is not included in the open-source package. The following
- // interface file is needed:
- //
- // /modules/audio_coding/codecs/speex/main/interface/speex_interface.h
- //
- // The API in the header file should match the one below.
- //
- // int16_t WebRtcSpeex_CreateEnc(SPEEX_encinst_t **SPEEXenc_inst,
- // int32_t fs);
- // int16_t WebRtcSpeex_FreeEnc(SPEEX_encinst_t *SPEEXenc_inst);
- // int16_t WebRtcSpeex_CreateDec(SPEEX_decinst_t **SPEEXdec_inst,
- // int32_t fs,
- // int16_t enh_enabled);
- // int16_t WebRtcSpeex_FreeDec(SPEEX_decinst_t *SPEEXdec_inst);
- // int16_t WebRtcSpeex_Encode(SPEEX_encinst_t *SPEEXenc_inst,
- // int16_t *speechIn,
- // int32_t rate);
- // int16_t WebRtcSpeex_EncoderInit(SPEEX_encinst_t *SPEEXenc_inst,
- // int16_t vbr, int16_t complexity,
- // int16_t vad_enable);
- // int16_t WebRtcSpeex_GetBitstream(SPEEX_encinst_t *SPEEXenc_inst,
- // int16_t *encoded);
- // int16_t WebRtcSpeex_DecodePlc(SPEEX_decinst_t *SPEEXdec_inst,
- // int16_t *decoded, int16_t noOfLostFrames);
- // int16_t WebRtcSpeex_Decode(SPEEX_decinst_t *SPEEXdec_inst,
- // int16_t *encoded, int16_t len,
- // int16_t *decoded, int16_t *speechType);
- // int16_t WebRtcSpeex_DecoderInit(SPEEX_decinst_t *SPEEXdec_inst);
- #include "speex_interface.h"
+// NOTE! Speex is not included in the open-source package. Modify this file or
+// your codec API to match the function calls and names of used Speex API file.
+#include "speex_interface.h"
#endif
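
The removed comment block used to spell out the expected Speex wrapper API; since the new note only says to adapt the calls, a header shim along the lines of what this file invokes might declare the following (a sketch reconstructed from the old comment, not the actual proprietary header):

  // speex_interface.h (sketch)
  int16_t WebRtcSpeex_CreateEnc(SPEEX_encinst_t** SPEEXenc_inst, int32_t fs);
  int16_t WebRtcSpeex_FreeEnc(SPEEX_encinst_t* SPEEXenc_inst);
  int16_t WebRtcSpeex_CreateDec(SPEEX_decinst_t** SPEEXdec_inst, int32_t fs,
                                int16_t enh_enabled);
  int16_t WebRtcSpeex_FreeDec(SPEEX_decinst_t* SPEEXdec_inst);
  int16_t WebRtcSpeex_Encode(SPEEX_encinst_t* SPEEXenc_inst, int16_t* speechIn,
                             int32_t rate);
  int16_t WebRtcSpeex_EncoderInit(SPEEX_encinst_t* SPEEXenc_inst, int16_t vbr,
                                  int16_t complexity, int16_t vad_enable);
  int16_t WebRtcSpeex_GetBitstream(SPEEX_encinst_t* SPEEXenc_inst,
                                   int16_t* encoded);
  int16_t WebRtcSpeex_DecodePlc(SPEEX_decinst_t* SPEEXdec_inst,
                                int16_t* decoded, int16_t noOfLostFrames);
  int16_t WebRtcSpeex_Decode(SPEEX_decinst_t* SPEEXdec_inst, int16_t* encoded,
                             int16_t len, int16_t* decoded,
                             int16_t* speechType);
  int16_t WebRtcSpeex_DecoderInit(SPEEX_decinst_t* SPEEXdec_inst);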
namespace webrtc {
@@ -62,561 +36,431 @@
return;
}
-ACMSPEEX::~ACMSPEEX()
-{
- return;
+ACMSPEEX::~ACMSPEEX() {
+ return;
}
-WebRtc_Word16
-ACMSPEEX::InternalEncode(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16* /* bitStreamLenByte */)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::InternalEncode(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */) {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::EnableDTX()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::EnableDTX() {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::DisableDTX()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::DisableDTX() {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::InternalInitEncoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ return -1;
}
-WebRtc_Word32
-ACMSPEEX::CodecDef(
- WebRtcNetEQ_CodecDef& /* codecDef */,
- const CodecInst& /* codecInst */)
-{
- return -1;
+WebRtc_Word32 ACMSPEEX::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */) {
+ return -1;
}
-ACMGenericCodec*
-ACMSPEEX::CreateInstance(void)
-{
- return NULL;
+ACMGenericCodec* ACMSPEEX::CreateInstance(void) {
+ return NULL;
}
-WebRtc_Word16
-ACMSPEEX::InternalCreateEncoder()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::InternalCreateEncoder() {
+ return -1;
}
-void
-ACMSPEEX::DestructEncoderSafe()
-{
- return;
+void ACMSPEEX::DestructEncoderSafe() {
+ return;
}
-
-WebRtc_Word16
-ACMSPEEX::InternalCreateDecoder()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::InternalCreateDecoder() {
+ return -1;
}
-void
-ACMSPEEX::DestructDecoderSafe()
-{
- return;
+void ACMSPEEX::DestructDecoderSafe() {
+ return;
}
-WebRtc_Word16
-ACMSPEEX::SetBitRateSafe(
- const WebRtc_Word32 /* rate */)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::SetBitRateSafe(const WebRtc_Word32 /* rate */) {
+ return -1;
}
-void
-ACMSPEEX::InternalDestructEncoderInst(
- void* /* ptrInst */)
-{
- return;
+void ACMSPEEX::InternalDestructEncoderInst(void* /* ptrInst */) {
+ return;
}
#ifdef UNUSEDSPEEX
-WebRtc_Word16
-ACMSPEEX::EnableVBR()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::EnableVBR() {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::DisableVBR()
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::DisableVBR() {
+ return -1;
}
-WebRtc_Word16
-ACMSPEEX::SetComplMode(
- WebRtc_Word16 mode)
-{
- return -1;
+WebRtc_Word16 ACMSPEEX::SetComplMode(WebRtc_Word16 mode) {
+ return -1;
}
#endif
-#else //===================== Actual Implementation =======================
+#else //===================== Actual Implementation =======================
-ACMSPEEX::ACMSPEEX(WebRtc_Word16 codecID):
-_encoderInstPtr(NULL),
-_decoderInstPtr(NULL)
-{
- _codecID = codecID;
+ACMSPEEX::ACMSPEEX(WebRtc_Word16 codecID)
+ : _encoderInstPtr(NULL),
+ _decoderInstPtr(NULL) {
+ _codecID = codecID;
- // Set sampling frequency, frame size and rate Speex
- if(_codecID == ACMCodecDB::kSPEEX8)
- {
- _samplingFrequency = 8000;
- _samplesIn20MsAudio = 160;
- _encodingRate = 11000;
- }
- else if(_codecID == ACMCodecDB::kSPEEX16)
- {
- _samplingFrequency = 16000;
- _samplesIn20MsAudio = 320;
- _encodingRate = 22000;
- }
- else
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Wrong codec id for Speex.");
+ // Set sampling frequency, frame size and rate Speex
+ if (_codecID == ACMCodecDB::kSPEEX8) {
+ _samplingFrequency = 8000;
+ _samplesIn20MsAudio = 160;
+ _encodingRate = 11000;
+ } else if (_codecID == ACMCodecDB::kSPEEX16) {
+ _samplingFrequency = 16000;
+ _samplesIn20MsAudio = 320;
+ _encodingRate = 22000;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Wrong codec id for Speex.");
- _samplingFrequency = -1;
- _samplesIn20MsAudio = -1;
- _encodingRate = -1;
+ _samplingFrequency = -1;
+ _samplesIn20MsAudio = -1;
+ _encodingRate = -1;
+ }
+
+ _hasInternalDTX = true;
+ _dtxEnabled = false;
+ _vbrEnabled = false;
+ _complMode = 3; // default complexity value
+
+ return;
+}
+
+ACMSPEEX::~ACMSPEEX() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcSpeex_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if (_decoderInstPtr != NULL) {
+ WebRtcSpeex_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+WebRtc_Word16 ACMSPEEX::InternalEncode(WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) {
+ WebRtc_Word16 status;
+ WebRtc_Word16 numEncodedSamples = 0;
+ WebRtc_Word16 n = 0;
+
+ while (numEncodedSamples < _frameLenSmpl) {
+ status = WebRtcSpeex_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
+ _encodingRate);
+
+    // Increment the read index; this tells the caller how far
+ // we have gone forward in reading the audio buffer
+ _inAudioIxRead += _samplesIn20MsAudio;
+ numEncodedSamples += _samplesIn20MsAudio;
+
+ if (status < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in Speex encoder");
+ return status;
}
- _hasInternalDTX = true;
+ // Update VAD, if internal DTX is used
+ if (_hasInternalDTX && _dtxEnabled) {
+ _vadLabel[n++] = status;
+ _vadLabel[n++] = status;
+ }
+
+ if (status == 0) {
+      // This frame is detected as inactive. We need to send whatever has
+      // been encoded so far.
+ *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
+ (WebRtc_Word16*) bitStream);
+
+ return *bitStreamLenByte;
+ }
+ }
+
+ *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
+ (WebRtc_Word16*) bitStream);
+ return *bitStreamLenByte;
+}
+
+WebRtc_Word16 ACMSPEEX::DecodeSafe(WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */) {
+ return 0;
+}
+
+WebRtc_Word16 ACMSPEEX::EnableDTX() {
+ if (_dtxEnabled) {
+ return 0;
+ } else if (_encoderExist) { // check if encoder exist
+ // enable DTX
+ if (WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1 : 0),
+ _complMode, 1) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot enable DTX for Speex");
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+WebRtc_Word16 ACMSPEEX::DisableDTX() {
+ if (!_dtxEnabled) {
+ return 0;
+ } else if (_encoderExist) { // check if encoder exist
+ // disable DTX
+ if (WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1 : 0),
+ _complMode, 0) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot disable DTX for Speex");
+ return -1;
+ }
_dtxEnabled = false;
- _vbrEnabled = false;
- _complMode = 3; // default complexity value
-
- return;
-}
-
-ACMSPEEX::~ACMSPEEX()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcSpeex_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
- }
- if(_decoderInstPtr != NULL)
- {
- WebRtcSpeex_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
- }
- return;
-}
-
-WebRtc_Word16
-ACMSPEEX::InternalEncode(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16* bitStreamLenByte)
-{
- WebRtc_Word16 status;
- WebRtc_Word16 numEncodedSamples = 0;
- WebRtc_Word16 n = 0;
-
- while( numEncodedSamples < _frameLenSmpl)
- {
- status = WebRtcSpeex_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
- _encodingRate);
-
- // increment the read index this tell the caller that how far
- // we have gone forward in reading the audio buffer
- _inAudioIxRead += _samplesIn20MsAudio;
- numEncodedSamples += _samplesIn20MsAudio;
-
- if(status < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error in Speex encoder");
- return status;
- }
-
- // Update VAD, if internal DTX is used
- if(_hasInternalDTX && _dtxEnabled)
- {
- _vadLabel[n++] = status;
- _vadLabel[n++] = status;
- }
-
- if(status == 0)
- {
- // This frame is detected as inactive. We need send whatever
- // encoded so far.
- *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
- (WebRtc_Word16*)bitStream);
-
- return *bitStreamLenByte;
- }
- }
-
- *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
- (WebRtc_Word16*)bitStream);
- return *bitStreamLenByte;
-}
-
-WebRtc_Word16
-ACMSPEEX::DecodeSafe(
- WebRtc_UWord8* /* bitStream */,
- WebRtc_Word16 /* bitStreamLenByte */,
- WebRtc_Word16* /* audio */,
- WebRtc_Word16* /* audioSamples */,
- WebRtc_Word8* /* speechType */)
-{
return 0;
-}
-
-WebRtc_Word16
-ACMSPEEX::EnableDTX()
-{
- if(_dtxEnabled)
- {
- return 0;
- }
- else if(_encoderExist) // check if encoder exist
- {
- // enable DTX
- if(WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1:0), _complMode, 1) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot enable DTX for Speex");
- return -1;
- }
- _dtxEnabled = true;
- return 0;
- }
- else
- {
- return -1;
- }
-
+ } else {
+    // encoder doesn't exist, therefore disabling is harmless
return 0;
+ }
+
+ return 0;
}
-WebRtc_Word16
-ACMSPEEX::DisableDTX()
-{
- if(!_dtxEnabled)
- {
- return 0;
- }
- else if(_encoderExist) // check if encoder exist
- {
- // disable DTX
- if(WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1:0), _complMode, 0) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot disable DTX for Speex");
- return -1;
- }
- _dtxEnabled = false;
- return 0;
- }
- else
- {
- // encoder doesn't exists, therefore disabling is harmless
- return 0;
- }
+WebRtc_Word16 ACMSPEEX::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+ // sanity check
+ if (_encoderInstPtr == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot initialize Speex encoder, instance does not exist");
+ return -1;
+ }
+ WebRtc_Word16 status = SetBitRateSafe((codecParams->codecInstant).rate);
+ status +=
+ (WebRtcSpeex_EncoderInit(_encoderInstPtr, _vbrEnabled, _complMode,
+ ((codecParams->enableDTX) ? 1 : 0)) < 0) ?
+ -1 : 0;
+
+ if (status >= 0) {
return 0;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in initialization of Speex encoder");
+ return -1;
+ }
}
-WebRtc_Word16
-ACMSPEEX::InternalInitEncoder(
- WebRtcACMCodecParams* codecParams)
-{
- // sanity check
- if (_encoderInstPtr == NULL)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot initialize Speex encoder, instance does not exist");
- return -1;
- }
+WebRtc_Word16 ACMSPEEX::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */) {
+ WebRtc_Word16 status;
- WebRtc_Word16 status = SetBitRateSafe((codecParams->codecInstant).rate);
- status += (WebRtcSpeex_EncoderInit(_encoderInstPtr, _vbrEnabled, _complMode, ((codecParams->enableDTX)? 1:0)) < 0)? -1:0;
+ // sanity check
+ if (_decoderInstPtr == NULL) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot initialize Speex decoder, instance does not exist");
+ return -1;
+ }
+ status = ((WebRtcSpeex_DecoderInit(_decoderInstPtr) < 0) ? -1 : 0);
- if (status >= 0) {
- return 0;
- } else {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error in initialization of Speex encoder");
- return -1;
- }
-}
-
-WebRtc_Word16
-ACMSPEEX::InternalInitDecoder(
- WebRtcACMCodecParams* /* codecParams */)
-{
- WebRtc_Word16 status;
-
- // sanity check
- if (_decoderInstPtr == NULL)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot initialize Speex decoder, instance does not exist");
- return -1;
- }
- status = ((WebRtcSpeex_DecoderInit(_decoderInstPtr) < 0)? -1:0);
-
- if (status >= 0) {
- return 0;
- } else {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error in initialization of Speex decoder");
- return -1;
- }
-}
-
-WebRtc_Word32
-ACMSPEEX::CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst)
-{
- if (!_decoderInitialized)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error, Speex decoder is not initialized");
- return -1;
- }
-
- // Fill up the structure by calling
- // "SET_CODEC_PAR" & "SET_SPEEX_FUNCTION."
- // Then call NetEQ to add the codec to its
- // database.
-
- switch(_samplingFrequency)
- {
- case 8000:
- {
- SET_CODEC_PAR((codecDef), kDecoderSPEEX_8, codecInst.pltype,
- _decoderInstPtr, 8000);
- break;
- }
- case 16000:
- {
- SET_CODEC_PAR((codecDef), kDecoderSPEEX_16, codecInst.pltype,
- _decoderInstPtr, 16000);
- break;
- }
- default:
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Unsupported sampling frequency for Speex");
-
- return -1;
- }
- }
-
- SET_SPEEX_FUNCTIONS((codecDef));
+ if (status >= 0) {
return 0;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in initialization of Speex decoder");
+ return -1;
+ }
}
-ACMGenericCodec*
-ACMSPEEX::CreateInstance(void)
-{
- return NULL;
-}
+WebRtc_Word32 ACMSPEEX::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) {
+ if (!_decoderInitialized) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error, Speex decoder is not initialized");
+ return -1;
+ }
-WebRtc_Word16
-ACMSPEEX::InternalCreateEncoder()
-{
- return WebRtcSpeex_CreateEnc(&_encoderInstPtr, _samplingFrequency);
-}
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_SPEEX_FUNCTION."
+ // Then call NetEQ to add the codec to its
+ // database.
-void
-ACMSPEEX::DestructEncoderSafe()
-{
- if(_encoderInstPtr != NULL)
- {
- WebRtcSpeex_FreeEnc(_encoderInstPtr);
- _encoderInstPtr = NULL;
+ switch (_samplingFrequency) {
+ case 8000: {
+ SET_CODEC_PAR((codecDef), kDecoderSPEEX_8, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ break;
}
- // there is no encoder set the following
- _encoderExist = false;
- _encoderInitialized = false;
- _encodingRate = 0;
-}
-
-
-WebRtc_Word16
-ACMSPEEX::InternalCreateDecoder()
-{
- return WebRtcSpeex_CreateDec(&_decoderInstPtr, _samplingFrequency, 1);
-}
-
-void
-ACMSPEEX::DestructDecoderSafe()
-{
- if(_decoderInstPtr != NULL)
- {
- WebRtcSpeex_FreeDec(_decoderInstPtr);
- _decoderInstPtr = NULL;
+ case 16000: {
+ SET_CODEC_PAR((codecDef), kDecoderSPEEX_16, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ break;
}
- // there is no encoder instance set the followings
- _decoderExist = false;
- _decoderInitialized = false;
+ default: {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Unsupported sampling frequency for Speex");
+
+ return -1;
+ }
+ }
+
+ SET_SPEEX_FUNCTIONS((codecDef));
+ return 0;
}
-WebRtc_Word16
-ACMSPEEX::SetBitRateSafe(
- const WebRtc_Word32 rate)
-{
- // Check if changed rate
- if (rate == _encodingRate) {
- return 0;
- } else if (rate > 2000) {
- _encodingRate = rate;
- _encoderParams.codecInstant.rate = rate;
- } else {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Unsupported encoding rate for Speex");
+ACMGenericCodec* ACMSPEEX::CreateInstance(void) {
+ return NULL;
+}
- return -1;
- }
+WebRtc_Word16 ACMSPEEX::InternalCreateEncoder() {
+ return WebRtcSpeex_CreateEnc(&_encoderInstPtr, _samplingFrequency);
+}
+void ACMSPEEX::DestructEncoderSafe() {
+ if (_encoderInstPtr != NULL) {
+ WebRtcSpeex_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+  // There is no encoder; set the following.
+ _encoderExist = false;
+ _encoderInitialized = false;
+ _encodingRate = 0;
+}
+
+WebRtc_Word16 ACMSPEEX::InternalCreateDecoder() {
+ return WebRtcSpeex_CreateDec(&_decoderInstPtr, _samplingFrequency, 1);
+}
+
+void ACMSPEEX::DestructDecoderSafe() {
+ if (_decoderInstPtr != NULL) {
+ WebRtcSpeex_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+  // There is no decoder instance; set the following.
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+WebRtc_Word16 ACMSPEEX::SetBitRateSafe(const WebRtc_Word32 rate) {
+ // Check if changed rate
+ if (rate == _encodingRate) {
return 0;
+ } else if (rate > 2000) {
+ _encodingRate = rate;
+ _encoderParams.codecInstant.rate = rate;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Unsupported encoding rate for Speex");
+
+ return -1;
+ }
+
+ return 0;
}
-
-void
-ACMSPEEX::InternalDestructEncoderInst(
- void* ptrInst)
-{
- if(ptrInst != NULL)
- {
- WebRtcSpeex_FreeEnc((SPEEX_encinst_t_*)ptrInst);
- }
- return;
+void ACMSPEEX::InternalDestructEncoderInst(void* ptrInst) {
+ if (ptrInst != NULL) {
+ WebRtcSpeex_FreeEnc((SPEEX_encinst_t_*) ptrInst);
+ }
+ return;
}
#ifdef UNUSEDSPEEX
// This API is currently not in use. If requested to be able to enable/disable VBR
// an ACM API needs to be added.
-WebRtc_Word16
-ACMSPEEX::EnableVBR()
-{
- if(_vbrEnabled)
- {
- return 0;
- }
- else if(_encoderExist) // check if encoder exist
- {
- // enable Variable Bit Rate (VBR)
- if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 1, _complMode, (_dtxEnabled? 1:0)) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot enable VBR mode for Speex");
+WebRtc_Word16 ACMSPEEX::EnableVBR() {
+ if (_vbrEnabled) {
+ return 0;
+ } else if (_encoderExist) // check if encoder exist
+ {
+ // enable Variable Bit Rate (VBR)
+ if (WebRtcSpeex_EncoderInit(_encoderInstPtr, 1, _complMode,
+ (_dtxEnabled ? 1 : 0)) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot enable VBR mode for Speex");
- return -1;
- }
- _vbrEnabled = true;
- return 0;
+ return -1;
}
- else
- {
- return -1;
- }
+ _vbrEnabled = true;
+ return 0;
+ } else {
+ return -1;
+ }
}
+// This API is currently not in use. If requested to be able to enable/disable
+// VBR, an ACM API needs to be added.
+WebRtc_Word16 ACMSPEEX::DisableVBR() {
+ if (!_vbrEnabled) {
+ return 0;
+ } else if (_encoderExist) { // check if encoder exist
+    // disable VBR
+ if (WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, _complMode,
+ (_dtxEnabled ? 1 : 0)) < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                   "Cannot disable VBR for Speex");
-// This API is currently not in use. If requested to be able to enable/disable VBR
-// an ACM API need to be added.
-WebRtc_Word16
-ACMSPEEX::DisableVBR()
-{
- if(!_vbrEnabled)
- {
- return 0;
+ return -1;
}
- else if(_encoderExist) // check if encoder exist
- {
- // disable DTX
- if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, _complMode, (_dtxEnabled? 1:0)) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Cannot disable DTX for Speex");
-
- return -1;
- }
- _vbrEnabled = false;
- return 0;
- }
- else
- {
- // encoder doesn't exists, therefore disabling is harmless
- return 0;
- }
+ _vbrEnabled = false;
+ return 0;
+ } else {
+    // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
}
// This API is currently not in use. If requested to be able to set complexity
// an ACM API needs to be added.
-WebRtc_Word16
-ACMSPEEX::SetComplMode(
- WebRtc_Word16 mode)
-{
- // Check if new mode
- if(mode == _complMode)
- {
- return 0;
+WebRtc_Word16 ACMSPEEX::SetComplMode(WebRtc_Word16 mode) {
+ // Check if new mode
+ if (mode == _complMode) {
+ return 0;
+ } else if (_encoderExist) { // check if encoder exist
+ // Set new mode
+ if (WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, mode, (_dtxEnabled ? 1 : 0))
+ < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in complexity mode for Speex");
+ return -1;
}
- else if(_encoderExist) // check if encoder exist
- {
- // Set new mode
- if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, mode, (_dtxEnabled? 1:0)) < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
- "Error in complexity mode for Speex");
- return -1;
- }
- _complMode = mode;
- return 0;
- }
- else
- {
- // encoder doesn't exists, therefore disabling is harmless
- return 0;
- }
+ _complMode = mode;
+ return 0;
+ } else {
+    // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
}
#endif
#endif
-} // namespace webrtc
+} // namespace webrtc
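
For the encode path reformatted above: WebRtcSpeex_Encode() consumes one 20 ms sub-frame per call, the VAD label it returns drives the internal DTX, and WebRtcSpeex_GetBitstream() flushes the accumulated payload either when a sub-frame comes back inactive or when the full ACM frame has been consumed. A reduced sketch of that control flow, assuming the wrapper API outlined earlier (the free-standing function and its names are illustrative):

  WebRtc_Word16 EncodeSpeexFrame(SPEEX_encinst_t* enc, WebRtc_Word16* audio,
                                 WebRtc_Word16 frame_len_smpl,
                                 WebRtc_Word16 samples_per_20ms,
                                 WebRtc_Word32 rate,
                                 WebRtc_UWord8* bitstream) {
    WebRtc_Word16 done = 0;
    while (done < frame_len_smpl) {
      WebRtc_Word16 status = WebRtcSpeex_Encode(enc, &audio[done], rate);
      if (status < 0) {
        return status;  // Encoder error.
      }
      done += samples_per_20ms;
      if (status == 0) {
        break;  // Inactive sub-frame (DTX): send whatever has been encoded.
      }
    }
    return WebRtcSpeex_GetBitstream(enc, (WebRtc_Word16*) bitstream);
  }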
diff --git a/modules/audio_coding/main/source/acm_speex.h b/modules/audio_coding/main/source/acm_speex.h
index aabcec1..ee31daf 100644
--- a/modules/audio_coding/main/source/acm_speex.h
+++ b/modules/audio_coding/main/source/acm_speex.h
@@ -19,72 +19,63 @@
namespace webrtc {
-class ACMSPEEX : public ACMGenericCodec
-{
-public:
- ACMSPEEX(WebRtc_Word16 codecID);
- ~ACMSPEEX();
- // for FEC
- ACMGenericCodec* CreateInstance(void);
+class ACMSPEEX : public ACMGenericCodec {
+ public:
+ ACMSPEEX(WebRtc_Word16 codecID);
+ ~ACMSPEEX();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
- WebRtc_Word16 InternalEncode(
- WebRtc_UWord8* bitstream,
- WebRtc_Word16* bitStreamLenByte);
+ WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
- WebRtc_Word16 InternalInitEncoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitEncoder(WebRtcACMCodecParams *codecParams);
- WebRtc_Word16 InternalInitDecoder(
- WebRtcACMCodecParams *codecParams);
+ WebRtc_Word16 InternalInitDecoder(WebRtcACMCodecParams *codecParams);
-protected:
- WebRtc_Word16 DecodeSafe(
- WebRtc_UWord8* bitStream,
- WebRtc_Word16 bitStreamLenByte,
- WebRtc_Word16* audio,
- WebRtc_Word16* audioSamples,
- WebRtc_Word8* speechType);
+ protected:
+ WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
- WebRtc_Word32 CodecDef(
- WebRtcNetEQ_CodecDef& codecDef,
- const CodecInst& codecInst);
+ WebRtc_Word32 CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
- void DestructEncoderSafe();
+ void DestructEncoderSafe();
- void DestructDecoderSafe();
+ void DestructDecoderSafe();
- WebRtc_Word16 InternalCreateEncoder();
+ WebRtc_Word16 InternalCreateEncoder();
- WebRtc_Word16 InternalCreateDecoder();
+ WebRtc_Word16 InternalCreateDecoder();
- void InternalDestructEncoderInst(
- void* ptrInst);
+ void InternalDestructEncoderInst(void* ptrInst);
- WebRtc_Word16 SetBitRateSafe(
- const WebRtc_Word32 rate);
+ WebRtc_Word16 SetBitRateSafe(const WebRtc_Word32 rate);
- WebRtc_Word16 EnableDTX();
+ WebRtc_Word16 EnableDTX();
- WebRtc_Word16 DisableDTX();
+ WebRtc_Word16 DisableDTX();
#ifdef UNUSEDSPEEX
- WebRtc_Word16 EnableVBR();
+ WebRtc_Word16 EnableVBR();
- WebRtc_Word16 DisableVBR();
+ WebRtc_Word16 DisableVBR();
- WebRtc_Word16 SetComplMode(
- WebRtc_Word16 mode);
+ WebRtc_Word16 SetComplMode(WebRtc_Word16 mode);
#endif
- SPEEX_encinst_t_* _encoderInstPtr;
- SPEEX_decinst_t_* _decoderInstPtr;
- WebRtc_Word16 _complMode;
- bool _vbrEnabled;
- WebRtc_Word32 _encodingRate;
- WebRtc_Word16 _samplingFrequency;
- WebRtc_UWord16 _samplesIn20MsAudio;
+ SPEEX_encinst_t_* _encoderInstPtr;
+ SPEEX_decinst_t_* _decoderInstPtr;
+ WebRtc_Word16 _complMode;
+ bool _vbrEnabled;
+ WebRtc_Word32 _encodingRate;
+ WebRtc_Word16 _samplingFrequency;
+ WebRtc_UWord16 _samplesIn20MsAudio;
};
-} // namespace webrtc
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_SPEEX_H_
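(Illustrative note, not part of the patch.) The hunk above shows the layout this CL converges on for codec wrapper headers: two-space indentation, access specifiers indented by one space, wrapped arguments aligned with the opening parenthesis, and two spaces before trailing comments. A wrapper added after this CL would follow the same shape as ACMSPEEX; in the sketch below the class name and the instance type are placeholders:

class ACMFooCodec : public ACMGenericCodec {  // "FooCodec" is a placeholder
 public:
  ACMFooCodec(WebRtc_Word16 codecID);
  ~ACMFooCodec();
  // for FEC
  ACMGenericCodec* CreateInstance(void);

  WebRtc_Word16 InternalEncode(WebRtc_UWord8* bitstream,
                               WebRtc_Word16* bitStreamLenByte);

 protected:
  WebRtc_Word16 DecodeSafe(WebRtc_UWord8* bitStream,
                           WebRtc_Word16 bitStreamLenByte,
                           WebRtc_Word16* audio,
                           WebRtc_Word16* audioSamples,
                           WebRtc_Word8* speechType);

  FOO_encinst_t_* _encoderInstPtr;  // placeholder instance type
};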
diff --git a/modules/audio_coding/main/source/audio_coding_module.cc b/modules/audio_coding/main/source/audio_coding_module.cc
index 4fe6dad..0efc8b7 100644
--- a/modules/audio_coding/main/source/audio_coding_module.cc
+++ b/modules/audio_coding/main/source/audio_coding_module.cc
@@ -8,51 +8,38 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-
#include "acm_dtmf_detection.h"
#include "audio_coding_module.h"
#include "audio_coding_module_impl.h"
#include "trace.h"
-namespace webrtc
-{
+namespace webrtc {
// Create module
-AudioCodingModule*
-AudioCodingModule::Create(
- const WebRtc_Word32 id)
-{
- return new AudioCodingModuleImpl(id);
+AudioCodingModule* AudioCodingModule::Create(const WebRtc_Word32 id) {
+ return new AudioCodingModuleImpl(id);
}
// Destroy module
-void
-AudioCodingModule::Destroy(
- AudioCodingModule* module)
-{
- delete static_cast<AudioCodingModuleImpl*> (module);
+void AudioCodingModule::Destroy(AudioCodingModule* module) {
+ delete static_cast<AudioCodingModuleImpl*>(module);
}
// Get number of supported codecs
-WebRtc_UWord8 AudioCodingModule::NumberOfCodecs()
-{
- return static_cast<WebRtc_UWord8>(ACMCodecDB::kNumCodecs);
+WebRtc_UWord8 AudioCodingModule::NumberOfCodecs() {
+ return static_cast<WebRtc_UWord8>(ACMCodecDB::kNumCodecs);
}
// Get supported codec param with id
-WebRtc_Word32
-AudioCodingModule::Codec(
- const WebRtc_UWord8 listId,
- CodecInst& codec)
-{
- // Get the codec settings for the codec with the given list ID
- return ACMCodecDB::Codec(listId, &codec);
+WebRtc_Word32 AudioCodingModule::Codec(const WebRtc_UWord8 listId,
+ CodecInst& codec) {
+ // Get the codec settings for the codec with the given list ID
+ return ACMCodecDB::Codec(listId, &codec);
}
// Get supported codec Param with name, frequency and number of channels.
WebRtc_Word32 AudioCodingModule::Codec(const char* payload_name,
- CodecInst& codec,
- int sampling_freq_hz,
+ CodecInst& codec, int sampling_freq_hz,
int channels) {
int codec_id;
@@ -62,10 +49,10 @@
// We couldn't find a matching codec, set the parameters to unacceptable
// values and return.
codec.plname[0] = '\0';
- codec.pltype = -1;
- codec.pacsize = 0;
- codec.rate = 0;
- codec.plfreq = 0;
+ codec.pltype = -1;
+ codec.pacsize = 0;
+ codec.rate = 0;
+ codec.plfreq = 0;
return -1;
}
@@ -77,30 +64,23 @@
// Get supported codec Index with name, frequency and number of channels.
WebRtc_Word32 AudioCodingModule::Codec(const char* payload_name,
- int sampling_freq_hz,
- int channels) {
+ int sampling_freq_hz, int channels) {
return ACMCodecDB::CodecId(payload_name, sampling_freq_hz, channels);
}
// Checks the validity of the parameters of the given codec
-bool
-AudioCodingModule::IsCodecValid(
- const CodecInst& codec)
-{
- int mirrorID;
- char errMsg[500];
+bool AudioCodingModule::IsCodecValid(const CodecInst& codec) {
+ int mirrorID;
+ char errMsg[500];
- int codecNumber = ACMCodecDB::CodecNumber(&codec, &mirrorID, errMsg, 500);
+ int codecNumber = ACMCodecDB::CodecNumber(&codec, &mirrorID, errMsg, 500);
- if(codecNumber < 0)
- {
- WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, errMsg);
- return false;
- }
- else
- {
- return true;
- }
+ if (codecNumber < 0) {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, errMsg);
+ return false;
+ } else {
+ return true;
+ }
}
-} // namespace webrtc
+} // namespace webrtc
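(Illustrative note, not part of the patch.) The functions reformatted in audio_coding_module.cc above are the module's static factory and codec-database helpers. A minimal caller-side sketch follows; the include path and the assumption that CodecInst and the WebRtc_* typedefs are pulled in by the module's public headers are mine, not part of this CL:

#include "audio_coding_module.h"  // assumed to bring in CodecInst and typedefs

// Enumerate the codec database using only the helpers shown in the diff:
// Create(), NumberOfCodecs(), Codec(listId, codec), IsCodecValid(), Destroy().
void DumpCodecDatabase() {
  webrtc::AudioCodingModule* acm = webrtc::AudioCodingModule::Create(0);

  const WebRtc_UWord8 num_codecs = webrtc::AudioCodingModule::NumberOfCodecs();
  for (WebRtc_UWord8 i = 0; i < num_codecs; ++i) {
    webrtc::CodecInst codec;
    if (webrtc::AudioCodingModule::Codec(i, codec) < 0) {
      continue;  // Lookup failed for this list ID.
    }
    if (webrtc::AudioCodingModule::IsCodecValid(codec)) {
      // codec.plname, codec.plfreq, codec.pacsize and codec.rate are now
      // filled in from the codec database.
    }
  }

  webrtc::AudioCodingModule::Destroy(acm);
}

The name-based overloads shown above behave analogously: one fills in a CodecInst for a given payload name, sampling frequency and channel count, and the other returns only the database index.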