git-svn-id: http://webrtc.googlecode.com/svn/trunk@156 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/src/modules/audio_coding/main/OWNERS b/src/modules/audio_coding/main/OWNERS
new file mode 100644
index 0000000..a7220e7
--- /dev/null
+++ b/src/modules/audio_coding/main/OWNERS
@@ -0,0 +1,3 @@
+tlegrand@google.com
+turajs@google.com
+jks@google.com
diff --git a/src/modules/audio_coding/main/interface/audio_coding_module.h b/src/modules/audio_coding/main/interface/audio_coding_module.h
new file mode 100644
index 0000000..7164e29
--- /dev/null
+++ b/src/modules/audio_coding/main/interface/audio_coding_module.h
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CODING_MODULE_H
+#define AUDIO_CODING_MODULE_H
+
+#include "module.h"
+#include "audio_coding_module_typedefs.h"
+#include "module_common_types.h"
+
+
+namespace webrtc
+{
+
+// forward declarations
+struct CodecInst;
+
+
+#define WEBRTC_10MS_PCM_AUDIO 960 // 16 bits super wideband 48 kHz
+
+
+// Callback class used for sending data ready to be packetized
+class AudioPacketizationCallback
+{
+public:
+ virtual ~AudioPacketizationCallback() {}
+
+ virtual WebRtc_Word32 SendData(
+ FrameType frameType,
+ WebRtc_UWord8 payloadType,
+ WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation) = 0;
+};
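+
+// A minimal sketch of a callback implementation (illustrative only; how the
+// payload is actually handed to the RTP stack is application specific):
+//
+//   class MyPacketizationCallback : public AudioPacketizationCallback
+//   {
+//   public:
+//       WebRtc_Word32 SendData(FrameType frameType,
+//                              WebRtc_UWord8 payloadType,
+//                              WebRtc_UWord32 timeStamp,
+//                              const WebRtc_UWord8* payloadData,
+//                              WebRtc_UWord16 payloadSize,
+//                              const RTPFragmentationHeader* fragmentation)
+//       {
+//           // Packetize payloadData into an RTP packet and send it.
+//           return 0;
+//       }
+//   };
+//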
+
+// Callback class used for inband Dtmf detection
+class AudioCodingFeedback
+{
+public:
+ virtual ~AudioCodingFeedback() {}
+
+ virtual WebRtc_Word32 IncomingDtmf(
+ const WebRtc_UWord8 digitDtmf,
+ const bool end) = 0;
+};
+
+// Callback class used for reporting VAD decision
+class ACMVADCallback
+{
+public:
+ virtual ~ACMVADCallback() {}
+ virtual WebRtc_Word32 InFrameType(
+ WebRtc_Word16 frameType) = 0;
+};
+
+// Callback class used for reporting receiver statistics
+class ACMVQMonCallback
+{
+public:
+ virtual ~ACMVQMonCallback() {}
+
+ virtual WebRtc_Word32 NetEqStatistics(
+ const WebRtc_Word32 id, // current ACM id
+ const WebRtc_UWord16 MIUsValid, // valid voice duration in ms
+ const WebRtc_UWord16 MIUsReplaced, // concealed voice duration in ms
+ const WebRtc_UWord8 eventFlags, // concealed voice flags
+ const WebRtc_UWord16 delayMS) = 0; // average delay in ms
+};
+
+class AudioCodingModule: public Module
+{
+protected:
+ AudioCodingModule(){}
+ virtual ~AudioCodingModule(){}
+
+public:
+ ///////////////////////////////////////////////////////////////////////////
+    // Creation and destruction of an ACM
+ //
+ static AudioCodingModule* Create(
+ const WebRtc_Word32 id);
+
+ static void Destroy(
+ AudioCodingModule* module);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Utility functions
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 GetVersion()
+ // Returns version of the module and its components.
+ //
+ // Outputs:
+    //   -version            : a buffer to which the version string is written.
+ // -remainBuffBytes : remaining size of the buffer "version" in
+ // bytes, excluding terminating-null.
+    //   -position           : the first character of the ACM version will be
+    //                         written to version[position] and so on.
+ //
+ // Return value:
+ // -1 if failed to write the whole version string,
+ // 0 if succeeded.
+ //
+ static WebRtc_Word32 GetVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainBuffBytes,
+ WebRtc_UWord32& position);
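+
+    // Usage sketch (illustrative; the buffer size is an arbitrary choice):
+    //
+    //   WebRtc_Word8 version[500];
+    //   WebRtc_UWord32 remainingBytes = sizeof(version) - 1;
+    //   WebRtc_UWord32 position = 0;
+    //   if (AudioCodingModule::GetVersion(version, remainingBytes,
+    //                                     position) == 0)
+    //   {
+    //       // "version" now holds the version string.
+    //   }
+    //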
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord8 NumberOfCodecs()
+ // Returns number of supported codecs.
+ //
+ // Return value:
+ // number of supported codecs.
+    //
+ static WebRtc_UWord8 NumberOfCodecs();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Codec()
+ // Get supported codec with list number.
+ //
+ // Input:
+ // -listId : list number.
+ //
+ // Output:
+    //   -codec              : a structure to which the parameters of the
+    //                         codec, given by the list number, are written.
+ //
+ // Return value:
+ // -1 if the list number (listId) is invalid.
+ // 0 if succeeded.
+ //
+ static WebRtc_Word32 Codec(
+ const WebRtc_UWord8 listId,
+ CodecInst& codec);
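+
+    // Usage sketch (illustrative): list all supported codecs.
+    //
+    //   CodecInst codec;
+    //   for (WebRtc_UWord8 i = 0; i < AudioCodingModule::NumberOfCodecs(); i++)
+    //   {
+    //       if (AudioCodingModule::Codec(i, codec) == 0)
+    //       {
+    //           // "codec" now holds the parameters of list entry i.
+    //       }
+    //   }
+    //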
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Codec()
+ // Get supported codec with the given codec name and sampling frequency.
+    // If the sampling frequency is -1, then the search will be based only on
+    // the codec name.
+ //
+ // Input:
+ // -payloadName : name of the codec.
+    //   -samplingFreqHz     : sampling frequency of the codec.
+ //
+ // Output:
+    //   -codec              : a structure to which the parameters of the
+    //                         codec, given by name, are written.
+ //
+ // Return value:
+    //   -1 if no matching codec is found,
+ // 0 if succeeded.
+ //
+ static WebRtc_Word32 Codec(
+ const WebRtc_Word8* payloadName,
+ CodecInst& codec,
+ const WebRtc_Word32 samplingFreqHz = -1);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Codec()
+ //
+ // Returns the list number of the given codec name and sampling frequency.
+    // If the sampling frequency is -1, then the search will be based only on
+    // the codec name.
+ //
+ // Input:
+ // -payloadName : name of the codec.
+    //   -samplingFreqHz     : sampling frequency of the codec.
+ //
+ // Return value:
+ // if the codec is found, the index of the codec in the list,
+ // -1 if the codec is not found.
+ //
+ static WebRtc_Word32 Codec(
+ const WebRtc_Word8* payloadName,
+ const WebRtc_Word32 samplingFreqHz = -1);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool IsCodecValid()
+ // Checks the validity of the parameters of the given codec.
+ //
+ // Input:
+    //   -codec              : the structure that holds the parameters of the
+ // codec.
+ //
+    // Return value:
+ // true if the parameters are valid,
+ // false if any parameter is not valid.
+ //
+ static bool IsCodecValid(const CodecInst& codec);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Sender
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 InitializeSender()
+    // Any encoder-related state of ACM will be initialized to the same
+    // state as when ACM was created. This will not interrupt or affect
+    // the decoding functionality of ACM. ACM loses all encoding-related
+    // settings by calling this function.
+    // For instance, a send codec has to be registered again.
+ //
+ // Return value:
+ // -1 if failed to initialize,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 InitializeSender() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ResetEncoder()
+    // This API resets the state of the encoder. All the encoder settings, such as
+ // send-codec or VAD/DTX, will be preserved.
+ //
+ // Return value:
+    //   -1 if failed to reset,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 ResetEncoder() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterSendCodec()
+    // Registers a codec, specified by "sendCodec," as the send codec.
+    // This API can be called multiple times to register codecs. The last codec
+    // registered overwrites the previous ones.
+ // The API can also be used to change payload type for CNG and RED, which are
+ // registered by default to default payload types.
+ // Note that registering CNG and RED won't overwrite speech codecs.
+ // This API can be called to set/change the send payload-type, frame-size
+ // or encoding rate (if applicable for the codec).
+ //
+ // Input:
+ // -sendCodec : Parameters of the codec to be registered, c.f.
+ // common_types.h for the definition of
+ // CodecInst.
+ //
+ // Return value:
+ // -1 if failed to initialize,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 RegisterSendCodec(
+ const CodecInst& sendCodec) = 0;
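+
+    // Usage sketch (illustrative; assumes an ACM instance "acm" obtained from
+    // Create(); the codec name "ISAC" at 16000 Hz is only an example and must
+    // be supported by the build):
+    //
+    //   CodecInst sendCodec;
+    //   if (AudioCodingModule::Codec("ISAC", sendCodec, 16000) == 0)
+    //   {
+    //       acm->RegisterSendCodec(sendCodec);
+    //   }
+    //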
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SendCodec()
+ // Get parameters for the codec currently registered as send codec.
+ //
+ // Output:
+ // -currentSendCodec : parameters of the send codec.
+ //
+ // Return value:
+ // -1 if failed to get send codec,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 SendCodec(
+ CodecInst& currentSendCodec) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SendFrequency()
+ // Get the sampling frequency of the current encoder in Hertz.
+ //
+ // Return value:
+ // positive; sampling frequency [Hz] of the current encoder.
+ // -1 if an error has happened.
+ //
+ virtual WebRtc_Word32 SendFrequency() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+    // WebRtc_Word32 SendBitrate()
+ // Get encoding bit-rate in bits per second.
+ //
+ // Return value:
+ // positive; encoding rate in bits/sec,
+    //   -1 if an error has occurred.
+ //
+ virtual WebRtc_Word32 SendBitrate() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetReceivedEstimatedBandwidth()
+ // Set available bandwidth [bits/sec] of the up-link channel.
+ // This information is used for traffic shaping, and is currently only
+ // supported if iSAC is the send codec.
+ //
+ // Input:
+ // -bw : bandwidth in bits/sec estimated for
+ // up-link.
+ // Return value
+ // -1 if error occurred in setting the bandwidth,
+ // 0 bandwidth is set successfully.
+ //
+ virtual WebRtc_Word32 SetReceivedEstimatedBandwidth(
+ const WebRtc_Word32 bw) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterTransportCallback()
+ // Register a transport callback which will be called to deliver
+ // the encoded buffers whenever Process() is called and a
+ // bit-stream is ready.
+ //
+ // Input:
+ // -transport : pointer to the callback class
+ // transport->SendData() is called whenever
+ // Process() is called and bit-stream is ready
+ // to deliver.
+ //
+ // Return value:
+ // -1 if the transport callback could not be registered
+ // 0 if registration is successful.
+ //
+ virtual WebRtc_Word32 RegisterTransportCallback(
+ AudioPacketizationCallback* transport) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Add10MsData()
+    // Add 10 ms of raw (PCM) audio data to the encoder. If the sampling
+    // frequency of the audio does not match the sampling frequency of the
+    // current encoder, ACM will resample the audio.
+ //
+ // Input:
+    //   -audioFrame         : the input audio frame, containing raw audio
+    //                         data, the sampling frequency, etc.,
+ // c.f. module_common_types.h for definition of
+ // AudioFrame.
+ //
+ // Return value:
+ // 0 successfully added the frame.
+ // -1 some error occurred and data is not added.
+    //   < -1 if, in order to add the frame to the buffer, n samples had to be
+    //        overwritten; -n is the return value in this case.
+ //
+ virtual WebRtc_Word32 Add10MsData(
+ const AudioFrame& audioFrame) = 0;
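+
+    // Usage sketch (illustrative; assumes an ACM instance "acm" and that
+    // "audioFrame" has been filled with 10 ms of PCM audio and its sampling
+    // frequency, see module_common_types.h):
+    //
+    //   AudioFrame audioFrame;
+    //   // ... fill audioFrame from the capture device ...
+    //   if (acm->Add10MsData(audioFrame) < 0)
+    //   {
+    //       // The data was not (fully) added.
+    //   }
+    //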
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // (FEC) Forward Error Correction
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetFECStatus(const bool enable)
+    // Configure FEC status, i.e. on/off.
+ //
+ // RFC 2198 describes a solution which has a single payload type which
+ // signifies a packet with redundancy. That packet then becomes a container,
+ // encapsulating multiple payloads into a single RTP packet.
+ // Such a scheme is flexible, since any amount of redundancy may be
+ // encapsulated within a single packet. There is, however, a small overhead
+ // since each encapsulated payload must be preceded by a header indicating
+ // the type of data enclosed.
+ //
+ // This means that FEC is actually a RED scheme.
+ //
+ // Input:
+ // -enableFEC : if true FEC is enabled, otherwise FEC is
+ // disabled.
+ //
+ // Return value:
+ // -1 if failed to set FEC status,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 SetFECStatus(
+ const bool enableFEC) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool FECStatus()
+ // Get FEC status
+ //
+ // Return value
+ // true if FEC is enabled,
+ // false if FEC is disabled.
+ //
+ virtual bool FECStatus() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // (VAD) Voice Activity Detection
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetVAD()
+    // If DTX is enabled and the codec does not have internal DTX/VAD,
+    // WebRtc VAD will be enabled automatically and 'enableVAD' is ignored.
+    //
+    // If DTX is disabled but VAD is enabled, no DTX packets are sent,
+    // regardless of whether the codec has internal DTX/VAD or not. In this
+    // case, WebRtc VAD runs to label frames as active/inactive.
+ //
+ // Inputs:
+ // -enableDTX : if true DTX is enabled,
+ // otherwise DTX is disabled.
+ // -enableVAD : if true VAD is enabled,
+ // otherwise VAD is disabled.
+ // -vadMode : determines the aggressiveness of VAD. A more
+ // aggressive mode results in more frames labeled
+    //                         as inactive, cf. the definition of
+ // ACMVADMode in audio_coding_module_typedefs.h
+ // for valid values.
+ //
+ // Return value:
+ // -1 if failed to set up VAD/DTX,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 SetVAD(
+ const bool enableDTX = true,
+ const bool enableVAD = false,
+ const ACMVADMode vadMode = VADNormal) = 0;
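+
+    // Usage sketch (illustrative; assumes an ACM instance "acm" obtained from
+    // Create()): enable DTX together with VAD in an aggressive mode.
+    //
+    //   acm->SetVAD(true, true, VADAggr);
+    //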
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 VAD()
+ // Get VAD status.
+ //
+ // Outputs:
+ // -dtxEnabled : is set to true if DTX is enabled, otherwise
+ // is set to false.
+ // -vadEnabled : is set to true if VAD is enabled, otherwise
+ // is set to false.
+ // -vadMode : is set to the current aggressiveness of VAD.
+ //
+ // Return value:
+ // -1 if fails to retrieve the setting of DTX/VAD,
+    //    0 if succeeded.
+ //
+ virtual WebRtc_Word32 VAD(
+ bool& dtxEnabled,
+ bool& vadEnabled,
+ ACMVADMode& vadMode) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ReplaceInternalDTXWithWebRtc()
+    // Used to replace the codec-internal DTX scheme with WebRtc DTX. This is
+    // only supported for G729, where this call replaces Annex B with WebRtc
+    // DTX.
+ //
+ // Input:
+ // -useWebRtcDTX : if false (default) the codec built-in DTX/VAD
+ // scheme is used, otherwise the internal DTX is
+ // replaced with WebRtc DTX/VAD.
+ //
+ // Return value:
+ // -1 if failed to replace codec internal DTX with WebRtc,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 ReplaceInternalDTXWithWebRtc(
+ const bool useWebRtcDTX = false) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 IsInternalDTXReplacedWithWebRtc()
+ // Get status if the codec internal DTX (when such exists) is replaced with
+ // WebRtc DTX. This is only supported for G729.
+ //
+ // Output:
+ // -usesWebRtcDTX : is set to true if the codec internal DTX is
+ // replaced with WebRtc DTX/VAD, otherwise it is set
+ // to false.
+ //
+ // Return value:
+ // -1 if failed to determine if codec internal DTX is replaced with WebRtc,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 IsInternalDTXReplacedWithWebRtc(
+ bool& usesWebRtcDTX) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterVADCallback()
+ // Call this method to register a callback function which is called
+    // any time ACM encounters an empty frame, that is, a frame recognized as
+    // inactive. Depending on the codec, either WebRtc VAD or the codec's
+    // internal VAD is employed to identify a frame as active/inactive.
+ //
+ // Input:
+ // -vadCallback : pointer to a callback function.
+ //
+ // Return value:
+ // -1 if failed to register the callback function.
+ // 0 if the callback function is registered successfully.
+ //
+ virtual WebRtc_Word32 RegisterVADCallback(
+ ACMVADCallback* vadCallback) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Receiver
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 InitializeReceiver()
+    // Any decoder-related state of ACM will be initialized to the same
+    // state as when ACM was created. This will not interrupt or affect
+    // the encoding functionality of ACM. ACM loses all decoding-related
+    // settings by calling this function.
+ // For instance, all registered codecs are deleted and have to be
+ // registered again.
+ //
+ // Return value:
+ // -1 if failed to initialize,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 InitializeReceiver() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ResetDecoder()
+    // This API resets the state of the decoders. ACM will not lose any
+ // decoder-related settings, such as registered codecs.
+ //
+ // Return value:
+    //   -1 if failed to reset,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 ResetDecoder() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ReceiveFrequency()
+ // Get sampling frequency of the last received payload.
+ //
+ // Return value:
+ // non-negative the sampling frequency in Hertz.
+ // -1 if an error has occurred.
+ //
+ virtual WebRtc_Word32 ReceiveFrequency() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 PlayoutFrequency()
+ // Get sampling frequency of audio played out.
+ //
+ // Return value:
+ // the sampling frequency in Hertz.
+ //
+ virtual WebRtc_Word32 PlayoutFrequency() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterReceiveCodec()
+    // Register possible receive codecs; can be called multiple times, e.g.
+    // for speech codecs, CNG-NB, CNG-WB, CNG-SWB, AVT and RED.
+ //
+ // Input:
+ // -receiveCodec : parameters of the codec to be registered, c.f.
+ // common_types.h for the definition of
+ // CodecInst.
+ //
+ // Return value:
+ // -1 if failed to register the codec
+ // 0 if the codec registered successfully.
+ //
+ virtual WebRtc_Word32 RegisterReceiveCodec(
+ const CodecInst& receiveCodec) = 0;
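+
+    // Usage sketch (illustrative; assumes an ACM instance "acm"): register
+    // every codec in the database as a possible receive codec.
+    //
+    //   CodecInst codec;
+    //   for (WebRtc_UWord8 i = 0; i < AudioCodingModule::NumberOfCodecs(); i++)
+    //   {
+    //       if ((AudioCodingModule::Codec(i, codec) == 0) &&
+    //           (acm->RegisterReceiveCodec(codec) < 0))
+    //       {
+    //           // Registration failed for this codec.
+    //       }
+    //   }
+    //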
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 UnregisterReceiveCodec()
+ // Unregister the codec currently registered with a specific payload type
+ // from the list of possible receive codecs.
+ //
+ // Input:
+ // -payloadType : The number representing the payload type to
+ // unregister.
+ //
+    // Return value:
+ // -1 if the unregistration fails.
+ // 0 if the given codec is successfully unregistered.
+ //
+ virtual WebRtc_Word32 UnregisterReceiveCodec(
+        const WebRtc_Word16 payloadType) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ReceiveCodec()
+ // Get the codec associated with last received payload.
+ //
+ // Output:
+ // -currRcvCodec : parameters of the codec associated with the last
+ // received payload, c.f. common_types.h for
+ // the definition of CodecInst.
+ //
+ // Return value:
+ // -1 if failed to retrieve the codec,
+ // 0 if the codec is successfully retrieved.
+ //
+ virtual WebRtc_Word32 ReceiveCodec(
+ CodecInst& currRcvCodec) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 IncomingPacket()
+ // Call this function to insert a parsed RTP packet into ACM.
+ //
+ // Inputs:
+ // -incomingPayload : received payload.
+ // -payloadLengthByte : the length of payload in bytes.
+ // -rtpInfo : the relevant information retrieved from RTP
+ // header.
+ //
+ // Return value:
+ // -1 if failed to push in the payload
+ // 0 if payload is successfully pushed in.
+ //
+ virtual WebRtc_Word32 IncomingPacket(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLengthByte,
+ const WebRtcRTPHeader& rtpInfo) = 0;
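+
+    // Usage sketch (illustrative; assumes an ACM instance "acm"; the variables
+    // "payload", "payloadLengthByte" and "rtpInfo" are filled by the
+    // application's RTP parser, see common_types.h for WebRtcRTPHeader):
+    //
+    //   WebRtcRTPHeader rtpInfo;
+    //   // ... fill rtpInfo from the parsed RTP header ...
+    //   if (acm->IncomingPacket(payload, payloadLengthByte, rtpInfo) < 0)
+    //   {
+    //       // The payload could not be pushed in.
+    //   }
+    //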
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 IncomingPayload()
+ // Call this API to push incoming payloads when there is no rtp-info.
+ // The rtp-info will be created in ACM. One usage for this API is when
+    // pre-encoded files are pushed into ACM.
+ //
+ // Inputs:
+ // -incomingPayload : received payload.
+    //   -payloadLengthByte  : the length, in bytes, of the received payload.
+ // -payloadType : the payload-type. This specifies which codec has
+ // to be used to decode the payload.
+ // -timestamp : send timestamp of the payload. ACM starts with
+    //                         a random value and increments it by the
+ // packet-size, which is given when the codec in
+ // question is registered by RegisterReceiveCodec().
+ // Therefore, it is essential to have the timestamp
+    //                         if the frame-size differs from the registered
+ // value or if the incoming payload contains DTX
+ // packets.
+ //
+ // Return value:
+ // -1 if failed to push in the payload
+ // 0 if payload is successfully pushed in.
+ //
+ virtual WebRtc_Word32 IncomingPayload(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLengthByte,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timestamp = 0) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetMinimumPlayoutDelay()
+    // Set minimum playout delay, used for lip-sync.
+ //
+ // Input:
+ // -timeMs : minimum delay in milliseconds.
+ //
+ // Return value:
+ // -1 if failed to set the delay,
+ // 0 if the minimum delay is set.
+ //
+ virtual WebRtc_Word32 SetMinimumPlayoutDelay(
+ const WebRtc_Word32 timeMs) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Delay()
+ // Get the current playout delay.
+ //
+ // Output:
+    //   -delayMs            : delay in milliseconds.
+ //
+ // Return value:
+ // -1 if failed to get the delay,
+ // 0 if succeeded to get the delay.
+ //
+ virtual WebRtc_Word32 Delay(WebRtc_UWord16& delayMs) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterIncomingMessagesCallback()
+ // Used by the module to deliver messages to the codec module/application
+    // when a Dtmf tone is detected, as well as when it stops.
+ //
+ // Inputs:
+ // -inMsgCallback : pointer to callback function which will be called
+ // if Dtmf is detected.
+ // -cpt : enables CPT (Call Progress Tone) detection for the
+ // specified country. c.f. definition of ACMCountries
+ // in audio_coding_module_typedefs.h for valid
+ // entries. The default value disables CPT
+ // detection.
+ //
+ // Return value:
+ // -1 if the message callback could not be registered
+ // 0 if registration is successful.
+ //
+ virtual WebRtc_Word32 RegisterIncomingMessagesCallback(
+ AudioCodingFeedback* inMsgCallback,
+ const ACMCountries cpt = ACMDisableCountryDetection) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetDtmfPlayoutStatus()
+ // Configure Dtmf playout, i.e. whether out-of-band
+ // Dtmf tones are played or not.
+ //
+ // Input:
+    //   -enable             : if true, playout of out-of-band Dtmf tones is
+    //                         enabled; if false, it is disabled.
+ //
+ // Return value:
+ // -1 if the method fails, e.g. Dtmf playout is not supported.
+ // 0 if the status is set successfully.
+ //
+ virtual WebRtc_Word32 SetDtmfPlayoutStatus(const bool enable) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool DtmfPlayoutStatus()
+ // Get Dtmf playout status.
+ //
+ // Return value:
+ // true if out-of-band Dtmf tones are played,
+ // false if playout of Dtmf tones is disabled.
+ //
+ virtual bool DtmfPlayoutStatus() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetBackgroundNoiseMode()
+    // Sets the mode of the background noise playout in the event of a long
+    // packet-loss burst. For the valid modes see the declaration of
+ // ACMBackgroundNoiseMode in audio_coding_module_typedefs.h.
+ //
+ // Input:
+ // -mode : the mode for the background noise playout.
+ //
+ // Return value:
+ // -1 if failed to set the mode.
+ // 0 if succeeded in setting the mode.
+ //
+ virtual WebRtc_Word32 SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 BackgroundNoiseMode()
+ // Call this method to get the mode of the background noise playout.
+    // Playout of background noise is a result of a long packet-loss burst.
+ // See ACMBackgroundNoiseMode in audio_coding_module_typedefs.h for
+ // possible modes.
+ //
+ // Output:
+ // -mode : a reference to ACMBackgroundNoiseMode enumerator.
+ //
+ // Return value:
+ // 0 if the output is a valid mode.
+ // -1 if ACM failed to output a valid mode.
+ //
+ virtual WebRtc_Word32 BackgroundNoiseMode(
+ ACMBackgroundNoiseMode& mode) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 PlayoutTimestamp()
+ // The send timestamp of an RTP packet is associated with the decoded
+ // audio of the packet in question. This function returns the timestamp of
+ // the latest audio obtained by calling PlayoutData10ms().
+ //
+ // Input:
+ // -timestamp : a reference to a WebRtc_UWord32 to receive the
+ // timestamp.
+ // Return value:
+ // 0 if the output is a correct timestamp.
+ // -1 if failed to output the correct timestamp.
+ //
+ //
+ virtual WebRtc_Word32 PlayoutTimestamp(
+ WebRtc_UWord32& timestamp) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 DecoderEstimatedBandwidth()
+    // Get the estimate of the bandwidth, in bits/second, based on the incoming
+    // stream. This API is useful in one-way communication scenarios, where
+    // the bandwidth information is sent in an out-of-band fashion.
+    // Currently only supported if iSAC is registered as a receive codec.
+ //
+ // Return value:
+ // >0 bandwidth in bits/second.
+ // -1 if failed to get a bandwidth estimate.
+ //
+ virtual WebRtc_Word32 DecoderEstimatedBandwidth() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetPlayoutMode()
+    // Call this API to set the playout mode. The playout mode can be optimized
+    // for i) voice, ii) FAX or iii) streaming. In voice mode, NetEQ is
+    // optimized to deliver the highest audio quality while maintaining a
+    // minimum delay. In FAX mode, NetEQ is optimized to have as few delay
+    // changes as possible and to maintain a constant delay, perhaps large
+    // relative to voice mode, to avoid PLC. In streaming mode, we tolerate a
+    // little more delay to achieve better jitter robustness.
+ //
+ // Input:
+ // -mode : playout mode. Possible inputs are:
+ // "voice",
+ // "fax" and
+ // "streaming".
+ //
+ // Return value:
+ // -1 if failed to set the mode,
+    //    0 if succeeded.
+ //
+ virtual WebRtc_Word32 SetPlayoutMode(
+ const AudioPlayoutMode mode) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // AudioPlayoutMode PlayoutMode()
+    // Get playout mode, i.e. whether it is voice, FAX or streaming. See
+ // audio_coding_module_typedefs.h for definition of AudioPlayoutMode.
+ //
+ // Return value:
+ // voice: is for voice output,
+ // fax: a mode that is optimized for receiving FAX signals.
+ // In this mode NetEq tries to maintain a constant high
+ // delay to avoid PLC if possible.
+    //     streaming:     a mode that is suitable for streaming. In this mode we
+ // accept longer delay to improve jitter robustness.
+ //
+ virtual AudioPlayoutMode PlayoutMode() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+    // WebRtc_Word32 PlayoutData10Ms()
+ // Get 10 milliseconds of raw audio data for playout, at the given sampling
+ // frequency. ACM will perform a resampling if required.
+ //
+ // Input:
+ // -desiredFreqHz : the desired sampling frequency, in Hertz, of the
+ // output audio. If set to -1, the function returns the
+ // audio at the current sampling frequency.
+ //
+ // Output:
+ // -audioFrame : output audio frame which contains raw audio data
+ // and other relevant parameters, c.f.
+ // module_common_types.h for the definition of
+ // AudioFrame.
+ //
+ // Return value:
+ // -1 if the function fails,
+ // 0 if the function succeeds.
+ //
+ virtual WebRtc_Word32 PlayoutData10Ms(
+ const WebRtc_Word32 desiredFreqHz,
+ AudioFrame &audioFrame) = 0;
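+
+    // Usage sketch (illustrative; assumes an ACM instance "acm"): every 10 ms,
+    // pull audio at 16 kHz (an arbitrary choice) and hand it to the playout
+    // device.
+    //
+    //   AudioFrame audioFrame;
+    //   if (acm->PlayoutData10Ms(16000, audioFrame) == 0)
+    //   {
+    //       // audioFrame now holds 10 ms of raw audio at 16000 Hz.
+    //   }
+    //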
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // (CNG) Comfort Noise Generation
+ // Generate comfort noise when receiving DTX packets
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetReceiveVADStatus()
+    // Configure VAD status, i.e. on/off, on the incoming stream.
+ // Running VAD on decoded audio is desired in some applications, e.g.
+ // conferencing.
+ //
+ // Input:
+ // -enable : true to enable VAD on incoming stream, and false
+ // to disable.
+ //
+ // Return value:
+ // -1 if failed to enable/disable VAD,
+    //    0 if succeeded to enable/disable VAD.
+ //
+ virtual WebRtc_Word16 SetReceiveVADStatus(
+ const bool enable) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool ReceiveVADStatus()
+ // Call this API to get whether VAD is enabled on incoming stream or not.
+ // Running VAD on decoded audio is desired in some applications, e.g.
+ // conferencing.
+ //
+ // Return value:
+ // true if VAD is enabled on the incoming stream,
+ // false if VAD is disabled on the incoming stream.
+ //
+ virtual bool ReceiveVADStatus() const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetReceiveVADMode()
+ // Configure VAD aggressiveness on the incoming stream.
+ //
+ // Input:
+ // -mode : aggressiveness of the VAD on incoming stream.
+ // See audio_coding_module_typedefs.h for the
+ // definition of ACMVADMode, and possible
+ // values for aggressiveness.
+ //
+ // Return value:
+ // -1 if fails to set the mode,
+ // 0 if the mode is set successfully.
+ //
+ virtual WebRtc_Word16 SetReceiveVADMode(
+ const ACMVADMode mode) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // ACMVADMode ReceiveVADMode()
+ // Get VAD aggressiveness on the incoming stream.
+ //
+ // Return value:
+ // aggressiveness of VAD, running on the incoming stream. A more
+ // aggressive mode means more audio frames will be labeled as in-active.
+ // See audio_coding_module_typedefs.h for the definition of
+ // ACMVADMode.
+ //
+ virtual ACMVADMode ReceiveVADMode() const = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Codec specific
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetISACMaxRate()
+ // Set the maximum instantaneous rate of iSAC. For a payload of B bits
+    // with a frame-size of T sec, the instantaneous rate is B/T bits per
+ // second. Therefore, (B/T < maxRateBitPerSec) and
+ // (B < maxPayloadLenBytes * 8) are always satisfied for iSAC payloads,
+ // c.f SetISACMaxPayloadSize().
+ //
+ // Input:
+ // -maxRateBitPerSec : maximum instantaneous bit-rate given in bits/sec.
+ //
+ // Return value:
+ // -1 if failed to set the maximum rate.
+ // 0 if the maximum rate is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxRate(
+ const WebRtc_UWord32 maxRateBitPerSec) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetISACMaxPayloadSize()
+ // Set the maximum payload size of iSAC packets. No iSAC payload,
+ // regardless of its frame-size, may exceed the given limit. For
+ // an iSAC payload of size B bits and frame-size T sec we have;
+ // (B < maxPayloadLenBytes * 8) and (B/T < maxRateBitPerSec), c.f.
+ // SetISACMaxRate().
+ //
+ // Input:
+ // -maxPayloadLenBytes : maximum payload size in bytes.
+ //
+ // Return value:
+    //   -1 if failed to set the maximum payload size.
+    //    0 if the given limit is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ConfigISACBandwidthEstimator()
+ // Call this function to configure the bandwidth estimator of ISAC.
+    // During bit-rate adaptation, iSAC automatically adjusts the
+    // frame-size (either 30 or 60 ms) to save on RTP header overhead. The
+    // initial frame-size can be specified by the first argument. The
+    // configuration also includes the initial estimate of the bandwidth. The
+    // estimator starts from this point and converges to the actual
+    // bottleneck; this estimate is given by the second parameter.
+    // Furthermore, it is also possible to control the
+ // adaptation of frame-size. This is specified by the last parameter.
+ //
+ // Input:
+    //   -initFrameSizeMsec  : initial frame-size in milliseconds. For iSAC-wb
+ // 30 ms and 60 ms (default) are acceptable values,
+ // and for iSAC-swb 30 ms is the only acceptable
+    //                         value. Zero indicates the default value.
+ // -initRateBitPerSec : initial estimate of the bandwidth. Values
+ // between 10000 and 58000 are acceptable.
+ // -enforceFrameSize : if true, the frame-size will not be adapted.
+ //
+ // Return value:
+ // -1 if failed to configure the bandwidth estimator,
+ // 0 if the configuration was successfully applied.
+ //
+ virtual WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize = false) = 0;
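+
+    // Usage sketch (illustrative values only; assumes an ACM instance "acm"
+    // with iSAC-wb registered as the send codec):
+    //
+    //   acm->SetISACMaxRate(32000);                        // bits/sec
+    //   acm->SetISACMaxPayloadSize(400);                   // bytes
+    //   acm->ConfigISACBandwidthEstimator(30, 32000, false);
+    //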
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // statistics
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 NetworkStatistics()
+ // Get network statistics.
+ //
+    // Output:
+    //   -networkStatistics  : a structure to which the network statistics are
+    //                         written.
+ //
+ // Return value:
+ // -1 if failed to set the network statistics,
+ // 0 if statistics are set successfully.
+ //
+ virtual WebRtc_Word32 NetworkStatistics(
+ ACMNetworkStatistics& networkStatistics) const = 0;
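+
+    // Usage sketch (illustrative; assumes an ACM instance "acm"):
+    //
+    //   ACMNetworkStatistics networkStatistics;
+    //   if (acm->NetworkStatistics(networkStatistics) == 0)
+    //   {
+    //       // e.g. networkStatistics.currentBufferSize is the current jitter
+    //       // buffer size in ms.
+    //   }
+    //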
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 JitterStatistics()
+ // Get the jitter statistics.
+ //
+    // Output:
+    //   -jitterStatistics   : a structure to which the jitter statistics are
+    //                         written.
+ //
+ // Return value:
+ // -1 if failed to set the jitter statistics,
+ // 0 if jitter statistics are set successfully.
+ //
+ virtual WebRtc_Word32 JitterStatistics(
+ ACMJitterStatistics& jitterStatistics) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 PreferredBufferSize()
+ // Get the optimal buffer size calculated for the current network
+ // conditions.
+ //
+ // Output:
+    //   -prefBufSize        : the optimal size of the jitter buffer in
+ // milliseconds.
+ //
+ // Return value:
+ // -1 if the preferred buffer size could not be computed,
+ // 0 if a valid buffer is computed successfully.
+ //
+ virtual WebRtc_Word32 PreferredBufferSize(
+ WebRtc_UWord16& prefBufSize) const = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ResetJitterStatistics()
+ // Reset jitter statistics.
+ //
+ // Return value:
+ // -1 if failed to reset the statistics,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word32 ResetJitterStatistics() const = 0;
+};
+
+} // namespace webrtc
+
+#endif
+
diff --git a/src/modules/audio_coding/main/interface/audio_coding_module_typedefs.h b/src/modules/audio_coding/main/interface/audio_coding_module_typedefs.h
new file mode 100644
index 0000000..28b7a4e
--- /dev/null
+++ b/src/modules/audio_coding/main/interface/audio_coding_module_typedefs.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CODING_MODULE_TYPEDEFS_H
+#define AUDIO_CODING_MODULE_TYPEDEFS_H
+
+#include "typedefs.h"
+
+namespace webrtc
+{
+
+///////////////////////////////////////////////////////////////////////////
+// enum AudioPlayoutMode
+// An enumerator for different playout modes.
+//
+// -voice : This is the standard mode for VoIP calls. The trade-off
+// between low delay and jitter robustness is optimized
+// for high-quality two-way communication.
+//                   NetEQ's packet loss concealment and signal processing
+// capabilities are fully employed.
+// -fax : The fax mode is optimized for decodability of fax signals
+// rather than for perceived audio quality. When this mode
+// is selected, NetEQ will do as few delay changes as possible,
+// trying to maintain a high and constant delay. Meanwhile,
+// the packet loss concealment efforts are reduced.
+//
+//  -streaming   : In the case of one-way communication, such as a passive
+// conference participant, a webinar, or a streaming application,
+// this mode can be used to improve the jitter robustness at
+// the cost of increased delay.
+//
+enum AudioPlayoutMode
+{
+ voice = 0,
+ fax = 1,
+ streaming = 2
+};
+
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMSpeechType
+// An enumerator for possible labels of a decoded frame.
+//
+// -normal : a normal speech frame. If VAD is enabled on the
+//                          incoming stream, this label indicates that the
+// frame is active.
+// -PLC : a PLC frame. The corresponding packet was lost
+//                          and this frame was generated by PLC techniques.
+// -CNG : the frame is comfort noise. This happens if VAD
+// is enabled at the sender and we have received
+// SID.
+// -PLCCNG : PLC will fade to comfort noise if the duration
+// of PLC is long. This labels such a case.
+// -VADPassive : the VAD at the receiver recognizes this frame as
+// passive.
+//
+enum ACMSpeechType
+{
+ normal = 0,
+ PLC = 1,
+ CNG = 2,
+ PLCCNG = 3,
+ VADPassive = 4
+};
+
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMVADMode
+// An enumerator for aggressiveness of VAD
+// -VADNormal : least aggressive mode.
+// -VADLowBitrate : more aggressive than "VADNormal" to save on
+// bit-rate.
+// -VADAggr : an aggressive mode.
+//    -VADVeryAggr      : the most aggressive mode.
+//
+enum ACMVADMode
+{
+ VADNormal = 0,
+ VADLowBitrate = 1,
+ VADAggr = 2,
+ VADVeryAggr = 3
+};
+
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMCountries
+// An enumerator for countries, used when enabling CPT for a specific country.
+//
+enum ACMCountries
+{
+ ACMDisableCountryDetection = -1, // disable CPT detection
+ ACMUSA = 0,
+ ACMJapan,
+ ACMCanada,
+ ACMFrance,
+ ACMGermany,
+ ACMAustria,
+ ACMBelgium,
+ ACMUK,
+ ACMCzech,
+ ACMDenmark,
+ ACMFinland,
+ ACMGreece,
+ ACMHungary,
+    ACMIceland,
+ ACMIreland,
+ ACMItaly,
+ ACMLuxembourg,
+ ACMMexico,
+ ACMNorway,
+ ACMPoland,
+ ACMPortugal,
+ ACMSpain,
+ ACMSweden,
+ ACMTurkey,
+ ACMChina,
+ ACMHongkong,
+ ACMTaiwan,
+ ACMKorea,
+ ACMSingapore,
+ ACMNonStandard1 // non-standard countries
+};
+
+///////////////////////////////////////////////////////////////////////////
+// enum ACMAMRPackingFormat
+// An enumerator for the different bit-packing formats of the AMR codec
+// according to RFC 3267.
+// RFC 3267.
+//
+// -AMRUndefined : undefined.
+// -AMRBandwidthEfficient : bandwidth-efficient mode.
+//  -AMROctetAlligned       : octet-aligned mode.
+// -AMRFileStorage : file-storage mode.
+//
+enum ACMAMRPackingFormat
+{
+ AMRUndefined = -1,
+ AMRBandwidthEfficient = 0,
+ AMROctetAlligned = 1,
+ AMRFileStorage = 2
+};
+
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Struct containing network statistics
+//
+// -currentBufferSize : current jitter buffer size in ms
+// -preferredBufferSize : preferred (optimal) buffer size in ms
+// -currentPacketLossRate : loss rate (network + late) (in Q14)
+// -currentDiscardRate : late loss rate (in Q14)
+// -currentExpandRate : fraction (of original stream) of synthesized speech
+// inserted through expansion (in Q14)
+// -currentPreemptiveRate : fraction of synthesized speech inserted through
+// pre-emptive expansion (in Q14)
+// -currentAccelerateRate : fraction of data removed through acceleration (in Q14)
+typedef struct
+{
+ WebRtc_UWord16 currentBufferSize;
+ WebRtc_UWord16 preferredBufferSize;
+ WebRtc_UWord16 currentPacketLossRate;
+ WebRtc_UWord16 currentDiscardRate;
+ WebRtc_UWord16 currentExpandRate;
+ WebRtc_UWord16 currentPreemptiveRate;
+ WebRtc_UWord16 currentAccelerateRate;
+} ACMNetworkStatistics;
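+
+// The rate fields above are given in Q14, i.e. as fractions scaled by 2^14.
+// An illustrative conversion to percent, assuming "stats" is a filled
+// ACMNetworkStatistics:
+//
+//   WebRtc_UWord32 lossPercent =
+//       (static_cast<WebRtc_UWord32>(stats.currentPacketLossRate) * 100) >> 14;
+//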
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Struct containing jitter statistics
+//
+// -jbMinSize : smallest Jitter Buffer size during call in ms
+// -jbMaxSize : largest Jitter Buffer size during call in ms
+// -jbAvgSize : the average JB size, measured over time - ms
+// -jbChangeCount : number of times the Jitter Buffer changed (using Accelerate or Pre-emptive Expand)
+// -lateLossMs : amount (in ms) of audio data received late
+// -accelerateMs : milliseconds removed to reduce jitter buffer size
+// -flushedMs : milliseconds discarded through buffer flushing
+// -generatedSilentMs : milliseconds of generated silence
+// -interpolatedVoiceMs : milliseconds of synthetic audio data (non-background noise)
+// -interpolatedSilentMs : milliseconds of synthetic audio data (background noise level)
+// -numExpandTiny      : count of tiny expansions in output audio (less than 250 ms)
+// -numExpandSmall     : count of small expansions in output audio (250 to 500 ms)
+// -numExpandMedium    : count of medium expansions in output audio (500 to 2000 ms)
+// -numExpandLong      : count of long expansions in output audio (longer than 2000 ms)
+// -longestExpandDurationMs : duration of longest audio drop-out
+// -countIAT500ms : count of times we got small network outage (inter-arrival time in [500, 1000) ms)
+// -countIAT1000ms : count of times we got medium network outage (inter-arrival time in [1000, 2000) ms)
+// -countIAT2000ms : count of times we got large network outage (inter-arrival time >= 2000 ms)
+// -longestIATms : longest packet inter-arrival time in ms
+// -minPacketDelayMs : min time incoming Packet "waited" to be played
+// -maxPacketDelayMs : max time incoming Packet "waited" to be played
+// -avgPacketDelayMs : avg time incoming Packet "waited" to be played
+//
+typedef struct
+{
+ WebRtc_UWord32 jbMinSize;
+ WebRtc_UWord32 jbMaxSize;
+ WebRtc_UWord32 jbAvgSize;
+ WebRtc_UWord32 jbChangeCount;
+ WebRtc_UWord32 lateLossMs;
+ WebRtc_UWord32 accelerateMs;
+ WebRtc_UWord32 flushedMs;
+ WebRtc_UWord32 generatedSilentMs;
+ WebRtc_UWord32 interpolatedVoiceMs;
+ WebRtc_UWord32 interpolatedSilentMs;
+ WebRtc_UWord32 numExpandTiny;
+ WebRtc_UWord32 numExpandSmall;
+ WebRtc_UWord32 numExpandMedium;
+ WebRtc_UWord32 numExpandLong;
+ WebRtc_UWord32 longestExpandDurationMs;
+ WebRtc_UWord32 countIAT500ms;
+ WebRtc_UWord32 countIAT1000ms;
+ WebRtc_UWord32 countIAT2000ms;
+ WebRtc_UWord32 longestIATms;
+ WebRtc_UWord32 minPacketDelayMs;
+ WebRtc_UWord32 maxPacketDelayMs;
+ WebRtc_UWord32 avgPacketDelayMs;
+} ACMJitterStatistics;
+
+///////////////////////////////////////////////////////////////////////////
+//
+// Enumeration of background noise modes, a mapping from the NetEQ interface.
+//
+// -On : default "normal" behavior with eternal noise
+// -Fade : noise fades to zero after some time
+// -Off : background noise is always zero
+//
+enum ACMBackgroundNoiseMode
+{
+ On,
+ Fade,
+ Off
+};
+
+
+} // namespace webrtc
+
+#endif
diff --git a/src/modules/audio_coding/main/source/Android.mk b/src/modules/audio_coding/main/source/Android.mk
new file mode 100644
index 0000000..5074335
--- /dev/null
+++ b/src/modules/audio_coding/main/source/Android.mk
@@ -0,0 +1,90 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := libwebrtc_audio_coding
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+LOCAL_SRC_FILES := acm_amr.cc \
+ acm_amrwb.cc \
+ acm_cng.cc \
+ acm_codec_database.cc \
+ acm_dtmf_detection.cc \
+ acm_dtmf_playout.cc \
+ acm_g722.cc \
+ acm_g7221.cc \
+ acm_g7221c.cc \
+ acm_g729.cc \
+ acm_g7291.cc \
+ acm_generic_codec.cc \
+ acm_gsmfr.cc \
+ acm_ilbc.cc \
+ acm_isac.cc \
+ acm_neteq.cc \
+ acm_opus.cc \
+ acm_speex.cc \
+ acm_pcm16b.cc \
+ acm_pcma.cc \
+ acm_pcmu.cc \
+ acm_red.cc \
+ acm_resampler.cc \
+ audio_coding_module.cc \
+ audio_coding_module_impl.cc
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS :=
+MY_CFLAGS_C :=
+MY_DEFS := '-DNO_TCMALLOC' \
+ '-DNO_HEAPCHECKER' \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_LINUX' \
+ '-DWEBRTC_THREAD_RR' \
+ '-DWEBRTC_ANDROID' \
+ '-DANDROID'
+
+LOCAL_CFLAGS := $(MY_CFLAGS_C) $(MY_CFLAGS) $(MY_DEFS)
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/../../../.. \
+ $(LOCAL_PATH)/../interface \
+ $(LOCAL_PATH)/../../../interface \
+ $(LOCAL_PATH)/../../codecs/CNG/main/interface \
+ $(LOCAL_PATH)/../../codecs/G711/main/interface \
+ $(LOCAL_PATH)/../../codecs/G722/main/interface \
+ $(LOCAL_PATH)/../../codecs/iLBC/main/interface \
+ $(LOCAL_PATH)/../../codecs/iSAC/main/interface \
+ $(LOCAL_PATH)/../../codecs/iSAC/fix/interface \
+ $(LOCAL_PATH)/../../codecs/PCM16B/main/interface \
+ $(LOCAL_PATH)/../../NetEQ/main/interface \
+ $(LOCAL_PATH)/../../../../common_audio/resampler/main/interface \
+ $(LOCAL_PATH)/../../../../common_audio/signal_processing_library/main/interface \
+ $(LOCAL_PATH)/../../../../common_audio/vad/main/interface \
+ $(LOCAL_PATH)/../../../../system_wrappers/interface
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS :=
+
+LOCAL_LDFLAGS :=
+
+LOCAL_STATIC_LIBRARIES :=
+
+LOCAL_SHARED_LIBRARIES := libcutils \
+ libdl \
+ libstlport
+LOCAL_ADDITIONAL_DEPENDENCIES :=
+
+ifneq ($(MY_WEBRTC_NDK_BUILD),true)
+include external/stlport/libstlport.mk
+include $(BUILD_STATIC_LIBRARY)
+endif
\ No newline at end of file
diff --git a/src/modules/audio_coding/main/source/acm_amr.cc b/src/modules/audio_coding/main/source/acm_amr.cc
new file mode 100644
index 0000000..895dee6
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_amr.cc
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_amr.h"
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "audio_coding_module_typedefs.h"
+#include "rw_lock_wrapper.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_AMR
+    // NOTE! GSM AMR is not included in the open-source package. Modify this
+    // file, or your codec API, to match the function calls and the name of
+    // the AMR API file that is used.
+ // #include "amr_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_AMR
+ACMAMR::ACMAMR(WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMAMR::~ACMAMR()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMAMR::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMAMR::EnableDTX()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMAMR::DisableDTX()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMAMR::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+ACMGenericCodec*
+ACMAMR::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMAMR::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMAMR::DestructDecoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::SetBitRateSafe(
+ const WebRtc_Word32 /* rate */)
+{
+ return -1;
+}
+
+
+void
+ACMAMR::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::SetAMREncoderPackingFormat(
+ ACMAMRPackingFormat /* packingFormat */)
+{
+ return -1;
+}
+
+
+ACMAMRPackingFormat
+ACMAMR::AMREncoderPackingFormat() const
+{
+ return AMRUndefined;
+}
+
+WebRtc_Word16
+ACMAMR::SetAMRDecoderPackingFormat(
+ ACMAMRPackingFormat /* packingFormat */)
+{
+ return -1;
+}
+
+ACMAMRPackingFormat
+ACMAMR::AMRDecoderPackingFormat() const
+{
+ return AMRUndefined;
+}
+
+WebRtc_Word16
+ACMAMR::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+
+#define WEBRTC_AMR_MR475 0
+#define WEBRTC_AMR_MR515 1
+#define WEBRTC_AMR_MR59 2
+#define WEBRTC_AMR_MR67 3
+#define WEBRTC_AMR_MR74 4
+#define WEBRTC_AMR_MR795 5
+#define WEBRTC_AMR_MR102 6
+#define WEBRTC_AMR_MR122 7
+
+// Remove when integrating a real GSM AMR wrapper
+extern WebRtc_Word16 WebRtcAmr_CreateEnc(AMR_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcAmr_CreateDec(AMR_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcAmr_FreeEnc(AMR_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcAmr_FreeDec(AMR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmr_Encode(AMR_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16*output,
+ WebRtc_Word16 mode);
+extern WebRtc_Word16 WebRtcAmr_EncoderInit(AMR_encinst_t_* encInst,
+ WebRtc_Word16 dtxMode);
+extern WebRtc_Word16 WebRtcAmr_EncodeBitmode(AMR_encinst_t_* encInst,
+ ACMAMRPackingFormat format);
+extern WebRtc_Word16 WebRtcAmr_Decode(AMR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmr_DecodePlc(AMR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmr_DecoderInit(AMR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmr_DecodeBitmode(AMR_decinst_t_* decInst,
+ ACMAMRPackingFormat format);
+
+ACMAMR::ACMAMR(WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL),
+_encodingMode(-1), // invalid value
+_encodingRate(0) // invalid value
+{
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ _encoderPackingFormat = AMRBandwidthEfficient;
+ _decoderPackingFormat = AMRBandwidthEfficient;
+ return;
+}
+
+
+ACMAMR::~ACMAMR()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcAmr_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcAmr_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 vadDecision = 1;
+    // Sanity check that the rate is set correctly. We might skip this
+    // check; if the rate is not set correctly, the initialization flag
+    // should be false and we should not be here at all.
+ if((_encodingMode < WEBRTC_AMR_MR475) || (_encodingMode > WEBRTC_AMR_MR122))
+ {
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+ *bitStreamLenByte = WebRtcAmr_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream,
+ _encodingMode);
+
+ // Update VAD, if internal DTX is used
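+    // (The check below treats payloads of at most 7 bytes per 160 input
+    // samples, i.e. per 20 ms of 8 kHz audio, as SID/no-data frames.)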
+ if(_hasInternalDTX && _dtxEnabled)
+ {
+ if(*bitStreamLenByte <= (7*_frameLenSmpl/160))
+ {
+ vadDecision = 0;
+ }
+ for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
+ {
+ _vadLabel[n] = vadDecision;
+ }
+ }
+ // increment the read index
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMAMR::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMAMR::EnableDTX()
+{
+ if(_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // enable DTX
+ if(WebRtcAmr_EncoderInit(_encoderInstPtr, 1) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+WebRtc_Word16
+ACMAMR::DisableDTX()
+{
+ if(!_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // disable DTX
+ if(WebRtcAmr_EncoderInit(_encoderInstPtr, 0) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = false;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ WebRtc_Word16 status = SetBitRateSafe((codecParams->codecInstant).rate);
+ status += (WebRtcAmr_EncoderInit(_encoderInstPtr,
+ ((codecParams->enableDTX)? 1:0)) < 0)? -1:0;
+ status += (WebRtcAmr_EncodeBitmode(_encoderInstPtr,
+ _encoderPackingFormat ) < 0)? -1:0;
+ return (status < 0)? -1:0;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ WebRtc_Word16 status = (
+ (WebRtcAmr_DecoderInit(_decoderInstPtr) < 0)? -1:0);
+ status +=
+ WebRtcAmr_DecodeBitmode(_decoderInstPtr, _decoderPackingFormat);
+ return (status < 0)? -1:0;
+}
+
+
+WebRtc_Word32
+ACMAMR::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ // Todo:
+ // log error
+ return -1;
+ }
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_AMR_FUNCTIONS".
+    // Then call NetEQ to add the codec to its
+    // database.
+ SET_CODEC_PAR((codecDef), kDecoderAMR, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ SET_AMR_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMAMR::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalCreateEncoder()
+{
+ return WebRtcAmr_CreateEnc(&_encoderInstPtr);
+}
+
+
+void
+ACMAMR::DestructEncoderSafe()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcAmr_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+    // There is no encoder; set the following accordingly.
+ _encoderExist = false;
+ _encoderInitialized = false;
+ _encodingMode = -1; // invalid value
+ _encodingRate = 0; // invalid value
+}
+
+
+WebRtc_Word16
+ACMAMR::InternalCreateDecoder()
+{
+ return WebRtcAmr_CreateDec(&_decoderInstPtr);
+}
+
+
+void
+ACMAMR::DestructDecoderSafe()
+{
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcAmr_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+    // There is no decoder instance; set the following accordingly.
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+
+WebRtc_Word16
+ACMAMR::SetBitRateSafe(const WebRtc_Word32 rate)
+{
+ switch(rate)
+ {
+ case 4750:
+ _encodingMode = WEBRTC_AMR_MR475;
+ _encodingRate = 4750;
+ break;
+ case 5150:
+ _encodingMode = WEBRTC_AMR_MR515;
+ _encodingRate = 5150;
+ break;
+ case 5900:
+ _encodingMode = WEBRTC_AMR_MR59;
+ _encodingRate = 5900;
+ break;
+ case 6700:
+ _encodingMode = WEBRTC_AMR_MR67;
+ _encodingRate = 6700;
+ break;
+ case 7400:
+ _encodingMode = WEBRTC_AMR_MR74;
+ _encodingRate = 7400;
+ break;
+ case 7950:
+ _encodingMode = WEBRTC_AMR_MR795;
+ _encodingRate = 7950;
+ break;
+ case 10200:
+ _encodingMode = WEBRTC_AMR_MR102;
+ _encodingRate = 10200;
+ break;
+ case 12200:
+ _encodingMode = WEBRTC_AMR_MR122;
+ _encodingRate = 12200;
+ break;
+ default:
+ return -1;
+ break;
+ }
+ return 0;
+}
+
+
+void
+ACMAMR::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ // Free the memory where ptrInst is pointing to
+ if(ptrInst != NULL)
+ {
+ WebRtcAmr_FreeEnc((AMR_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMR::SetAMREncoderPackingFormat(
+ ACMAMRPackingFormat packingFormat)
+{
+ if((packingFormat != AMRBandwidthEfficient) &&
+ (packingFormat != AMROctetAlligned) &&
+ (packingFormat != AMRFileStorage))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Invalid AMR Encoder packing-format.");
+ return -1;
+ }
+ else
+ {
+ if(WebRtcAmr_EncodeBitmode(_encoderInstPtr,
+ packingFormat) < 0)
+ {
+ return -1;
+ }
+ else
+ {
+ _encoderPackingFormat = packingFormat;
+ return 0;
+ }
+
+ }
+}
+
+
+ACMAMRPackingFormat
+ACMAMR::AMREncoderPackingFormat() const
+{
+ return _encoderPackingFormat;
+}
+
+WebRtc_Word16
+ACMAMR::SetAMRDecoderPackingFormat(
+ ACMAMRPackingFormat packingFormat)
+{
+ if((packingFormat != AMRBandwidthEfficient) &&
+ (packingFormat != AMROctetAlligned) &&
+ (packingFormat != AMRFileStorage))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Invalid AMR decoder packing-format.");
+ return -1;
+ }
+ else
+ {
+ if(WebRtcAmr_DecodeBitmode(_decoderInstPtr,
+ packingFormat) < 0)
+ {
+ return -1;
+ }
+ else
+ {
+ _decoderPackingFormat = packingFormat;
+ return 0;
+ }
+
+ }
+}
+
+ACMAMRPackingFormat
+ACMAMR::AMRDecoderPackingFormat() const
+{
+ return _decoderPackingFormat;
+}
+
+WebRtc_Word16
+ACMAMR::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderAMR);
+}
+
+#endif
+
+} // namespace webrtc
+
diff --git a/src/modules/audio_coding/main/source/acm_amr.h b/src/modules/audio_coding/main/source/acm_amr.h
new file mode 100644
index 0000000..d206827
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_amr.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_AMR_H
+#define ACM_AMR_H
+
+#include "acm_generic_codec.h"
+
+// forward declaration
+struct AMR_encinst_t_;
+struct AMR_decinst_t_;
+
+namespace webrtc
+{
+
+enum ACMAMRPackingFormat;
+
+class ACMAMR : public ACMGenericCodec
+{
+public:
+ ACMAMR(WebRtc_Word16 codecID);
+ ~ACMAMR();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 SetAMREncoderPackingFormat(
+ const ACMAMRPackingFormat packingFormat);
+
+ ACMAMRPackingFormat AMREncoderPackingFormat() const;
+
+ WebRtc_Word16 SetAMRDecoderPackingFormat(
+ const ACMAMRPackingFormat packingFormat);
+
+ ACMAMRPackingFormat AMRDecoderPackingFormat() const;
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ WebRtc_Word16 EnableDTX();
+
+ WebRtc_Word16 DisableDTX();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ AMR_encinst_t_* _encoderInstPtr;
+ AMR_decinst_t_* _decoderInstPtr;
+ WebRtc_Word16 _encodingMode;
+ WebRtc_Word16 _encodingRate;
+ ACMAMRPackingFormat _encoderPackingFormat;
+ ACMAMRPackingFormat _decoderPackingFormat;
+};
+
+} // namespace webrtc
+
+#endif // ACM_AMR_H
diff --git a/src/modules/audio_coding/main/source/acm_amrwb.cc b/src/modules/audio_coding/main/source/acm_amrwb.cc
new file mode 100644
index 0000000..4958abc
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_amrwb.cc
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_amrwb.h"
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "audio_coding_module_typedefs.h"
+#include "rw_lock_wrapper.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_AMRWB
+    // NOTE! GSM AMR-WB is not included in the open-source package. Modify this
+    // file, or your codec API wrapper, so that the function calls and the name
+    // of the AMR-WB API header below match the implementation you are using.
+ // #include "amrwb_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_AMRWB
+ACMAMRwb::ACMAMRwb(WebRtc_Word16 /* codecID*/)
+{
+ return;
+}
+
+ACMAMRwb::~ACMAMRwb()
+{
+ return;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMAMRwb::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMAMRwb::EnableDTX()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMAMRwb::DisableDTX()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMAMRwb::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+ACMGenericCodec*
+ACMAMRwb::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+
+WebRtc_Word16
+ACMAMRwb::InternalCreateEncoder()
+{
+ return -1;
+}
+
+void
+ACMAMRwb::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMAMRwb::InternalCreateDecoder()
+{
+ return -1;
+}
+
+void
+ACMAMRwb::DestructDecoderSafe()
+{
+ return;
+}
+
+WebRtc_Word16
+ACMAMRwb::SetBitRateSafe(
+ const WebRtc_Word32 /* rate */)
+{
+ return -1;
+}
+
+void
+ACMAMRwb::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+WebRtc_Word16
+ACMAMRwb::SetAMRwbEncoderPackingFormat(
+ ACMAMRPackingFormat /* packingFormat */)
+{
+ return -1;
+}
+
+ACMAMRPackingFormat
+ACMAMRwb::AMRwbEncoderPackingFormat() const
+{
+ return AMRUndefined;
+}
+
+WebRtc_Word16 ACMAMRwb::SetAMRwbDecoderPackingFormat(
+ ACMAMRPackingFormat /* packingFormat */)
+{
+ return -1;
+}
+
+ACMAMRPackingFormat
+ACMAMRwb::AMRwbDecoderPackingFormat() const
+{
+ return AMRUndefined;
+}
+
+WebRtc_Word16
+ACMAMRwb::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+#else //===================== Actual Implementation =======================
+
+#define AMRWB_MODE_7k 0
+#define AMRWB_MODE_9k 1
+#define AMRWB_MODE_12k 2
+#define AMRWB_MODE_14k 3
+#define AMRWB_MODE_16k 4
+#define AMRWB_MODE_18k 5
+#define AMRWB_MODE_20k 6
+#define AMRWB_MODE_23k 7
+#define AMRWB_MODE_24k 8
+
+// Remove these when integrating a real GSM AMR-WB wrapper
+extern WebRtc_Word16 WebRtcAmrWb_CreateEnc(AMRWB_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcAmrWb_CreateDec(AMRWB_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcAmrWb_FreeEnc(AMRWB_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcAmrWb_FreeDec(AMRWB_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmrWb_Encode(AMRWB_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16*output,
+ WebRtc_Word16 mode);
+extern WebRtc_Word16 WebRtcAmrWb_EncoderInit(AMRWB_encinst_t_* encInst,
+ WebRtc_Word16 dtxMode);
+extern WebRtc_Word16 WebRtcAmrWb_EncodeBitmode(AMRWB_encinst_t_* encInst,
+ ACMAMRPackingFormat format);
+extern WebRtc_Word16 WebRtcAmrWb_Decode(AMRWB_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmrWb_DecodePlc(AMRWB_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmrWb_DecoderInit(AMRWB_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcAmrWb_DecodeBitmode(AMRWB_decinst_t_* decInst,
+ ACMAMRPackingFormat format);
+
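+// These extern declarations stand in for the (commented-out) AMR-WB wrapper
+// header above; they are expected to match, and should be replaced by, the
+// prototypes of the AMR-WB API actually used.
+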
+
+ACMAMRwb::ACMAMRwb(WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL),
+_encodingMode(-1), // invalid value
+_encodingRate(0) // invalid value
+{
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ _encoderPackingFormat = AMRBandwidthEfficient;
+ _decoderPackingFormat = AMRBandwidthEfficient;
+ return;
+}
+
+ACMAMRwb::~ACMAMRwb()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcAmrWb_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcAmrWb_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 vadDecision = 1;
+    // Sanity check that the encoding mode is set correctly. This check could
+    // be skipped: if the rate were not set correctly, the initialization flag
+    // would be false and we would never get here.
+ if((_encodingMode < AMRWB_MODE_7k) || (_encodingMode > AMRWB_MODE_24k))
+ {
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+ *bitStreamLenByte =
+ WebRtcAmrWb_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
+ _frameLenSmpl, (WebRtc_Word16*)bitStream, _encodingMode);
+
+ // Update VAD, if internal DTX is used
+ if(_hasInternalDTX && _dtxEnabled)
+ {
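+        // Payloads of at most 7 bytes per 160 input samples (i.e. 14 bytes
+        // for a 20 ms frame at 16 kHz) are assumed to be SID/no-data frames
+        // and are labeled as passive speech.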
+ if (*bitStreamLenByte <= (7*_frameLenSmpl/160))
+ {
+ vadDecision = 0;
+ }
+ for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
+ {
+ _vadLabel[n] = vadDecision;
+ }
+ }
+    // Increment the read index. This tells the caller how far
+    // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
+}
+
+WebRtc_Word16
+ACMAMRwb::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+WebRtc_Word16
+ACMAMRwb::EnableDTX()
+{
+ if(_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if the encoder exists
+ {
+ // enable DTX
+ if(WebRtcAmrWb_EncoderInit(_encoderInstPtr, 1) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+WebRtc_Word16
+ACMAMRwb::DisableDTX()
+{
+ if(!_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if the encoder exists
+ {
+ // disable DTX
+ if(WebRtcAmrWb_EncoderInit(_encoderInstPtr, 0) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = false;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, so disabling DTX is harmless
+ return 0;
+ }
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // sanity check
+ if (_encoderInstPtr == NULL)
+ {
+ return -1;
+ }
+
+ WebRtc_Word16 status = SetBitRateSafe((codecParams->codecInstant).rate);
+ status += (WebRtcAmrWb_EncoderInit(
+ _encoderInstPtr, ((codecParams->enableDTX)? 1:0)) < 0)? -1:0;
+ status += (WebRtcAmrWb_EncodeBitmode(
+ _encoderInstPtr, _encoderPackingFormat ) < 0)? -1:0;
+ return (status < 0)? -1:0;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ WebRtc_Word16 status = WebRtcAmrWb_DecodeBitmode(
+ _decoderInstPtr, _decoderPackingFormat);
+ status += ((WebRtcAmrWb_DecoderInit(_decoderInstPtr) < 0)? -1:0);
+ return (status < 0)? -1:0;
+}
+
+WebRtc_Word32
+ACMAMRwb::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+        // TODO: log error
+ return -1;
+ }
+
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_AMRWB_FUNCTIONS".
+    // Then call NetEQ to add the codec to its
+    // database.
+ SET_CODEC_PAR((codecDef), kDecoderAMRWB, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ SET_AMRWB_FUNCTIONS((codecDef));
+ return 0;
+}
+
+ACMGenericCodec*
+ACMAMRwb::CreateInstance(void)
+{
+ return NULL;
+}
+
+WebRtc_Word16
+ACMAMRwb::InternalCreateEncoder()
+{
+ return WebRtcAmrWb_CreateEnc(&_encoderInstPtr);
+}
+
+void
+ACMAMRwb::DestructEncoderSafe()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcAmrWb_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+    // there is no encoder any longer; reset the following state
+ _encoderExist = false;
+ _encoderInitialized = false;
+ _encodingMode = -1; // invalid value
+ _encodingRate = 0;
+}
+
+
+WebRtc_Word16
+ACMAMRwb::InternalCreateDecoder()
+{
+ return WebRtcAmrWb_CreateDec(&_decoderInstPtr);
+}
+
+void
+ACMAMRwb::DestructDecoderSafe()
+{
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcAmrWb_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+    // there is no decoder instance any longer; reset the following state
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+WebRtc_Word16
+ACMAMRwb::SetBitRateSafe(
+ const WebRtc_Word32 rate)
+{
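+    // Map the requested bit-rate (bits/s) to one of the nine AMR wide-band
+    // modes defined above; any other rate is rejected.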
+ switch(rate)
+ {
+ case 7000:
+ {
+ _encodingMode = AMRWB_MODE_7k;
+ _encodingRate = 7000;
+ break;
+ }
+ case 9000:
+ {
+ _encodingMode = AMRWB_MODE_9k;
+ _encodingRate = 9000;
+ break;
+ }
+ case 12000:
+ {
+ _encodingMode = AMRWB_MODE_12k;
+ _encodingRate = 12000;
+ break;
+ }
+ case 14000:
+ {
+ _encodingMode = AMRWB_MODE_14k;
+ _encodingRate = 14000;
+ break;
+ }
+ case 16000:
+ {
+ _encodingMode = AMRWB_MODE_16k;
+ _encodingRate = 16000;
+ break;
+ }
+ case 18000:
+ {
+ _encodingMode = AMRWB_MODE_18k;
+ _encodingRate = 18000;
+ break;
+ }
+ case 20000:
+ {
+ _encodingMode = AMRWB_MODE_20k;
+ _encodingRate = 20000;
+ break;
+ }
+ case 23000:
+ {
+ _encodingMode = AMRWB_MODE_23k;
+ _encodingRate = 23000;
+ break;
+ }
+ case 24000:
+ {
+ _encodingMode = AMRWB_MODE_24k;
+ _encodingRate = 24000;
+ break;
+ }
+ default:
+ {
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+void
+ACMAMRwb::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcAmrWb_FreeEnc((AMRWB_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMAMRwb::SetAMRwbEncoderPackingFormat(
+ ACMAMRPackingFormat packingFormat)
+{
+ if((packingFormat != AMRBandwidthEfficient) &&
+ (packingFormat != AMROctetAlligned) &&
+ (packingFormat != AMRFileStorage))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Invalid AMRwb encoder packing-format.");
+ return -1;
+ }
+ else
+ {
+
+ if(WebRtcAmrWb_EncodeBitmode(_encoderInstPtr,
+ packingFormat) < 0)
+ {
+ return -1;
+ }
+ else
+ {
+ _encoderPackingFormat = packingFormat;
+ return 0;
+ }
+ }
+}
+
+ACMAMRPackingFormat
+ACMAMRwb::AMRwbEncoderPackingFormat() const
+{
+ return _encoderPackingFormat;
+}
+
+WebRtc_Word16 ACMAMRwb::SetAMRwbDecoderPackingFormat(
+ ACMAMRPackingFormat packingFormat)
+{
+ if((packingFormat != AMRBandwidthEfficient) &&
+ (packingFormat != AMROctetAlligned) &&
+ (packingFormat != AMRFileStorage))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Invalid AMRwb decoder packing-format.");
+ return -1;
+ }
+ else
+ {
+ if(WebRtcAmrWb_DecodeBitmode(_decoderInstPtr,
+ packingFormat) < 0)
+ {
+ return -1;
+ }
+ else
+ {
+ _decoderPackingFormat = packingFormat;
+ return 0;
+ }
+ }
+}
+
+ACMAMRPackingFormat
+ACMAMRwb::AMRwbDecoderPackingFormat() const
+{
+ return _decoderPackingFormat;
+}
+
+WebRtc_Word16
+ACMAMRwb::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderAMRWB);
+}
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_amrwb.h b/src/modules/audio_coding/main/source/acm_amrwb.h
new file mode 100644
index 0000000..2aa1491
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_amrwb.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_AMRWB_H
+#define ACM_AMRWB_H
+
+#include "acm_generic_codec.h"
+
+// forward declaration
+struct AMRWB_encinst_t_;
+struct AMRWB_decinst_t_;
+
+namespace webrtc
+{
+
+enum ACMAMRPackingFormat;
+
+class ACMAMRwb : public ACMGenericCodec
+{
+public:
+ ACMAMRwb(WebRtc_Word16 codecID);
+ ~ACMAMRwb();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 SetAMRwbEncoderPackingFormat(
+ const ACMAMRPackingFormat packingFormat);
+
+ ACMAMRPackingFormat AMRwbEncoderPackingFormat() const;
+
+ WebRtc_Word16 SetAMRwbDecoderPackingFormat(
+ const ACMAMRPackingFormat packingFormat);
+
+ ACMAMRPackingFormat AMRwbDecoderPackingFormat() const;
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ WebRtc_Word16 EnableDTX();
+
+ WebRtc_Word16 DisableDTX();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ AMRWB_encinst_t_* _encoderInstPtr;
+ AMRWB_decinst_t_* _decoderInstPtr;
+
+ WebRtc_Word16 _encodingMode;
+ WebRtc_Word16 _encodingRate;
+ ACMAMRPackingFormat _encoderPackingFormat;
+ ACMAMRPackingFormat _decoderPackingFormat;
+};
+
+} // namespace webrtc
+
+#endif // ACM_AMRWB_H
diff --git a/src/modules/audio_coding/main/source/acm_cng.cc b/src/modules/audio_coding/main/source/acm_cng.cc
new file mode 100644
index 0000000..20bbdf4
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_cng.cc
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_cng.h"
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_cng.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+namespace webrtc
+{
+
+ACMCNG::ACMCNG(WebRtc_Word16 codecID)
+{
+ _encoderInstPtr = NULL;
+ _decoderInstPtr = NULL;
+ _codecID = codecID;
+ if(_codecID == ACMCodecDB::cnNB)
+ {
+ _sampFreqHz = 8000;
+ }
+ else if(_codecID == ACMCodecDB::cnWB)
+ {
+ _sampFreqHz = 16000;
+ }
+ else if(_codecID == ACMCodecDB::cnSWB)
+ {
+ _sampFreqHz = 32000;
+ }
+ else
+ {
+ _sampFreqHz = -1;
+ }
+ return;
+}
+
+
+ACMCNG::~ACMCNG()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcCng_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcCng_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+// CNG is not like a regular encoder; this function should normally
+// not be called. Instead, CNG encoding is handled from inside
+// ACMGenericCodec::ProcessFrameVADDTX.
+WebRtc_Word16
+ACMCNG::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMCNG::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+// CNG is not like a regular encoder; this function should normally
+// not be called. Instead, it is handled from inside
+// ACMGenericCodec::ProcessFrameVADDTX.
+WebRtc_Word16
+ACMCNG::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMCNG::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return WebRtcCng_InitDec(_decoderInstPtr);
+}
+
+
+WebRtc_Word32
+ACMCNG::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ // TODO (tlegrand): log error
+ return -1;
+ }
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_CNG_FUNCTIONS".
+    // Then return the structure back to NetEQ to add the codec to its
+    // database.
+
+ if (_sampFreqHz == 8000 || _sampFreqHz == 16000 || _sampFreqHz == 32000)
+ {
+ SET_CODEC_PAR((codecDef), kDecoderCNG, codecInst.pltype,
+ _decoderInstPtr, _sampFreqHz);
+ SET_CNG_FUNCTIONS((codecDef));
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+ACMGenericCodec* ACMCNG::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMCNG::InternalCreateEncoder()
+{
+ if(WebRtcCng_CreateEnc(&_encoderInstPtr) < 0)
+ {
+ _encoderInstPtr = NULL;
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+
+void
+ACMCNG::DestructEncoderSafe()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcCng_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ _encoderExist = false;
+ _encoderInitialized = false;
+}
+
+
+WebRtc_Word16
+ACMCNG::InternalCreateDecoder()
+{
+ if(WebRtcCng_CreateDec(&_decoderInstPtr) < 0)
+ {
+ _decoderInstPtr = NULL;
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+
+void
+ACMCNG::DestructDecoderSafe()
+{
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcCng_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+
+void
+ACMCNG::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcCng_FreeEnc((CNG_enc_inst*)ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMCNG::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderCNG);
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_cng.h b/src/modules/audio_coding/main/source/acm_cng.h
new file mode 100644
index 0000000..2ceb59f
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_cng.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_CNG_H
+#define ACM_CNG_H
+
+#include "acm_generic_codec.h"
+
+
+// forward declaration
+struct WebRtcCngEncInst;
+struct WebRtcCngDecInst;
+
+namespace webrtc
+{
+
+class ACMCNG : public ACMGenericCodec
+{
+public:
+ ACMCNG(WebRtc_Word16 codecID);
+ ~ACMCNG();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 EnableDTX()
+ {
+ return -1;
+ }
+
+ WebRtc_Word16 DisableDTX()
+ {
+ return -1;
+ }
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ WebRtcCngEncInst* _encoderInstPtr;
+ WebRtcCngDecInst* _decoderInstPtr;
+ WebRtc_Word16 _sampFreqHz;
+};
+
+} // namespace webrtc
+
+#endif // ACM_CNG_H
+
diff --git a/src/modules/audio_coding/main/source/acm_codec_database.cc b/src/modules/audio_coding/main/source/acm_codec_database.cc
new file mode 100644
index 0000000..f38331b
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_codec_database.cc
@@ -0,0 +1,953 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// 'conversion' conversion from 'type1' to 'type2', possible loss of data
+#pragma warning(disable: 4267)
+
+#include <stdio.h>
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "trace.h"
+
+// Includes needed to get version info
+// and to create the codecs
+#include "acm_pcma.h"
+#include "acm_pcmu.h"
+#include "g711_interface.h"
+#include "webrtc_neteq.h"
+#include "webrtc_cng.h"
+#include "acm_cng.h"
+#ifdef WEBRTC_CODEC_AVT
+ #include "acm_dtmf_playout.h"
+#endif
+#ifdef WEBRTC_CODEC_RED
+ #include "acm_red.h"
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ #include "acm_ilbc.h"
+ #include "ilbc.h"
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ #include "acm_isac.h"
+ #include "acm_isac_macros.h"
+ #include "isac.h"
+#endif
+#ifdef WEBRTC_CODEC_ISACFX
+ #include "acm_isac.h"
+ #include "acm_isac_macros.h"
+ #include "isacfix.h"
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+ #include "pcm16b.h"
+ #include "acm_pcm16b.h"
+#endif
+#ifdef WEBRTC_CODEC_G722
+ #include "acm_g722.h"
+ #include "g722_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#define TEMPORARY_BUFFER_SIZE 500
+
+bool ACMCodecDB::_isInitiated = false;
+WebRtc_Word16 ACMCodecDB::_noOfCodecs = 0;
+WebRtc_Word16 ACMCodecDB::_noNetEqDecoders = 0;
+WebRtc_Word16 ACMCodecDB::_noPayloads = 0;
+
+WebRtc_Word16 ACMCodecDB::isac = -1;
+WebRtc_Word16 ACMCodecDB::isacswb = -1;
+WebRtc_Word16 ACMCodecDB::pcm16b = -1;
+WebRtc_Word16 ACMCodecDB::pcm16bwb = -1;
+WebRtc_Word16 ACMCodecDB::pcm16bswb32 = -1;
+WebRtc_Word16 ACMCodecDB::pcm16bswb48 = -1;
+WebRtc_Word16 ACMCodecDB::pcmu = -1;
+WebRtc_Word16 ACMCodecDB::pcma = -1;
+WebRtc_Word16 ACMCodecDB::ilbc = -1;
+WebRtc_Word16 ACMCodecDB::gsmAMR = -1;
+WebRtc_Word16 ACMCodecDB::gsmAMRWB = -1;
+WebRtc_Word16 ACMCodecDB::g722 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1_32 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1_24 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1_16 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1C_48 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1C_32 = -1;
+WebRtc_Word16 ACMCodecDB::g722_1C_24 = -1;
+WebRtc_Word16 ACMCodecDB::g729 = -1;
+WebRtc_Word16 ACMCodecDB::gsmfr = -1;
+WebRtc_Word16 ACMCodecDB::speex8 = -1;
+WebRtc_Word16 ACMCodecDB::speex16 = -1;
+WebRtc_Word16 ACMCodecDB::cnNB = -1;
+WebRtc_Word16 ACMCodecDB::cnWB = -1;
+WebRtc_Word16 ACMCodecDB::cnSWB = -1;
+WebRtc_Word16 ACMCodecDB::avt = -1;
+WebRtc_Word16 ACMCodecDB::red = -1;
+
+WebRtc_UWord8 ACMCodecDB::_nrOfAllowedPacSizes[MAX_NR_OF_CODECS];
+WebRtc_UWord16 ACMCodecDB::_allowedPacSizesSmpl[MAX_NR_OF_CODECS][MAX_NR_OF_PACSIZES];
+CodecInst ACMCodecDB::_mycodecs[MAX_NR_OF_CODECS];
+enum WebRtcNetEQDecoder ACMCodecDB::_netEqDecoders[MAX_NR_OF_CODECS];
+WebRtc_Word8 ACMCodecDB::_versions[VERSION_SIZE];
+WebRtc_UWord16 ACMCodecDB::_basicCodingBlockSmpl[MAX_NR_OF_CODECS];
+WebRtc_UWord16 ACMCodecDB::_channelSupport[MAX_NR_OF_CODECS];
+WebRtc_UWord32 ACMCodecDB::_versionStringSize = 0;
+
+// We dynamically allocate some of the dynamic payload types to the defined
+// codecs. Note that there is a limited number of payload types; if more codecs
+// are defined, they will receive reserved fixed payload types (values 65-95).
+static int kDynamicPayloadtypes[MAX_NR_OF_CODECS] = {
+ 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 95, 94, 93, 92, 91, 90, 89,
+ 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 78, 77, 76, 75,
+ 74, 73, 72, 71, 70, 69, 68, 67, 66, 65
+};
+
+WebRtc_Word16
+ACMCodecDB::Codec(
+ WebRtc_Word16 listnr,
+ CodecInst* codec_inst)
+{
+    // Error check to see that listnr is between 0 and (_noOfCodecs - 1)
+ if ((listnr < 0) || (listnr >= _noOfCodecs))
+ {
+ return -1;
+ }
+
+ memcpy(codec_inst,&_mycodecs[listnr],sizeof(CodecInst));
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMCodecDB::CodecNumber(
+ const CodecInst* codecInst,
+ WebRtc_Word16& mirrorID,
+ WebRtc_Word8* errMessage,
+ WebRtc_Word16 maxErrMsgLenByte)
+{
+ WebRtc_Word16 codecID = ACMCodecDB::CodecNumber(codecInst, mirrorID);
+ if((codecID < 0) && (errMessage != NULL))
+ {
+ WebRtc_Word8 myErrMsg[1000];
+ if (codecID == -10)
+ {
+ // Codec not supported
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, plname=%s is \
+not a valid codec", codecInst->plname);
+ }
+ else if (codecID == -20)
+ {
+ // Sampling frequency doesn't match codec
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, plfreq=%d is \
+not a valid frequency for the codec %s", codecInst->plfreq, codecInst->plname);
+ }
+ else if (codecID == -30)
+ {
+ // Wrong payload type for Comfort Noise
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, payload \
+number %d is out of range for %s", codecInst->pltype, codecInst->plname);
+ }
+ else if (codecID == -40)
+ {
+ // Wrong payload type for RED
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, payload \
+number %d is out of range for %s", codecInst->pltype, codecInst->plname);
+ }
+ else if (codecID == -50)
+ {
+ // Packet size is out of range for the codec
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, Packet \
+size is out of range for %s", codecInst->plname);
+ }
+ else if (codecID == -60)
+ {
+ // Not a valid rate for the codec
+ sprintf(myErrMsg,
+ "Call to ACMCodecDB::CodecNumber failed, rate=%d \
+is not a valid rate for %s", codecInst->rate, codecInst->plname);
+ }
+ else
+ {
+ // Other error
+ sprintf(myErrMsg,
+ "invalid codec parameters to be registered, \
+ACMCodecDB::CodecNumber failed");
+ }
+ strncpy(errMessage, myErrMsg, maxErrMsgLenByte - 1);
+        // make sure that the message is null-terminated.
+ errMessage[maxErrMsgLenByte - 1] = '\0';
+ }
+ return codecID;
+}
+
+
+WebRtc_Word16
+ACMCodecDB::CodecNumber(
+ const CodecInst* codec_inst,
+ WebRtc_Word16& mirrorID)
+{
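+    // Returns the codec index on success. Negative values encode the failure
+    // reason: -10 unknown codec name, -20 frequency mismatch, -40 invalid
+    // payload type, -50 invalid packet size, -60 invalid rate.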
+ WebRtc_Word16 codecNumber = -1;
+ WebRtc_Word16 nameMatch = 0;
+
+    // Find a matching payload name and frequency in the codec list.
+    // Both need to be checked since some codecs have several entries with
+    // different frequencies (like iSAC).
+ for(WebRtc_Word16 i = 0; i < _noOfCodecs; i++)
+ {
+ if(STR_CASE_CMP(_mycodecs[i].plname, codec_inst->plname) == 0)
+ {
+            // We have found a matching codec name in the list
+            nameMatch = 1;
+
+            // Check if the frequency matches
+ if (codec_inst->plfreq == _mycodecs[i].plfreq)
+ {
+ codecNumber = i;
+ break;
+ }
+ }
+ }
+
+ if(codecNumber == -1)
+ {
+ if (!nameMatch) {
+ // Codec name doesn't match any codec in the list
+ return -10;
+ } else {
+ // Error in payload frequency, doesn't match codec
+ return -20;
+ }
+ }
+
+ // Check the validity of payload type
+ if(ValidPayloadType(codec_inst->pltype) < 0)
+ {
+ // Payload type out of range
+ return -40;
+ }
+
+    // Comfort Noise is a special case; packet-size & rate are not checked
+ if(STR_CASE_CMP(_mycodecs[codecNumber].plname, "CN") == 0)
+ {
+ mirrorID = codecNumber;
+ return codecNumber;
+ }
+
+    // RED is a special case; packet-size & rate are not checked
+ if(STR_CASE_CMP(_mycodecs[codecNumber].plname, "red") == 0)
+ {
+ mirrorID = codecNumber;
+ return codecNumber;
+ }
+
+ // Check the validity of packet size
+ if(_nrOfAllowedPacSizes[codecNumber] > 0)
+ {
+ // Check that the new packet size is in the valid range.
+ bool pacSizeOK = false;
+ for(WebRtc_Word32 i=0;i< _nrOfAllowedPacSizes[codecNumber];i++)
+ {
+ if(codec_inst->pacsize == _allowedPacSizesSmpl[codecNumber][i])
+ {
+ pacSizeOK = true;
+ break;
+ }
+ }
+ if(!pacSizeOK)
+ {
+ // Packet size is out of range
+ return -50;
+ }
+ }
+ if( codec_inst->pacsize < 1)
+ {
+ // Packet size is out of range
+ return -50;
+ }
+
+ mirrorID = codecNumber;
+
+ // Check the validity of rate
+ if(STR_CASE_CMP("isac", codec_inst->plname) == 0)
+ {
+ if(IsISACRateValid(codec_inst->rate))
+ {
+ // Set mirrorID to iSAC WB which is only created
+ // once to be used both for iSAC WB and SWB, because
+ // they need to share struct
+ mirrorID = ACMCodecDB::isac;
+ return codecNumber;
+ }
+ else
+ {
+ // Not a valid rate
+ return -60;
+ }
+ }
+ else if(STR_CASE_CMP("ilbc", codec_inst->plname) == 0)
+ {
+ return IsILBCRateValid(codec_inst->rate, codec_inst->pacsize) ? codecNumber : -60;
+ }
+
+ return IsRateValid(codecNumber, codec_inst->rate) ? codecNumber : -60;
+}
+
+
+WebRtc_Word16
+ACMCodecDB::ReceiverCodecNumber(
+ const CodecInst& codecInst,
+ WebRtc_Word16& mirrorID)
+{
+ WebRtc_Word16 codecNumber = -1;
+ WebRtc_Word16 nameMatch = 0;
+
+    // Find a matching payload name and frequency in the codec list.
+    // Both need to be checked since some codecs have several entries with
+    // different frequencies (like iSAC).
+ for(WebRtc_Word16 i = 0; i < _noOfCodecs; i++)
+ {
+ if(STR_CASE_CMP(_mycodecs[i].plname, codecInst.plname) == 0)
+ {
+            // We have found a matching codec name in the list
+            nameMatch = 1;
+
+            // Check if the frequency matches
+ if (codecInst.plfreq == _mycodecs[i].plfreq)
+ {
+ codecNumber = i;
+ mirrorID = codecNumber;
+
+ // Check if codec is iSAC, set mirrorID to iSAC WB
+ // which is only created once to be used both for
+ // iSAC WB and SWB, because they need to share struct
+ if(STR_CASE_CMP(codecInst.plname, "ISAC") == 0)
+ {
+ mirrorID = ACMCodecDB::isac;
+ }
+ break;
+ }
+ }
+ }
+
+    return codecNumber;
+}
+
+
+// Return number of codecs in the database
+WebRtc_Word16
+ACMCodecDB::NoOfCodecs()
+{
+ return _noOfCodecs;
+}
+
+
+// Return the number of NetEQ decoders in the database.
+// Note that the number is higher than _noOfCodecs because some payload names
+// are treated as different decoders in NetEQ, like iSAC wb and swb.
+WebRtc_Word16
+ACMCodecDB::NoNetEqDecoders()
+{
+ return _noNetEqDecoders;
+}
+
+
+// Return the codec sampling frequency for codec number "listnr" in the database
+WebRtc_Word32
+ACMCodecDB::CodecFreq(WebRtc_Word16 listnr)
+{
+    // Error check to see that listnr is between 0 and (_noOfCodecs - 1)
+ if ( listnr < 0 || listnr >= _noOfCodecs)
+ return -1;
+
+ return _mycodecs[listnr].plfreq;
+}
+
+// Return the codec's basic coding block size in samples
+WebRtc_Word16
+ACMCodecDB::BasicCodingBlock(
+ WebRtc_Word16 listNr)
+{
+    // Error check to see that listNr is between 0 and (_noOfCodecs - 1)
+ if ( listNr < 0 || listNr >= _noOfCodecs)
+ return -1;
+
+ return _basicCodingBlockSmpl[listNr];
+}
+
+// Return the NetEQ decoder database
+enum WebRtcNetEQDecoder*
+ACMCodecDB::NetEqDecoders()
+{
+ return _netEqDecoders;
+}
+
+// The version numbers of all codecs in the database are appended as text to the given buffer.
+WebRtc_Word16
+ACMCodecDB::CodecsVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position)
+{
+ WebRtc_UWord32 len = position;
+ strncpy(&version[len], _versions, remainingBufferInBytes);
+ position = (WebRtc_UWord32)strlen(version);
+ remainingBufferInBytes -= (position - len);
+ if(remainingBufferInBytes < _versionStringSize)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+// Get mirror ID. The ID is used for codecs that share a settings struct but
+// need different payload types.
+WebRtc_Word16
+ACMCodecDB::MirrorID(
+ const WebRtc_Word16 codecID)
+{
+ if(STR_CASE_CMP(_mycodecs[codecID].plname, "isac") == 0)
+ {
+ return ACMCodecDB::isac;
+ }
+ else
+ {
+ return codecID;
+ }
+}
+
+
+
+ACMGenericCodec*
+ACMCodecDB::CreateCodecInstance(
+ const CodecInst* codecInst)
+{
+ // All we have support for right now
+ if(!STR_CASE_CMP(codecInst->plname, "ISAC"))
+ {
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+ return new ACMISAC(isac);
+#endif
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "PCMU"))
+ {
+ return new ACMPCMU(pcmu);
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "PCMA"))
+ {
+ return new ACMPCMA(pcma);
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "ILBC"))
+ {
+#ifdef WEBRTC_CODEC_ILBC
+ return new ACMILBC(ilbc);
+#endif
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "G722"))
+ {
+#ifdef WEBRTC_CODEC_G722
+ return new ACMG722(g722);
+#endif
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "CN"))
+ {
+        // We need to check the sampling frequency to
+        // know which codec to create.
+ WebRtc_Word16 codecID;
+ switch(codecInst->plfreq)
+ {
+ case 8000:
+ {
+ codecID = ACMCodecDB::cnNB;
+ break;
+ }
+ case 16000:
+ {
+ codecID = ACMCodecDB::cnWB;
+ break;
+ }
+ case 32000:
+ {
+ codecID = ACMCodecDB::cnSWB;
+ break;
+ }
+ default:
+ return NULL;
+ }
+ return new ACMCNG(codecID);
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "L16"))
+ {
+#ifdef WEBRTC_CODEC_PCM16
+ // For this codec we need to check sampling frequency
+ // to know what codec to create.
+ WebRtc_Word16 codecID;
+ switch(codecInst->plfreq)
+ {
+ case 8000:
+ {
+ codecID = ACMCodecDB::pcm16b;
+ break;
+ }
+ case 16000:
+ {
+ codecID = ACMCodecDB::pcm16bwb;
+ break;
+ }
+ case 32000:
+ {
+ codecID = ACMCodecDB::pcm16bswb32;
+ break;
+ }
+ default:
+ return NULL;
+ }
+ return new ACMPCM16B(codecID);
+#endif
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "telephone-event"))
+ {
+#ifdef WEBRTC_CODEC_AVT
+ return new ACMDTMFPlayout(avt);
+#endif
+ }
+ else if(!STR_CASE_CMP(codecInst->plname, "red"))
+ {
+#ifdef WEBRTC_CODEC_RED
+ return new ACMRED(red);
+#endif
+ }
+ return NULL;
+}
+
+
+// Here we build the complete database "_mycodecs" of our codecs
+void
+ACMCodecDB::initACMCodecDB()
+{
+ if(_isInitiated)
+ {
+ return;
+ }
+ else
+ {
+ _isInitiated = true;
+ }
+ WebRtc_Word8 versionNrBuff[TEMPORARY_BUFFER_SIZE];
+ WebRtc_Word32 remainingSize = VERSION_SIZE;
+
+ _versions[0] = '\0';
+
+ // Init the stereo settings vector
+ for (int i=0; i<MAX_NR_OF_CODECS; i++)
+ {
+ _channelSupport[i] = 1;
+ }
+
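+    // Each codec entry below fills in a CodecInst (payload name, payload type,
+    // sampling frequency, default rate and packet size), lists its allowed
+    // packet sizes in samples, registers the corresponding NetEQ decoder and,
+    // where available, appends the codec version string to _versions.
+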
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+ strcpy(_mycodecs[_noOfCodecs].plname,"ISAC");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = ISACWB_DEFAULT_RATE; // Default rate
+ _mycodecs[_noOfCodecs].plfreq = 16000;
+ _mycodecs[_noOfCodecs].pltype = 103;
+ _mycodecs[_noOfCodecs].pacsize = ISACWB_PAC_SIZE; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 2;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = ISACWB_PAC_SIZE;   // 480 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = ISACWB_PAC_SIZE*2; // 960 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+
+ isac=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderISAC;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ ACM_ISAC_VERSION(versionNrBuff);
+ strncat(_versions, "ISAC\t\t", remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, versionNrBuff, remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, "\n", remainingSize);
+# if (defined(WEBRTC_CODEC_ISAC))
+ strcpy(_mycodecs[_noOfCodecs].plname,"ISAC");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = ISACSWB_DEFAULT_RATE; // Default rate
+ _mycodecs[_noOfCodecs].plfreq = 32000;
+ _mycodecs[_noOfCodecs].pltype = 104;
+ _mycodecs[_noOfCodecs].pacsize = ISACSWB_PAC_SIZE; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = ISACSWB_PAC_SIZE; // 960 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+
+ isacswb = _noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderISACswb;
+ _noOfCodecs++;
+# endif
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+ strcpy(_mycodecs[_noOfCodecs].plname,"L16");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 128000;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = kDynamicPayloadtypes[_noPayloads++];
+ _mycodecs[_noOfCodecs].pacsize = 80; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 4;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 80;  // 80 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 160; // 160 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 240; // 240 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 320; // 320 samples equals 40 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+ _channelSupport[_noOfCodecs] = 2;
+
+ pcm16b=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderPCM16B;
+ _noOfCodecs++;
+
+
+ strcpy(_mycodecs[_noOfCodecs].plname,"L16");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 256000;
+ _mycodecs[_noOfCodecs].plfreq = 16000;
+ _mycodecs[_noOfCodecs].pltype = kDynamicPayloadtypes[_noPayloads++];
+ _mycodecs[_noOfCodecs].pacsize = 160; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 4;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 160; // 160 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 320; // 320 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 480; // 480 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 640; // 640 samples equals 40 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+ _channelSupport[_noOfCodecs] = 2;
+
+ pcm16bwb=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderPCM16Bwb;
+ _noOfCodecs++;
+
+ strcpy(_mycodecs[_noOfCodecs].plname,"L16");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 512000;
+ _mycodecs[_noOfCodecs].plfreq = 32000;
+ _mycodecs[_noOfCodecs].pltype = kDynamicPayloadtypes[_noPayloads++];
+ _mycodecs[_noOfCodecs].pacsize = 320; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 2;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 320; // 320 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 640; // 640 samples equals 20 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+ _channelSupport[_noOfCodecs] = 2;
+
+ pcm16bswb32=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderPCM16Bswb32kHz;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, "L16\t\t1.0.0\n", remainingSize);
+#endif
+ strcpy(_mycodecs[_noOfCodecs].plname,"PCMU");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 64000;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = 0;
+ _mycodecs[_noOfCodecs].pacsize = 160; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 6;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 80;  // 80 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 160; // 160 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 240; // 240 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 320; // 320 samples equals 40 ms
+    _allowedPacSizesSmpl[_noOfCodecs][4] = 400; // 400 samples equals 50 ms
+    _allowedPacSizesSmpl[_noOfCodecs][5] = 480; // 480 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0; /* 0 indicates all allowed
+ packetsizes can be used as
+ basic coding block */
+ _channelSupport[_noOfCodecs] = 2;
+
+ pcmu=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderPCMu;
+ _noOfCodecs++;
+
+ strcpy(_mycodecs[_noOfCodecs].plname,"PCMA");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 64000;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = 8;
+ _mycodecs[_noOfCodecs].pacsize = 160; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 6;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 80;  // 80 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 160; // 160 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 240; // 240 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 320; // 320 samples equals 40 ms
+    _allowedPacSizesSmpl[_noOfCodecs][4] = 400; // 400 samples equals 50 ms
+    _allowedPacSizesSmpl[_noOfCodecs][5] = 480; // 480 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0; /* 0 indicates all allowed
+ packetsizes can be used as
+ basic coding block */
+ _channelSupport[_noOfCodecs] = 2;
+
+ pcma=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderPCMa;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ WebRtcG711_Version(versionNrBuff, TEMPORARY_BUFFER_SIZE);
+ strncat(_versions, "G.711\t\t", remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, versionNrBuff, remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, "\n", remainingSize);
+
+#ifdef WEBRTC_CODEC_ILBC
+ strcpy(_mycodecs[_noOfCodecs].plname,"iLBC");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 13300;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = 102;
+ _mycodecs[_noOfCodecs].pacsize = 240; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 4;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 160; // 160 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 240; // 240 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 320; // 320 samples equals 40 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 480; // 480 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0; /* 0 indicates all allowed
+ packetsizes can be used as
+ basic coding block */
+
+ ilbc=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderILBC;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ WebRtcIlbcfix_version(versionNrBuff);
+ strncat(_versions, "ILBC\t\t", remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, versionNrBuff, remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, "\n", remainingSize);
+#endif
+#ifdef WEBRTC_CODEC_G722
+ strcpy(_mycodecs[_noOfCodecs].plname,"G722");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 64000;
+ _mycodecs[_noOfCodecs].plfreq = 16000;
+ _mycodecs[_noOfCodecs].pltype = 9;
+ _mycodecs[_noOfCodecs].pacsize = 320; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 6;
+    _allowedPacSizesSmpl[_noOfCodecs][0] = 160; // 160 samples equals 10 ms
+    _allowedPacSizesSmpl[_noOfCodecs][1] = 320; // 320 samples equals 20 ms
+    _allowedPacSizesSmpl[_noOfCodecs][2] = 480; // 480 samples equals 30 ms
+    _allowedPacSizesSmpl[_noOfCodecs][3] = 640; // 640 samples equals 40 ms
+    _allowedPacSizesSmpl[_noOfCodecs][4] = 800; // 800 samples equals 50 ms
+    _allowedPacSizesSmpl[_noOfCodecs][5] = 960; // 960 samples equals 60 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+ _channelSupport[_noOfCodecs] = 2;
+
+ g722=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderG722;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ WebRtcG722_Version(versionNrBuff, TEMPORARY_BUFFER_SIZE);
+ strncat(_versions, "G.722\t\t", remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, versionNrBuff, remainingSize);
+
+#endif
+
+ // Comfort Noise is always included in the build, no #ifdef needed
+ strcpy(_mycodecs[_noOfCodecs].plname,"CN");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 0;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = 13;
+ _mycodecs[_noOfCodecs].pacsize = 240; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+ _allowedPacSizesSmpl[_noOfCodecs][0] = 240; // 240 samples equals 30 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 240;
+
+ cnNB=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderCNG;
+ _noOfCodecs++;
+
+ strcpy(_mycodecs[_noOfCodecs].plname,"CN");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 0;
+ _mycodecs[_noOfCodecs].plfreq = 16000;
+ _mycodecs[_noOfCodecs].pltype = 98;
+ _mycodecs[_noOfCodecs].pacsize = 480; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+ _allowedPacSizesSmpl[_noOfCodecs][0] = 480; // 480 samples equals 30 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 480;
+
+ cnWB=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderCNG;
+ _noOfCodecs++;
+
+ strcpy(_mycodecs[_noOfCodecs].plname,"CN");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 0;
+ _mycodecs[_noOfCodecs].plfreq = 32000;
+ _mycodecs[_noOfCodecs].pltype = 99;
+ _mycodecs[_noOfCodecs].pacsize = 960; // Default packet size
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+ _allowedPacSizesSmpl[_noOfCodecs][0] = 960; // 960 samples equals 30 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 960;
+
+ cnSWB=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderCNG;
+ _noOfCodecs++;
+
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ WebRtcCng_Version(versionNrBuff);
+ strncat(_versions, "CNG\t\t", remainingSize);
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, versionNrBuff, remainingSize);
+
+#ifdef WEBRTC_CODEC_AVT
+ strcpy(_mycodecs[_noOfCodecs].plname,"telephone-event");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].rate = 0;
+ _mycodecs[_noOfCodecs].pltype = 106;
+    _mycodecs[_noOfCodecs].pacsize = 240; // Default packet size, 240 samples (30 ms)
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+ _allowedPacSizesSmpl[_noOfCodecs][0] = 240; // 240 samples equals 30 ms
+ _basicCodingBlockSmpl[_noOfCodecs] = 240;
+
+ avt=_noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderAVT;
+ _noOfCodecs++;
+
+ // Currently tone generation doesn't have a getVersion-function
+ remainingSize = (WebRtc_Word32)(VERSION_SIZE - strlen(_versions));
+ strncat(_versions, "Tone Generation\t1.0.0\n", remainingSize);
+#endif
+#ifdef WEBRTC_CODEC_RED
+ strcpy(_mycodecs[_noOfCodecs].plname,"red");
+ _mycodecs[_noOfCodecs].channels = 1;
+ _mycodecs[_noOfCodecs].rate = 0;
+ _mycodecs[_noOfCodecs].plfreq = 8000;
+ _mycodecs[_noOfCodecs].pltype = 127;
+ _mycodecs[_noOfCodecs].pacsize = 0;
+
+ _nrOfAllowedPacSizes[_noOfCodecs] = 1;
+ _allowedPacSizesSmpl[_noOfCodecs][0] = 0;
+ _basicCodingBlockSmpl[_noOfCodecs] = 0;
+
+ red = _noOfCodecs;
+ _netEqDecoders[_noNetEqDecoders++] = kDecoderRED;
+ _noOfCodecs++;
+#endif
+
+
+ _versionStringSize = (WebRtc_UWord32)strlen(_versions);
+}
+
+
+// Check if the bitrate is valid for the codec
+bool
+ACMCodecDB::IsRateValid(
+ const WebRtc_Word16 listNr,
+ const WebRtc_Word32 rate)
+{
+ if(_mycodecs[listNr].rate == rate)
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+
+// Check if the bitrate is valid for iSAC
+bool
+ACMCodecDB::IsISACRateValid(
+#if (!defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX))
+ const WebRtc_Word32 /* rate */)
+{
+ return false;
+#else
+ const WebRtc_Word32 rate)
+{
+ if((rate == -1) ||
+ ((rate <= 56000) && (rate >= 10000)))
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+#endif
+}
+
+// Check if the bitrate is valid for iLBC
+bool
+ACMCodecDB::IsILBCRateValid(
+#ifndef WEBRTC_CODEC_ILBC
+ const WebRtc_Word32 /* rate */,
+ const WebRtc_Word16 /* frameSizeSamples */)
+{
+ return false;
+#else
+ const WebRtc_Word32 rate,
+ const WebRtc_Word16 frameSizeSamples)
+{
+ if(((frameSizeSamples == 240) || (frameSizeSamples == 480)) &&
+ (rate == 13300))
+ {
+ return true;
+ }
+ else if(((frameSizeSamples == 160) || (frameSizeSamples == 320)) &&
+ (rate == 15200))
+ {
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+#endif
+}
+
+// Check if the payload type is valid
+WebRtc_Word16
+ACMCodecDB::ValidPayloadType(
+ const int payloadType)
+{
+ if((payloadType < 0) || (payloadType > 127))
+ {
+ return -1;
+ }
+ return 0;
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_codec_database.h b/src/modules/audio_coding/main/source/acm_codec_database.h
new file mode 100644
index 0000000..c83e84b
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_codec_database.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_CODEC_DATABASE_H
+#define ACM_CODEC_DATABASE_H
+
+#include "acm_generic_codec.h"
+#include "common_types.h"
+#include "typedefs.h"
+#include "webrtc_neteq.h"
+
+namespace webrtc
+{
+
+// These might need to be increased if adding a new codec to
+// the database
+#define MAX_NR_OF_CODECS 52
+#define MAX_NR_OF_PACSIZES 6
+#define VERSION_SIZE 1000
+
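+// Usage sketch (hypothetical caller, not part of this change): the database is
+// initialized once and can then be queried by index, e.g.
+//     ACMCodecDB::initACMCodecDB();
+//     CodecInst inst;
+//     for (WebRtc_Word16 i = 0; i < ACMCodecDB::NoOfCodecs(); i++)
+//     {
+//         ACMCodecDB::Codec(i, &inst);
+//     }
+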
+class ACMCodecDB
+{
+public:
+ static WebRtc_Word16 Codec(
+ WebRtc_Word16 listnr,
+ CodecInst* codec_inst);
+
+
+ static WebRtc_Word16 CodecNumber(
+ const CodecInst* codec_inst,
+ WebRtc_Word16& mirrorID,
+ WebRtc_Word8* errMessage,
+ WebRtc_Word16 maxErrMsgLenByte);
+
+ static WebRtc_Word16 CodecNumber(
+ const CodecInst* codec_inst,
+ WebRtc_Word16& mirrorID);
+
+ static WebRtc_Word16 ReceiverCodecNumber(
+ const CodecInst& codecInst,
+ WebRtc_Word16& mirrorID);
+
+ static WebRtc_Word16 NoOfCodecs();
+
+ static WebRtc_Word16 NoNetEqDecoders();
+
+ static WebRtc_Word32 CodecFreq(
+ WebRtc_Word16 listnr);
+
+ static WebRtc_Word16 BasicCodingBlock(
+ WebRtc_Word16 listnr);
+
+ static enum WebRtcNetEQDecoder* NetEqDecoders();
+
+ static WebRtc_Word16 CodecsVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position);
+
+ static WebRtc_Word16 MirrorID(
+ const WebRtc_Word16 codecID);
+
+ static ACMGenericCodec* CreateCodecInstance(
+ const CodecInst* codecInst);
+
+ static void initACMCodecDB();
+
+ static bool IsRateValid(
+ const WebRtc_Word16 listNr,
+ const WebRtc_Word32 rate);
+
+ static bool IsISACRateValid(
+ const WebRtc_Word32 rate);
+
+ static bool IsILBCRateValid(
+ const WebRtc_Word32 rate,
+ const WebRtc_Word16 frameSizeSamples);
+
+ static WebRtc_Word16 ValidPayloadType(
+ const int payloadType);
+
+ static WebRtc_Word16
+ pcm16b,
+ pcm16bwb,
+ pcm16bswb32,
+ pcm16bswb48,
+ pcmu,
+ pcma,
+ ilbc,
+ gsmAMR,
+ gsmAMRWB,
+ g722,
+ g722_1_32,
+ g722_1_24,
+ g722_1_16,
+ g722_1C_48,
+ g722_1C_32,
+ g722_1C_24,
+ g729,
+ isac,
+ isacswb,
+ gsmfr,
+ speex8,
+ speex16,
+ cnNB,
+ cnWB,
+ cnSWB,
+ avt,
+ red;
+
+ static WebRtc_Word16 _noOfCodecs;
+ static WebRtc_Word16 _noNetEqDecoders;
+ static WebRtc_Word16 _noPayloads;
+
+ // Information about the supported codecs
+ static CodecInst _mycodecs[MAX_NR_OF_CODECS];
+ static enum WebRtcNetEQDecoder _netEqDecoders[MAX_NR_OF_CODECS];
+ static WebRtc_UWord16 _allowedPacSizesSmpl[MAX_NR_OF_CODECS][MAX_NR_OF_PACSIZES];
+ static WebRtc_UWord8 _nrOfAllowedPacSizes[MAX_NR_OF_CODECS];
+ static WebRtc_UWord16 _basicCodingBlockSmpl[MAX_NR_OF_CODECS];
+ static WebRtc_UWord16 _channelSupport[MAX_NR_OF_CODECS];
+
+private:
+ static bool _isInitiated;
+ static WebRtc_Word8 _versions[VERSION_SIZE];
+ static WebRtc_UWord32 _versionStringSize;
+};
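+
+// Illustrative sketch, not part of the original patch: enumerating the codec
+// database through the static interface above. NoOfCodecs() gives the number
+// of entries and Codec() fills in a CodecInst for a given list number; the
+// helper name is hypothetical and only demonstrates the call pattern.
+inline void ExampleListCodecs(CodecInst* codecArray, WebRtc_Word16 arrayLen)
+{
+ WebRtc_Word16 numCodecs = ACMCodecDB::NoOfCodecs();
+ if(numCodecs > arrayLen)
+ {
+ numCodecs = arrayLen;
+ }
+ for(WebRtc_Word16 i = 0; i < numCodecs; i++)
+ {
+ ACMCodecDB::Codec(i, &codecArray[i]);
+ }
+}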
+
+} // namespace webrtc
+
+#endif //ACM_CODEC_DATABASE_H
diff --git a/src/modules/audio_coding/main/source/acm_common_defs.h b/src/modules/audio_coding/main/source/acm_common_defs.h
new file mode 100644
index 0000000..7258fc7
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_common_defs.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_ACM_COMMON_DEFS_H
+#define WEBRTC_ACM_COMMON_DEFS_H
+
+#include <string.h>
+
+#include "audio_coding_module_typedefs.h"
+#include "common_types.h"
+#include "engine_configurations.h"
+#include "typedefs.h"
+
+
+// Check which codecs are enabled; codecs that are not compatible with each
+// other must not be enabled at the same time.
+#if ((defined WEBRTC_CODEC_ISAC) && (defined WEBRTC_CODEC_ISACFX))
+ # error iSAC and iSACFX codecs cannot be enabled at the same time
+#endif
+
+#ifdef WIN32
+ // OS-dependent case-insensitive string comparison
+ #define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+#else
+ // OS-dependent case-insensitive string comparison
+ #define STR_CASE_CMP(x,y) ::strcasecmp(x,y)
+#endif
+
+namespace webrtc
+{
+
+// 60 ms is the maximum block size we support. An extra 20 ms is added for
+// safety in case the Process() method is not called when it should be, i.e.
+// we accept 20 ms of jitter. 80 ms @ 32 kHz (super wide-band) is 2560 samples.
+#define AUDIO_BUFFER_SIZE_W16 2560
+
+// There is one timestamp per 10 ms of audio. The audio buffer may contain at
+// most 32 blocks of 10 ms audio, which happens when the sampling frequency is
+// 8000 Hz (80 samples per block). The size of the buffer where we keep
+// timestamps is therefore defined as follows:
+#define TIMESTAMP_BUFFER_SIZE_W32 (AUDIO_BUFFER_SIZE_W16/80)
+
+// The maximum size of a payload, that is 60 ms of PCM-16 @ 32 kHz stereo
+#define MAX_PAYLOAD_SIZE_BYTE 7680
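+
+// The arithmetic behind the sizes above, restated as compile-time checks.
+// This is an illustrative sketch, not part of the original patch, and the
+// typedef names are arbitrary: 80 ms at 32 kHz is 80 * 32 = 2560 samples,
+// one timestamp per 10 ms block of 80 samples (the 8 kHz worst case) gives
+// 2560 / 80 = 32 entries, and 60 ms of PCM-16 at 32 kHz stereo is
+// 60 * 32 * 2 * 2 = 7680 bytes.
+typedef char AcmAudioBufferSizeCheck[(AUDIO_BUFFER_SIZE_W16 == 80 * 32) ? 1 : -1];
+typedef char AcmTimestampBufferSizeCheck[(TIMESTAMP_BUFFER_SIZE_W32 == 32) ? 1 : -1];
+typedef char AcmMaxPayloadSizeCheck[(MAX_PAYLOAD_SIZE_BYTE == 60 * 32 * 2 * 2) ? 1 : -1];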
+
+// General codec specific defines
+#define ISACWB_DEFAULT_RATE 32000
+#define ISACSWB_DEFAULT_RATE 56000
+#define ISACWB_PAC_SIZE 480
+#define ISACSWB_PAC_SIZE 960
+
+// An encoded bit-stream is labeled by one of the following enumerators.
+//
+// kNoEncoding : There has been no encoding.
+// kActiveNormalEncoded : Active audio frame coded by the codec.
+// kPassiveNormalEncoded : Passive audio frame coded by the codec.
+// kPassiveDTXNB : Passive audio frame coded by narrow-band CN.
+// kPassiveDTXWB : Passive audio frame coded by wide-band CN.
+// kPassiveDTXSWB : Passive audio frame coded by super-wide-band CN.
+//
+enum WebRtcACMEncodingType
+{
+ kNoEncoding,
+ kActiveNormalEncoded,
+ kPassiveNormalEncoded,
+ kPassiveDTXNB,
+ kPassiveDTXWB,
+ kPassiveDTXSWB
+};
+
+// A structure which contains codec parameters. It is used, for instance,
+// when initializing the encoder and the decoder.
+//
+// codecInstant : c.f. common_types.h
+// enableDTX : set true to enable DTX. If codec does not have
+// internal DTX, this will enable VAD.
+// enableVAD : set true to enable VAD.
+// vadMode : VAD mode, c.f. audio_coding_module_typedefs.h
+// for possible values.
+struct WebRtcACMCodecParams
+{
+ CodecInst codecInstant;
+ bool enableDTX;
+ bool enableVAD;
+ ACMVADMode vadMode;
+};
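+
+// Illustrative sketch, not part of the original patch: filling in the
+// structure for a wide-band iSAC send codec with DTX enabled. The CodecInst
+// field names follow common_types.h, the payload type 103 is only an
+// example, and the ACMVADMode value VADNormal is assumed to come from
+// audio_coding_module_typedefs.h.
+inline void ExampleFillCodecParams(WebRtcACMCodecParams& params)
+{
+ memset(&params, 0, sizeof(params));
+ strncpy(params.codecInstant.plname, "ISAC",
+ sizeof(params.codecInstant.plname) - 1);
+ params.codecInstant.pltype = 103; // dynamic RTP payload type
+ params.codecInstant.plfreq = 16000; // sampling frequency in Hz
+ params.codecInstant.pacsize = ISACWB_PAC_SIZE; // 30 ms @ 16 kHz
+ params.codecInstant.channels = 1;
+ params.codecInstant.rate = ISACWB_DEFAULT_RATE; // 32000 bps
+ params.enableDTX = true; // enables VAD if the codec has no internal DTX
+ params.enableVAD = false;
+ params.vadMode = VADNormal;
+}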
+
+// A structure that encapsulates audio buffer and related parameters
+// used for synchronization of audio of two ACMs.
+//
+// inAudio : same as ACMGenericCodec::_inAudio
+// inAudioIxRead : same as ACMGenericCodec::_inAudioIxRead
+// inAudioIxWrite : same as ACMGenericCodec::_inAudioIxWrite
+// inTimestamp : same as ACMGenericCodec::_inTimestamp
+// inTimestampIxWrite : same as ACMGenericCodec::_inTimestampIxWrite
+// lastTimestamp : same as ACMGenericCodec::_lastTimestamp
+// lastInTimestamp : same as AudioCodingModuleImpl::_lastInTimestamp
+//
+struct WebRtcACMAudioBuff
+{
+ WebRtc_Word16 inAudio[AUDIO_BUFFER_SIZE_W16];
+ WebRtc_Word16 inAudioIxRead;
+ WebRtc_Word16 inAudioIxWrite;
+ WebRtc_UWord32 inTimestamp[TIMESTAMP_BUFFER_SIZE_W32];
+ WebRtc_Word16 inTimestampIxWrite;
+ WebRtc_UWord32 lastTimestamp;
+ WebRtc_UWord32 lastInTimestamp;
+};
+
+} // namespace webrtc
+
+#endif
diff --git a/src/modules/audio_coding/main/source/acm_dtmf_detection.cc b/src/modules/audio_coding/main/source/acm_dtmf_detection.cc
new file mode 100644
index 0000000..cab9ea5
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_dtmf_detection.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "acm_dtmf_detection.h"
+#include "audio_coding_module_typedefs.h"
+
+namespace webrtc
+{
+
+ACMDTMFDetection::ACMDTMFDetection()
+{
+}
+
+
+ACMDTMFDetection::~ACMDTMFDetection()
+{
+}
+
+
+WebRtc_Word16
+ACMDTMFDetection::Enable(
+ ACMCountries /* cpt */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFDetection::Disable()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFDetection::Detect(
+ const WebRtc_Word16* /* inAudioBuff */,
+ const WebRtc_UWord16 /* inBuffLenWord16 */,
+ const WebRtc_Word32 /* inFreqHz */,
+ bool& /* toneDetected */,
+ WebRtc_Word16& /* tone */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFDetection::GetVersion(
+ WebRtc_Word8* /* version */,
+ WebRtc_UWord32& /* remainingBufferInBytes */,
+ WebRtc_UWord32& /* position */)
+{
+ return -1;
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_dtmf_detection.h b/src/modules/audio_coding/main/source/acm_dtmf_detection.h
new file mode 100644
index 0000000..0142fc9
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_dtmf_detection.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_DTMF_DETECTION_H
+#define ACM_DTMF_DETECTION_H
+
+#include "acm_resampler.h"
+#include "audio_coding_module_typedefs.h"
+#include "typedefs.h"
+
+namespace webrtc
+{
+
+class ACMDTMFDetection
+{
+public:
+ ACMDTMFDetection();
+ ~ACMDTMFDetection();
+ WebRtc_Word16 Enable(ACMCountries cpt = ACMDisableCountryDetection);
+ WebRtc_Word16 Disable();
+ WebRtc_Word16 Detect(
+ const WebRtc_Word16* inAudioBuff,
+ const WebRtc_UWord16 inBuffLenWord16,
+ const WebRtc_Word32 inFreqHz,
+ bool& toneDetected,
+ WebRtc_Word16& tone);
+
+ static WebRtc_Word16 GetVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position);
+
+private:
+ ACMResampler _resampler;
+ bool _init;
+};
+
+} // namespace webrtc
+
+#endif // ACM_DTMF_DETECTION_H
diff --git a/src/modules/audio_coding/main/source/acm_dtmf_playout.cc b/src/modules/audio_coding/main/source/acm_dtmf_playout.cc
new file mode 100644
index 0000000..8286cb5
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_dtmf_playout.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_dtmf_playout.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_AVT
+
+ACMDTMFPlayout::ACMDTMFPlayout(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMDTMFPlayout::~ACMDTMFPlayout()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMDTMFPlayout::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMDTMFPlayout::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMDTMFPlayout::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+void
+ACMDTMFPlayout::DestructEncoderSafe()
+{
+ return;
+}
+
+void
+ACMDTMFPlayout::DestructDecoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+#else //===================== Actual Implementation =======================
+
+ACMDTMFPlayout::ACMDTMFPlayout(
+ WebRtc_Word16 codecID)
+{
+ _codecID = codecID;
+}
+
+
+ACMDTMFPlayout::~ACMDTMFPlayout()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // DTMFPlayout has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // DTMFPlayout has no instance
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMDTMFPlayout::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_AVT_FUNCTIONS".
+ // Then call NetEQ to add the codec to its
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderAVT, codecInst.pltype, NULL, 8000);
+ SET_AVT_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMDTMFPlayout::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalCreateEncoder()
+{
+ // DTMFPlayout has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::InternalCreateDecoder()
+{
+ // DTMFPlayout has no instance
+ return 0;
+}
+
+
+void
+ACMDTMFPlayout::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ // DTMFPlayout has no instance
+ return;
+}
+
+
+void
+ACMDTMFPlayout::DestructEncoderSafe()
+{
+ // DTMFPlayout has no instance
+ return;
+}
+
+void
+ACMDTMFPlayout::DestructDecoderSafe()
+{
+ // DTMFPlayout has no instance
+ return;
+}
+
+
+WebRtc_Word16
+ACMDTMFPlayout::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s: given payload-type %d does not match \
+the stored payload-type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderAVT);
+}
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_dtmf_playout.h b/src/modules/audio_coding/main/source/acm_dtmf_playout.h
new file mode 100644
index 0000000..9f38baa
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_dtmf_playout.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_DTMF_PLAYOUT_H
+#define ACM_DTMF_PLAYOUT_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+class ACMDTMFPlayout : public ACMGenericCodec
+{
+public:
+ ACMDTMFPlayout(WebRtc_Word16 codecID);
+ ~ACMDTMFPlayout();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+};
+
+} // namespace webrtc
+
+#endif // ACM_DTMF_PLAYOUT_H
+
diff --git a/src/modules/audio_coding/main/source/acm_g722.cc b/src/modules/audio_coding/main/source/acm_g722.cc
new file mode 100644
index 0000000..948910f
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g722.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_g722.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+#include "g722_interface.h"
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_G722
+
+ACMG722::ACMG722(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMG722::~ACMG722()
+{
+ return;
+}
+
+
+WebRtc_Word32
+ACMG722::Add10MsDataSafe(
+ const WebRtc_UWord32 /* timestamp */,
+ const WebRtc_Word16* /* data */,
+ const WebRtc_UWord16 /* lengthSmpl */,
+ const WebRtc_UWord8 /* audioChannel */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMG722::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMG722::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMG722::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+
+// Encoder and decoder memory
+struct ACMG722EncStr {
+ G722EncInst* inst; // instance for left channel in case of stereo
+ G722EncInst* instRight; // instance for right channel in case of stereo
+};
+struct ACMG722DecStr {
+ G722DecInst* inst; // instance for left channel in case of stereo
+ G722DecInst* instRight; // instance for right channel in case of stereo
+};
+
+ACMG722::ACMG722(
+ WebRtc_Word16 codecID)
+{
+ // Encoder
+ _ptrEncStr = new ACMG722EncStr;
+ if(_ptrEncStr != NULL)
+ {
+ _ptrEncStr->inst = NULL;
+ _ptrEncStr->instRight = NULL;
+ }
+ // Decoder
+ _ptrDecStr = new ACMG722DecStr;
+ if(_ptrDecStr != NULL)
+ {
+ _ptrDecStr->inst = NULL;
+ _ptrDecStr->instRight = NULL; // Not used
+ }
+ _codecID = codecID;
+ return;
+}
+
+
+ACMG722::~ACMG722()
+{
+ if(_ptrEncStr != NULL)
+ {
+ if(_ptrEncStr->inst != NULL)
+ {
+ WebRtcG722_FreeEncoder(_ptrEncStr->inst);
+ _ptrEncStr->inst = NULL;
+ }
+
+ if(_ptrEncStr->instRight != NULL)
+ {
+ WebRtcG722_FreeEncoder(_ptrEncStr->instRight);
+ _ptrEncStr->instRight = NULL;
+ }
+
+ delete _ptrEncStr;
+ _ptrEncStr = NULL;
+ }
+ if(_ptrDecStr != NULL)
+ {
+ if(_ptrDecStr->inst != NULL)
+ {
+ WebRtcG722_FreeDecoder(_ptrDecStr->inst);
+ _ptrDecStr->inst = NULL;
+ }
+ if(_ptrDecStr->instRight != NULL)
+ {
+ WebRtcG722_FreeDecoder(_ptrDecStr->instRight);
+ _ptrDecStr->instRight = NULL;
+ }
+
+ delete _ptrDecStr;
+ _ptrDecStr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word32
+ACMG722::Add10MsDataSafe(
+ const WebRtc_UWord32 timestamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl,
+ const WebRtc_UWord8 audioChannel)
+{
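+ // G.722 RTP timestamps advance at 8 kHz even though the codec samples
+ // audio at 16 kHz (a historical quirk kept by RFC 3551), which is
+ // presumably why the incoming timestamp is halved before it is stored.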
+ return ACMGenericCodec::Add10MsDataSafe((timestamp>>1), data, lengthSmpl,
+ audioChannel);
+}
+
+WebRtc_Word16
+ACMG722::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ // If stereo, split input signal in left and right channel before encoding
+ if(_noChannels == 2) {
+ WebRtc_Word16 leftChannel[960];
+ WebRtc_Word16 rightChannel[960];
+ WebRtc_UWord8 outLeft[480];
+ WebRtc_UWord8 outRight[480];
+ WebRtc_Word16 lenInBytes;
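+ // At 64 kbps G.722 produces 4 bits per 16 kHz input sample, i.e. one
+ // byte per two samples, so a frame of _frameLenSmpl samples per channel
+ // yields _frameLenSmpl/2 bytes per channel; the 960/480 element buffers
+ // above cover frames of up to 60 ms.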
+ for (int i=0, j=0; i<_frameLenSmpl*2; i+=2, j++) {
+ leftChannel[j] = _inAudio[_inAudioIxRead+i];
+ rightChannel[j] = _inAudio[_inAudioIxRead+i+1];
+ }
+ lenInBytes = WebRtcG722_Encode(_encoderInstPtr,
+ leftChannel, _frameLenSmpl, (WebRtc_Word16*)outLeft);
+ lenInBytes += WebRtcG722_Encode(_encoderInstPtrRight,
+ rightChannel, _frameLenSmpl, (WebRtc_Word16*)outRight);
+ *bitStreamLenByte = lenInBytes;
+
+ // Interleave the 4 bits per sample from left and right channel
+ for (int i=0, j=0; i<lenInBytes; i+=2, j++)
+ {
+ bitStream[i] = (outRight[j] & 0xF0) + (outLeft[j] >> 4);
+ bitStream[i+1] = ((outRight[j] & 0x0F) << 4) + (outLeft[j] & 0x0F);
+ }
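+ // Layout produced by the loop above: if the left encoder emits a byte
+ // with codewords l0 (high nibble) and l1 (low nibble), and the right
+ // encoder emits r0 and r1 for the same two samples, the combined stream
+ // stores one byte with r0 high and l0 low, followed by one byte with
+ // r1 high and l1 low.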
+ } else {
+ *bitStreamLenByte = WebRtcG722_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream);
+ }
+
+ // Increment the read index; this tells the caller how far
+ // we have gone forward in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl*_noChannels;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMG722::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ if(codecParams->codecInstant.channels == 2)
+ {
+ // Create codec struct for right channel
+ if (_ptrEncStr->instRight == NULL)
+ {
+ WebRtcG722_CreateEncoder(&_ptrEncStr->instRight);
+ if(_ptrEncStr->instRight == NULL)
+ {
+ return -1;
+ }
+ }
+ _encoderInstPtrRight = (G722EncInst*)_ptrEncStr->instRight;
+ if (WebRtcG722_EncoderInit(_encoderInstPtrRight) < 0)
+ {
+ return -1;
+ }
+ }
+
+ return WebRtcG722_EncoderInit(_encoderInstPtr);
+}
+
+
+WebRtc_Word16
+ACMG722::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return WebRtcG722_DecoderInit(_decoderInstPtr);
+}
+
+
+WebRtc_Word32
+ACMG722::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ // TODO: log error
+ return -1;
+ }
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_G722_FUNCTIONS".
+ // Then call NetEQ to add the codec to its
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderG722, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ SET_G722_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMG722::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722::InternalCreateEncoder()
+{
+ if(_ptrEncStr == NULL)
+ {
+ // This structure must be created in the constructor.
+ // If it is still NULL, then there is a problem and
+ // we do not continue.
+ return -1;
+ }
+ WebRtcG722_CreateEncoder(&_ptrEncStr->inst);
+ if(_ptrEncStr->inst == NULL)
+ {
+ return -1;
+ }
+ _encoderInstPtr = (G722EncInst*)_ptrEncStr->inst;
+ return 0;
+}
+
+
+void
+ACMG722::DestructEncoderSafe()
+{
+ if(_ptrEncStr != NULL)
+ {
+ if(_ptrEncStr->inst != NULL)
+ {
+ WebRtcG722_FreeEncoder(_ptrEncStr->inst);
+ _ptrEncStr->inst = NULL;
+ }
+ }
+ _encoderExist = false;
+ _encoderInitialized = false;
+
+}
+
+WebRtc_Word16
+ACMG722::InternalCreateDecoder()
+{
+ if(_ptrDecStr == NULL)
+ {
+ // This structure must be created in the constructor.
+ // If it is still NULL, then there is a problem and
+ // we do not continue.
+ return -1;
+ }
+
+ WebRtcG722_CreateDecoder(&_ptrDecStr->inst);
+ if(_ptrDecStr->inst == NULL)
+ {
+ return -1;
+ }
+ _decoderInstPtr = (G722DecInst*)_ptrDecStr->inst;
+ return 0;
+}
+
+void
+ACMG722::DestructDecoderSafe()
+{
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_ptrDecStr != NULL)
+ {
+ if(_ptrDecStr->inst != NULL)
+ {
+ WebRtcG722_FreeDecoder(_ptrDecStr->inst);
+ _ptrDecStr->inst = NULL;
+ }
+ }
+}
+
+
+void
+ACMG722::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcG722_FreeEncoder((G722EncInst*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s: given payload-type %d does not match \
+the stored payload-type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderG722);
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_g722.h b/src/modules/audio_coding/main/source/acm_g722.h
new file mode 100644
index 0000000..a2035cf
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g722.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_G722_H
+#define ACM_G722_H
+
+#include "acm_generic_codec.h"
+
+typedef struct WebRtcG722EncInst G722EncInst;
+typedef struct WebRtcG722DecInst G722DecInst;
+
+namespace webrtc
+{
+
+// forward declaration
+struct ACMG722EncStr;
+struct ACMG722DecStr;
+
+class ACMG722 : public ACMGenericCodec
+{
+public:
+ ACMG722(WebRtc_Word16 codecID);
+ ~ACMG722();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ WebRtc_Word32 Add10MsDataSafe(
+ const WebRtc_UWord32 timestamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl,
+ const WebRtc_UWord8 audioChannel);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ ACMG722EncStr* _ptrEncStr;
+ ACMG722DecStr* _ptrDecStr;
+
+ G722EncInst* _encoderInstPtr;
+ G722EncInst* _encoderInstPtrRight; // Prepared for stereo
+ G722DecInst* _decoderInstPtr;
+};
+
+} // namespace webrtc
+
+#endif // ACM_G722_H
diff --git a/src/modules/audio_coding/main/source/acm_g7221.cc b/src/modules/audio_coding/main/source/acm_g7221.cc
new file mode 100644
index 0000000..76cc3ad
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7221.cc
@@ -0,0 +1,675 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_g7221.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_G722_1
+ // NOTE! G.722.1 is not included in the open-source package. Modify this
+ // file, or your codec API, to match the function calls and the name of the
+ // G.722.1 API file that is used.
+ // #include "g7221_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_G722_1
+
+ACMG722_1::ACMG722_1(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+ACMG722_1::~ACMG722_1()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMG722_1::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+ACMGenericCodec*
+ACMG722_1::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722_1::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722_1::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMG722_1::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real G.722.1 wrapper
+struct G722_1_Inst_t_;
+
+extern WebRtc_Word16 WebRtcG7221_CreateEnc16(G722_1_16_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_CreateEnc24(G722_1_24_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_CreateEnc32(G722_1_32_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_CreateDec16(G722_1_16_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221_CreateDec24(G722_1_24_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221_CreateDec32(G722_1_32_decinst_t_** decInst);
+
+extern WebRtc_Word16 WebRtcG7221_FreeEnc16(G722_1_16_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_FreeEnc24(G722_1_24_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_FreeEnc32(G722_1_32_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221_FreeDec16(G722_1_16_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221_FreeDec24(G722_1_24_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221_FreeDec32(G722_1_32_decinst_t_** decInst);
+
+extern WebRtc_Word16 WebRtcG7221_EncoderInit16(G722_1_16_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221_EncoderInit24(G722_1_24_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221_EncoderInit32(G722_1_32_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221_DecoderInit16(G722_1_16_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7221_DecoderInit24(G722_1_24_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7221_DecoderInit32(G722_1_32_decinst_t_* decInst);
+
+extern WebRtc_Word16 WebRtcG7221_Encode16(G722_1_16_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221_Encode24(G722_1_24_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221_Encode32(G722_1_32_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+
+extern WebRtc_Word16 WebRtcG7221_Decode16(G722_1_16_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221_Decode24(G722_1_24_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221_Decode32(G722_1_32_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+
+extern WebRtc_Word16 WebRtcG7221_DecodePlc16(G722_1_16_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+extern WebRtc_Word16 WebRtcG7221_DecodePlc24(G722_1_24_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+extern WebRtc_Word16 WebRtcG7221_DecodePlc32(G722_1_32_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+
+
+ACMG722_1::ACMG722_1(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_encoderInstPtrRight(NULL),
+_decoderInstPtr(NULL),
+_encoderInst16Ptr(NULL),
+_encoderInst16PtrR(NULL),
+_encoderInst24Ptr(NULL),
+_encoderInst24PtrR(NULL),
+_encoderInst32Ptr(NULL),
+_encoderInst32PtrR(NULL),
+_decoderInst16Ptr(NULL),
+_decoderInst24Ptr(NULL),
+_decoderInst32Ptr(NULL)
+{
+ _codecID = codecID;
+ if(_codecID == ACMCodecDB::g722_1_16)
+ {
+ _operationalRate = 16000;
+ }
+ else if(_codecID == ACMCodecDB::g722_1_24)
+ {
+ _operationalRate = 24000;
+ }
+ else if(_codecID == ACMCodecDB::g722_1_32)
+ {
+ _operationalRate = 32000;
+ }
+ else
+ {
+ _operationalRate = -1;
+ }
+ return;
+}
+
+ACMG722_1::~ACMG722_1()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ delete _encoderInstPtr;
+ _encoderInstPtr = NULL;
+ }
+ if(_encoderInstPtrRight != NULL)
+ {
+ delete _encoderInstPtrRight;
+ _encoderInstPtrRight = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ delete _decoderInstPtr;
+ _decoderInstPtr = NULL;
+ }
+
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ _encoderInst16Ptr = NULL;
+ _encoderInst16PtrR = NULL;
+ _decoderInst16Ptr = NULL;
+ break;
+ }
+ case 24000:
+ {
+ _encoderInst24Ptr = NULL;
+ _encoderInst24PtrR = NULL;
+ _decoderInst24Ptr = NULL;
+ break;
+ }
+ case 32000:
+ {
+ _encoderInst32Ptr = NULL;
+ _encoderInst32PtrR = NULL;
+ _decoderInst32Ptr = NULL;
+ break;
+ }
+ default:
+ {
+ break;
+ }
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 leftChannel[320];
+ WebRtc_Word16 rightChannel[320];
+ WebRtc_Word16 lenInBytes;
+ WebRtc_Word16 outB[160];
+
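+ // G.722.1 always codes 20 ms frames of 320 samples at 16 kHz; at
+ // 16/24/32 kbps that is 40/60/80 bytes per frame and channel, so outB
+ // (160 words = 320 bytes) has room for both channels at the highest rate.
+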
+ // If stereo, split input signal in left and right channel before encoding
+ if(_noChannels == 2)
+ {
+ for (int i=0, j=0; i<_frameLenSmpl*2; i+=2, j++) {
+ leftChannel[j] = _inAudio[_inAudioIxRead+i];
+ rightChannel[j] = _inAudio[_inAudioIxRead+i+1];
+ }
+ } else {
+ // Mono: copy all 320 samples (320 * 2 bytes) into the work buffer.
+ memcpy(leftChannel, &_inAudio[_inAudioIxRead], 320 * sizeof(WebRtc_Word16));
+ }
+
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ lenInBytes = WebRtcG7221_Encode16(_encoderInst16Ptr,
+ leftChannel, 320, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221_Encode16(_encoderInst16PtrR,
+ rightChannel, 320, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ case 24000:
+ {
+ lenInBytes = WebRtcG7221_Encode24(_encoderInst24Ptr,
+ leftChannel, 320, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221_Encode24(_encoderInst24PtrR,
+ rightChannel, 320, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ case 32000:
+ {
+ lenInBytes = WebRtcG7221_Encode32(_encoderInst32Ptr,
+ leftChannel, 320, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221_Encode32(_encoderInst32PtrR,
+ rightChannel, 320, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+ memcpy(bitStream, outB, lenInBytes);
+ *bitStreamLenByte = lenInBytes;
+
+ // Increment the read index; this tells the caller how far
+ // we have gone forward in reading the audio buffer.
+ _inAudioIxRead += 320*_noChannels;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMG722_1::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ WebRtc_Word16 ret;
+
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ ret = WebRtcG7221_EncoderInit16(_encoderInst16PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221_EncoderInit16(_encoderInst16Ptr);
+ break;
+ }
+ case 24000:
+ {
+ ret = WebRtcG7221_EncoderInit24(_encoderInst24PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221_EncoderInit24(_encoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ ret = WebRtcG7221_EncoderInit32(_encoderInst32PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221_EncoderInit32(_encoderInst32Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitEncoder: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ return WebRtcG7221_DecoderInit16(_decoderInst16Ptr);
+ break;
+ }
+ case 24000:
+ {
+ return WebRtcG7221_DecoderInit24(_decoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ return WebRtcG7221_DecoderInit32(_decoderInst32Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+}
+
+
+WebRtc_Word32
+ACMG722_1::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ // TODO: log error.
+ return -1;
+ }
+ // NetEQ has an array of pointers to WebRtcNetEQ_CodecDef.
+ // Get an entry of that array (the NetEQ wrapper will allocate memory)
+ // by calling "netEq->CodecDef", where "NETEQ_CODEC_G722_1_XX" would
+ // be the index of the entry.
+ // Fill up the given structure by calling
+ // "SET_CODEC_PAR" & "SET_G722_1_XX_FUNCTIONS".
+ // Then return the structure back to NetEQ to add the codec to its
+ // database.
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1_16, codecInst.pltype,
+ _decoderInst16Ptr, 16000);
+ SET_G722_1_16_FUNCTIONS((codecDef));
+ break;
+ }
+ case 24000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1_24, codecInst.pltype,
+ _decoderInst24Ptr, 16000);
+ SET_G722_1_24_FUNCTIONS((codecDef));
+ break;
+ }
+ case 32000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1_32, codecInst.pltype,
+ _decoderInst32Ptr, 16000);
+ SET_G722_1_32_FUNCTIONS((codecDef));
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+ACMGenericCodec*
+ACMG722_1::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalCreateEncoder()
+{
+ if((_encoderInstPtr == NULL) || (_encoderInstPtrRight == NULL))
+ {
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ WebRtcG7221_CreateEnc16(&_encoderInst16Ptr);
+ WebRtcG7221_CreateEnc16(&_encoderInst16PtrR);
+ break;
+ }
+ case 24000:
+ {
+ WebRtcG7221_CreateEnc24(&_encoderInst24Ptr);
+ WebRtcG7221_CreateEnc24(&_encoderInst24PtrR);
+ break;
+ }
+ case 32000:
+ {
+ WebRtcG7221_CreateEnc32(&_encoderInst32Ptr);
+ WebRtcG7221_CreateEnc32(&_encoderInst32PtrR);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void
+ACMG722_1::DestructEncoderSafe()
+{
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if(_encoderInstPtr != NULL)
+ {
+ delete _encoderInstPtr;
+ _encoderInstPtr = NULL;
+ }
+ if(_encoderInstPtrRight != NULL)
+ {
+ delete _encoderInstPtrRight;
+ _encoderInstPtrRight = NULL;
+ }
+ _encoderInst16Ptr = NULL;
+ _encoderInst24Ptr = NULL;
+ _encoderInst32Ptr = NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1::InternalCreateDecoder()
+{
+ if(_decoderInstPtr == NULL)
+ {
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ WebRtcG7221_CreateDec16(&_decoderInst16Ptr);
+ break;
+ }
+ case 24000:
+ {
+ WebRtcG7221_CreateDec24(&_decoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ WebRtcG7221_CreateDec32(&_decoderInst32Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: Wrong rate for G722_1.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void
+ACMG722_1::DestructDecoderSafe()
+{
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_decoderInstPtr != NULL)
+ {
+ delete _decoderInstPtr;
+ _decoderInstPtr = NULL;
+ }
+ _decoderInst16Ptr = NULL;
+ _decoderInst24Ptr = NULL;
+ _decoderInst32Ptr = NULL;
+}
+
+
+void
+ACMG722_1::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ delete ptrInst;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s: given payload-type %d does not match \
+the stored payload-type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 16000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1_16);
+ }
+ case 24000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1_24);
+ }
+ case 32000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1_32);
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "UnregisterFromNetEqSafe: Wrong rate for G722_1.");
+ return -1;
+ }
+ }
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_g7221.h b/src/modules/audio_coding/main/source/acm_g7221.h
new file mode 100644
index 0000000..829fbf0
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7221.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_G722_1_H
+#define ACM_G722_1_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+// forward declaration
+struct G722_1_16_encinst_t_;
+struct G722_1_16_decinst_t_;
+
+struct G722_1_24_encinst_t_;
+struct G722_1_24_decinst_t_;
+
+struct G722_1_32_encinst_t_;
+struct G722_1_32_decinst_t_;
+
+struct G722_1_Inst_t_;
+
+
+class ACMG722_1 : public ACMGenericCodec
+{
+public:
+ ACMG722_1(WebRtc_Word16 codecID);
+ ~ACMG722_1();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ WebRtc_Word32 _operationalRate;
+
+ G722_1_Inst_t_* _encoderInstPtr;
+ G722_1_Inst_t_* _encoderInstPtrRight; //Used in stereo mode
+ G722_1_Inst_t_* _decoderInstPtr;
+
+ // Only one set of these pointers is valid at any given time
+ G722_1_16_encinst_t_* _encoderInst16Ptr;
+ G722_1_16_encinst_t_* _encoderInst16PtrR;
+ G722_1_24_encinst_t_* _encoderInst24Ptr;
+ G722_1_24_encinst_t_* _encoderInst24PtrR;
+ G722_1_32_encinst_t_* _encoderInst32Ptr;
+ G722_1_32_encinst_t_* _encoderInst32PtrR;
+
+ // Only one of these pointers is valid at any given time
+ G722_1_16_decinst_t_* _decoderInst16Ptr;
+ G722_1_24_decinst_t_* _decoderInst24Ptr;
+ G722_1_32_decinst_t_* _decoderInst32Ptr;
+};
+
+} // namespace webrtc
+
+#endif // ACM_G722_1_H
diff --git a/src/modules/audio_coding/main/source/acm_g7221c.cc b/src/modules/audio_coding/main/source/acm_g7221c.cc
new file mode 100644
index 0000000..9aede6e
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7221c.cc
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_g7221c.h"
+#include "acm_neteq.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+#include "trace.h"
+
+#ifdef WEBRTC_CODEC_G722_1C
+ // NOTE! G.722.1C is not included in the open-source package. Modify this
+ // file, or your codec API, to match the function calls and the name of the
+ // G.722.1C API file that is used.
+ // #include "g7221C_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_G722_1C
+
+ACMG722_1C::ACMG722_1C(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+ACMG722_1C::~ACMG722_1C()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMG722_1C::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+ACMGenericCodec*
+ACMG722_1C::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722_1C::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMG722_1C::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMG722_1C::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+// Remove when integrating a real G.722.1C wrapper
+struct G722_1_Inst_t_;
+
+extern WebRtc_Word16 WebRtcG7221C_CreateEnc24(G722_1C_24_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_CreateEnc32(G722_1C_32_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_CreateEnc48(G722_1C_48_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_CreateDec24(G722_1C_24_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221C_CreateDec32(G722_1C_32_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221C_CreateDec48(G722_1C_48_decinst_t_** decInst);
+
+extern WebRtc_Word16 WebRtcG7221C_FreeEnc24(G722_1C_24_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_FreeEnc32(G722_1C_32_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_FreeEnc48(G722_1C_48_encinst_t_** encInst);
+extern WebRtc_Word16 WebRtcG7221C_FreeDec24(G722_1C_24_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221C_FreeDec32(G722_1C_32_decinst_t_** decInst);
+extern WebRtc_Word16 WebRtcG7221C_FreeDec48(G722_1C_48_decinst_t_** decInst);
+
+
+extern WebRtc_Word16 WebRtcG7221C_EncoderInit24(G722_1C_24_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221C_EncoderInit32(G722_1C_32_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221C_EncoderInit48(G722_1C_48_encinst_t_* encInst);
+extern WebRtc_Word16 WebRtcG7221C_DecoderInit24(G722_1C_24_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7221C_DecoderInit32(G722_1C_32_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7221C_DecoderInit48(G722_1C_48_decinst_t_* decInst);
+
+extern WebRtc_Word16 WebRtcG7221C_Encode24(G722_1C_24_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221C_Encode32(G722_1C_32_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221C_Encode48(G722_1C_48_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+
+extern WebRtc_Word16 WebRtcG7221C_Decode24(G722_1C_24_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221C_Decode32(G722_1C_32_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG7221C_Decode48(G722_1C_48_decinst_t_* decInst,
+ WebRtc_Word16* bitstream,
+ WebRtc_Word16 len,
+ WebRtc_Word16* output);
+
+extern WebRtc_Word16 WebRtcG7221C_DecodePlc24(G722_1C_24_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+extern WebRtc_Word16 WebRtcG7221C_DecodePlc32(G722_1C_32_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+extern WebRtc_Word16 WebRtcG7221C_DecodePlc48(G722_1C_48_decinst_t_* decInst,
+ WebRtc_Word16* output,
+ WebRtc_Word16 nrLostFrames);
+
+
+ACMG722_1C::ACMG722_1C(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_encoderInstPtrRight(NULL),
+_decoderInstPtr(NULL),
+_encoderInst24Ptr(NULL),
+_encoderInst24PtrR(NULL),
+_encoderInst32Ptr(NULL),
+_encoderInst32PtrR(NULL),
+_encoderInst48Ptr(NULL),
+_encoderInst48PtrR(NULL),
+_decoderInst24Ptr(NULL),
+_decoderInst32Ptr(NULL),
+_decoderInst48Ptr(NULL)
+{
+ _codecID = codecID;
+ if(_codecID == ACMCodecDB::g722_1C_24)
+ {
+ _operationalRate = 24000;
+ }
+ else if(_codecID == ACMCodecDB::g722_1C_32)
+ {
+ _operationalRate = 32000;
+ }
+ else if(_codecID == ACMCodecDB::g722_1C_48)
+ {
+ _operationalRate = 48000;
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Wrong codec id for G722_1c.");
+
+ _operationalRate = -1;
+ }
+ return;
+}
+
+ACMG722_1C::~ACMG722_1C()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ delete _encoderInstPtr;
+ _encoderInstPtr = NULL;
+ }
+ if(_encoderInstPtrRight != NULL)
+ {
+ delete _encoderInstPtrRight;
+ _encoderInstPtrRight = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ delete _decoderInstPtr;
+ _decoderInstPtr = NULL;
+ }
+
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ _encoderInst24Ptr = NULL;
+ _encoderInst24PtrR = NULL;
+ _decoderInst24Ptr = NULL;
+ break;
+ }
+ case 32000:
+ {
+ _encoderInst32Ptr = NULL;
+ _encoderInst32PtrR = NULL;
+ _decoderInst32Ptr = NULL;
+ break;
+ }
+ case 48000:
+ {
+ _encoderInst48Ptr = NULL;
+ _encoderInst48PtrR = NULL;
+ _decoderInst48Ptr = NULL;
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Wrong rate for G722_1c.");
+ break;
+ }
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 leftChannel[640];
+ WebRtc_Word16 rightChannel[640];
+ WebRtc_Word16 lenInBytes;
+ WebRtc_Word16 outB[240];
+
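+ // G.722.1C (G.722.1 Annex C) codes 20 ms frames of 640 samples at 32 kHz;
+ // at 24/32/48 kbps that is 60/80/120 bytes per frame and channel, so outB
+ // (240 words = 480 bytes) has room for both channels at the highest rate.
+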
+ // If stereo, split input signal in left and right channel before encoding
+ if(_noChannels == 2)
+ {
+ for (int i=0, j=0; i<_frameLenSmpl*2; i+=2, j++) {
+ leftChannel[j] = _inAudio[_inAudioIxRead+i];
+ rightChannel[j] = _inAudio[_inAudioIxRead+i+1];
+ }
+ } else {
+ // Mono: copy all 640 samples (640 * 2 bytes) into the work buffer.
+ memcpy(leftChannel, &_inAudio[_inAudioIxRead], 640 * sizeof(WebRtc_Word16));
+ }
+
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ lenInBytes = WebRtcG7221C_Encode24(_encoderInst24Ptr,
+ leftChannel, 640, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221C_Encode24(_encoderInst24PtrR,
+ rightChannel, 640, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ case 32000:
+ {
+ lenInBytes = WebRtcG7221C_Encode32(_encoderInst32Ptr,
+ leftChannel, 640, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221C_Encode32(_encoderInst32PtrR,
+ rightChannel, 640, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ case 48000:
+ {
+ lenInBytes = WebRtcG7221C_Encode48(_encoderInst48Ptr,
+ leftChannel, 640, &outB[0]);
+ if (_noChannels == 2)
+ {
+ lenInBytes += WebRtcG7221C_Encode48(_encoderInst48PtrR,
+ rightChannel, 640, &outB[lenInBytes/2]);
+ }
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+
+ memcpy(bitStream, outB, lenInBytes);
+ *bitStreamLenByte = lenInBytes;
+
+ // Increment the read index; this tells the caller how far
+ // we have gone forward in reading the audio buffer.
+ _inAudioIxRead += 640*_noChannels;
+
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+WebRtc_Word16
+ACMG722_1C::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ WebRtc_Word16 ret;
+
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ ret = WebRtcG7221C_EncoderInit24(_encoderInst24PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221C_EncoderInit24(_encoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ ret = WebRtcG7221C_EncoderInit32(_encoderInst32PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221C_EncoderInit32(_encoderInst32Ptr);
+ break;
+ }
+ case 48000:
+ {
+ ret = WebRtcG7221C_EncoderInit48(_encoderInst48PtrR);
+ if (ret < 0) {
+ return ret;
+ }
+ return WebRtcG7221C_EncoderInit48(_encoderInst48Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitEncoder: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ return WebRtcG7221C_DecoderInit24(_decoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ return WebRtcG7221C_DecoderInit32(_decoderInst32Ptr);
+ break;
+ }
+ case 48000:
+ {
+ return WebRtcG7221C_DecoderInit48(_decoderInst48Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+}
+
+
+WebRtc_Word32
+ACMG722_1C::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: decoder not initialized for G722_1c");
+ return -1;
+ }
+ // NetEQ has an array of pointers to WebRtcNetEQ_CodecDef.
+ // Get an entry of that array (the NetEQ wrapper will allocate memory)
+ // by calling "netEq->CodecDef", where "NETEQ_CODEC_G722_1_XX" would
+ // be the index of the entry.
+ // Fill up the given structure by calling
+ // "SET_CODEC_PAR" & "SET_G722_1_XX_FUNCTIONS".
+ // Then return the structure back to NetEQ to add the codec to its
+ // database.
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1C_24, codecInst.pltype,
+ _decoderInst24Ptr, 32000);
+ SET_G722_1C_24_FUNCTIONS((codecDef));
+ break;
+ }
+ case 32000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1C_32, codecInst.pltype,
+ _decoderInst32Ptr, 32000);
+ SET_G722_1C_32_FUNCTIONS((codecDef));
+ break;
+ }
+ case 48000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderG722_1C_48, codecInst.pltype,
+ _decoderInst48Ptr, 32000);
+ SET_G722_1C_48_FUNCTIONS((codecDef));
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+ACMGenericCodec*
+ACMG722_1C::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalCreateEncoder()
+{
+ if((_encoderInstPtr == NULL) || (_encoderInstPtrRight == NULL))
+ {
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ WebRtcG7221C_CreateEnc24(&_encoderInst24Ptr);
+ WebRtcG7221C_CreateEnc24(&_encoderInst24PtrR);
+ break;
+ }
+ case 32000:
+ {
+ WebRtcG7221C_CreateEnc32(&_encoderInst32Ptr);
+ WebRtcG7221C_CreateEnc32(&_encoderInst32PtrR);
+ break;
+ }
+ case 48000:
+ {
+ WebRtcG7221C_CreateEnc48(&_encoderInst48Ptr);
+ WebRtcG7221C_CreateEnc48(&_encoderInst48PtrR);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void
+ACMG722_1C::DestructEncoderSafe()
+{
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if(_encoderInstPtr != NULL)
+ {
+ delete _encoderInstPtr;
+ _encoderInstPtr = NULL;
+ }
+ if(_encoderInstPtrRight != NULL)
+ {
+ delete _encoderInstPtrRight;
+ _encoderInstPtrRight = NULL;
+ }
+ _encoderInst24Ptr = NULL;
+ _encoderInst32Ptr = NULL;
+ _encoderInst48Ptr = NULL;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::InternalCreateDecoder()
+{
+ if(_decoderInstPtr == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: cannot create decoder");
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ WebRtcG7221C_CreateDec24(&_decoderInst24Ptr);
+ break;
+ }
+ case 32000:
+ {
+ WebRtcG7221C_CreateDec32(&_decoderInst32Ptr);
+ break;
+ }
+ case 48000:
+ {
+ WebRtcG7221C_CreateDec48(&_decoderInst48Ptr);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: Wrong rate for G722_1c.");
+ return -1;
+ break;
+ }
+ }
+ return 0;
+}
+
+
+void
+ACMG722_1C::DestructDecoderSafe()
+{
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_decoderInstPtr != NULL)
+ {
+ delete _decoderInstPtr;
+ _decoderInstPtr = NULL;
+ }
+ _decoderInst24Ptr = NULL;
+ _decoderInst32Ptr = NULL;
+ _decoderInst48Ptr = NULL;
+}
+
+
+void
+ACMG722_1C::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ delete ptrInst;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG722_1C::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ switch(_operationalRate)
+ {
+ case 24000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1C_24);
+ }
+ case 32000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1C_32);
+ }
+ case 48000:
+ {
+ return netEq->RemoveCodec(kDecoderG722_1C_48);
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Could not remove codec from NetEQ for G722_1c. \
+Sampling frequency doesn't match");
+ return -1;
+ }
+ }
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_g7221c.h b/src/modules/audio_coding/main/source/acm_g7221c.h
new file mode 100644
index 0000000..9fb55cb
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7221c.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_G722_1C_H
+#define ACM_G722_1C_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+// forward declaration
+struct G722_1C_24_encinst_t_;
+struct G722_1C_24_decinst_t_;
+
+struct G722_1C_32_encinst_t_;
+struct G722_1C_32_decinst_t_;
+
+struct G722_1C_48_encinst_t_;
+struct G722_1C_48_decinst_t_;
+
+struct G722_1_Inst_t_;
+
+
+class ACMG722_1C : public ACMGenericCodec
+{
+public:
+ ACMG722_1C(WebRtc_Word16 codecID);
+ ~ACMG722_1C();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ WebRtc_Word32 _operationalRate;
+
+ G722_1_Inst_t_* _encoderInstPtr;
+ G722_1_Inst_t_* _encoderInstPtrRight; //Used in stereo mode
+ G722_1_Inst_t_* _decoderInstPtr;
+
+ // Only one set of these pointers is valid at any given time
+ G722_1C_24_encinst_t_* _encoderInst24Ptr;
+ G722_1C_24_encinst_t_* _encoderInst24PtrR;
+ G722_1C_32_encinst_t_* _encoderInst32Ptr;
+ G722_1C_32_encinst_t_* _encoderInst32PtrR;
+ G722_1C_48_encinst_t_* _encoderInst48Ptr;
+ G722_1C_48_encinst_t_* _encoderInst48PtrR;
+
+ // Only one of these pointers is valid at any given time
+ G722_1C_24_decinst_t_* _decoderInst24Ptr;
+ G722_1C_32_decinst_t_* _decoderInst32Ptr;
+ G722_1C_48_decinst_t_* _decoderInst48Ptr;
+};
+
+} // namespace webrtc
+
+#endif // ACM_G722_1C_H
diff --git a/src/modules/audio_coding/main/source/acm_g729.cc b/src/modules/audio_coding/main/source/acm_g729.cc
new file mode 100644
index 0000000..6bfd968
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g729.cc
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_g729.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_G729
+ // NOTE! G.729 is not included in the open-source package. Modify this file
+ // or your codec API to match the function calls and names of the G.729 API file you use.
+ // #include "g729_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_G729
+
+ACMG729::ACMG729(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMG729::~ACMG729()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729::EnableDTX()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729::DisableDTX()
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMG729::ReplaceInternalDTXSafe(
+ const bool /*replaceInternalDTX*/)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMG729::IsInternalDTXReplacedSafe(
+ bool* /* internalDTXReplaced */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMG729::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMG729::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMG729::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMG729::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMG729::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real G.729 wrapper
+extern WebRtc_Word16 WebRtcG729_CreateEnc(G729_encinst_t_** inst);
+extern WebRtc_Word16 WebRtcG729_CreateDec(G729_decinst_t_** inst);
+extern WebRtc_Word16 WebRtcG729_FreeEnc(G729_encinst_t_* inst);
+extern WebRtc_Word16 WebRtcG729_FreeDec(G729_decinst_t_* inst);
+extern WebRtc_Word16 WebRtcG729_Encode(G729_encinst_t_* encInst, WebRtc_Word16* input,
+ WebRtc_Word16 len, WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcG729_EncoderInit(G729_encinst_t_* encInst, WebRtc_Word16 mode);
+extern WebRtc_Word16 WebRtcG729_Decode(G729_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG729_DecodeBwe(G729_decinst_t_* decInst, WebRtc_Word16* input);
+extern WebRtc_Word16 WebRtcG729_DecodePlc(G729_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcG729_DecoderInit(G729_decinst_t_* decInst);
+
+
+ACMG729::ACMG729(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL)
+{
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ return;
+}
+
+
+ACMG729::~ACMG729()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ // Delete encoder memory
+ WebRtcG729_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ // Delete decoder memory
+ WebRtcG729_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ // Initialize before entering the loop
+ WebRtc_Word16 noEncodedSamples = 0;
+ WebRtc_Word16 tmpLenByte = 0;
+ WebRtc_Word16 vadDecision = 0;
+ *bitStreamLenByte = 0;
+ while(noEncodedSamples < _frameLenSmpl)
+ {
+ // Call G.729 encoder with pointer to encoder memory, input
+ // audio, number of samples and bitsream
+ tmpLenByte = WebRtcG729_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead], 80,
+ (WebRtc_Word16*)(&(bitStream[*bitStreamLenByte])));
+
+ // Increment the read index. This tells the caller how far
+ // we have advanced in reading the audio buffer.
+ _inAudioIxRead += 80;
+
+ // sanity check
+ if(tmpLenByte < 0)
+ {
+ // error has happened
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ // increment number of written bytes
+ *bitStreamLenByte += tmpLenByte;
+ switch(tmpLenByte)
+ {
+ case 0:
+ {
+ if(0 == noEncodedSamples)
+ {
+ // This is the first 10 ms of this packet and no data was
+ // generated; perhaps DTX is enabled and the codec is not
+ // generating any bit-stream for this 10 ms. Do not
+ // continue encoding this frame.
+ return 0;
+ }
+ break;
+ }
+ case 2:
+ {
+ // check if G.729 internal DTX is enabled
+ if(_hasInternalDTX && _dtxEnabled)
+ {
+ vadDecision = 0;
+ for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
+ {
+ _vadLabel[n] = vadDecision;
+ }
+ }
+ // We got a SID frame and have to send out this packet no
+ // matter how much audio we have encoded.
+ return *bitStreamLenByte;
+ }
+ case 10:
+ {
+ vadDecision = 1;
+ // This is a valid length; continue encoding.
+ break;
+ }
+ default:
+ {
+ return -1;
+ }
+ }
+
+ // update number of encoded samples
+ noEncodedSamples += 80;
+ }
+
+ // update VAD decision vector
+ if(_hasInternalDTX && !vadDecision && _dtxEnabled)
+ {
+ for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
+ {
+ _vadLabel[n] = vadDecision;
+ }
+ }
+
+ // done encoding, return number of encoded bytes
+ return *bitStreamLenByte;
+}
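+
+// Worked example of the frame sizes handled in the switch above: G.729
+// produces 10 bytes per 10 ms (80 samples at 8 kHz) of active speech, a
+// 2-byte SID frame when the internal DTX sends comfort noise, and 0 bytes
+// during silence. A sketch of the active-speech payload size, with a helper
+// name chosen only for illustration:
+//
+//   // Active-speech payload bytes for a packet of 'frameLenSmpl' samples.
+//   WebRtc_Word16 ExpectedG729PayloadBytes(WebRtc_Word16 frameLenSmpl)
+//   {
+//       return (WebRtc_Word16)((frameLenSmpl / 80) * 10); // 160 -> 20 bytes
+//   }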
+
+
+WebRtc_Word16
+ACMG729::EnableDTX()
+{
+ if(_dtxEnabled)
+ {
+ // DTX already enabled, do nothing
+ return 0;
+ }
+ else if(_encoderExist)
+ {
+ // Re-init the G.729 encoder to turn on DTX
+ if(WebRtcG729_EncoderInit(_encoderInstPtr, 1) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+WebRtc_Word16
+ACMG729::DisableDTX()
+{
+ if(!_dtxEnabled)
+ {
+ // DTX already disabled, do nothing
+ return 0;
+ }
+ else if(_encoderExist)
+ {
+ // Re-init the G.729 encoder to turn off DTX
+ if(WebRtcG729_EncoderInit(_encoderInstPtr, 0) < 0)
+ {
+ return -1;
+ }
+ _dtxEnabled = false;
+ return 0;
+ }
+ else
+ {
+ // Encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
+}
+
+
+WebRtc_Word32
+ACMG729::ReplaceInternalDTXSafe(
+ const bool replaceInternalDTX)
+{
+ // This function is used to disable the G.729 built-in DTX and use an
+ // external DTX instead.
+
+ if(replaceInternalDTX == _hasInternalDTX)
+ {
+ // Make sure we keep the DTX/VAD setting if possible
+ bool oldEnableDTX = _dtxEnabled;
+ bool oldEnableVAD = _vadEnabled;
+ ACMVADMode oldMode = _vadMode;
+ if (replaceInternalDTX)
+ {
+ // Disable internal DTX before enabling external DTX
+ DisableDTX();
+ }
+ else
+ {
+ // Disable external DTX before enabling internal
+ ACMGenericCodec::DisableDTX();
+ }
+ _hasInternalDTX = !replaceInternalDTX;
+ WebRtc_Word16 status = SetVADSafe(oldEnableDTX, oldEnableVAD, oldMode);
+ // Check if VAD status has changed from inactive to active, or if error was
+ // reported
+ if (status == 1) {
+ _vadEnabled = true;
+ return status;
+ } else if (status < 0) {
+ _hasInternalDTX = replaceInternalDTX;
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMG729::IsInternalDTXReplacedSafe(
+ bool* internalDTXReplaced)
+{
+ // Get status of whether DTX is replaced or not
+ *internalDTXReplaced = !_hasInternalDTX;
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMG729::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ // This function is not used. G.729 decoder is called from inside NetEQ
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // Init G.729 encoder
+ return WebRtcG729_EncoderInit(_encoderInstPtr,
+ ((codecParams->enableDTX)? 1:0));
+}
+
+
+WebRtc_Word16
+ACMG729::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // Init G.729 decoder
+ return WebRtcG729_DecoderInit(_decoderInstPtr);
+}
+
+
+WebRtc_Word32
+ACMG729::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: decoder not initialized for G729");
+ return -1;
+ }
+
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_G729_FUNCTION."
+ // Then call NetEQ to add the codec to it's
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderG729, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ SET_G729_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMG729::CreateInstance(void)
+{
+ // Function not used
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG729::InternalCreateEncoder()
+{
+ // Create encoder memory
+ return WebRtcG729_CreateEnc(&_encoderInstPtr);
+}
+
+
+void
+ACMG729::DestructEncoderSafe()
+{
+ // Free encoder memory
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcG729_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+
+WebRtc_Word16
+ACMG729::InternalCreateDecoder()
+{
+ // Create decoder memory
+ return WebRtcG729_CreateDec(&_decoderInstPtr);
+}
+
+
+void
+ACMG729::DestructDecoderSafe()
+{
+ // Free decoder memory
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcG729_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+
+void
+ACMG729::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcG729_FreeEnc((G729_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ // Remove codec from the NetEQ database
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderG729);
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_g729.h b/src/modules/audio_coding/main/source/acm_g729.h
new file mode 100644
index 0000000..3c00f63
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g729.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_G729_H
+#define ACM_G729_H
+
+#include "acm_generic_codec.h"
+
+// forward declaration
+struct G729_encinst_t_;
+struct G729_decinst_t_;
+
+namespace webrtc
+{
+
+class ACMG729 : public ACMGenericCodec
+{
+public:
+ ACMG729(WebRtc_Word16 codecID);
+ ~ACMG729();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 EnableDTX();
+
+ WebRtc_Word16 DisableDTX();
+
+ WebRtc_Word32 ReplaceInternalDTXSafe(
+ const bool replaceInternalDTX);
+
+ WebRtc_Word32 IsInternalDTXReplacedSafe(
+ bool* internalDTXReplaced);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ G729_encinst_t_* _encoderInstPtr;
+ G729_decinst_t_* _decoderInstPtr;
+
+};
+
+} // namespace webrtc
+
+#endif //ACM_G729_H
+
diff --git a/src/modules/audio_coding/main/source/acm_g7291.cc b/src/modules/audio_coding/main/source/acm_g7291.cc
new file mode 100644
index 0000000..3c3d519
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7291.cc
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_g7291.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_G729_1
+ // NOTE! G.729.1 is not included in the open-source package. Modify this file
+ // or your codec API to match the function calls and names of the G.729.1 API file you use.
+ // #include "g7291_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_G729_1
+
+ACMG729_1::ACMG729_1(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMG729_1::~ACMG729_1()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729_1::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMG729_1::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMG729_1::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMG729_1::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMG729_1::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMG729_1::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729_1::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMG729_1::SetBitRateSafe(
+ const WebRtc_Word32 /*rate*/ )
+{
+ return -1;
+}
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real G.729.1 wrapper
+
+struct G729_1_inst_t_;
+extern WebRtc_Word16 WebRtcG7291_Create(G729_1_inst_t_** inst);
+extern WebRtc_Word16 WebRtcG7291_Free(G729_1_inst_t_* inst);
+extern WebRtc_Word16 WebRtcG7291_Encode(G729_1_inst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16* output,
+ WebRtc_Word16 myRate,
+ WebRtc_Word16 nrFrames);
+extern WebRtc_Word16 WebRtcG7291_EncoderInit(G729_1_inst_t_* encInst,
+ WebRtc_Word16 myRate,
+ WebRtc_Word16 flag8kHz,
+ WebRtc_Word16 flagG729mode);
+extern WebRtc_Word16 WebRtcG7291_Decode(G729_1_inst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7291_DecodeBwe(G729_1_inst_t_* decInst, WebRtc_Word16* input);
+extern WebRtc_Word16 WebRtcG7291_DecodePlc(G729_1_inst_t_* decInst);
+extern WebRtc_Word16 WebRtcG7291_DecoderInit(G729_1_inst_t_* decInst);
+
+ACMG729_1::ACMG729_1(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL)
+{
+ _codecID = codecID;
+ // Our current G.729.1 implementation does not support
+ // Annex C, which is DTX.
+ _hasInternalDTX = false;
+
+ // Default rate
+ _myRate = 32000;
+
+ _flag8kHz = 0;
+ _flagG729mode = 0;
+ return;
+}
+
+ACMG729_1::~ACMG729_1()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcG7291_Free(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcG7291_Free(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+
+ // Initialize before entering the loop
+ WebRtc_Word16 noEncodedSamples = 0;
+ WebRtc_Word16 tmpLenByte = 0;
+ *bitStreamLenByte = 0;
+
+ WebRtc_Word16 byteLengthFrame = 0;
+
+ // Derive number of 20ms frames per encoded packet.
+ // [1,2,3] <=> [20,40,60]ms <=> [320,640,960] samples
+ WebRtc_Word16 n20msFrames = (_frameLenSmpl / 320);
+ // Byte length for the frame. The +1 byte carries rate information (omitted in G.729 mode).
+ byteLengthFrame = _myRate/(8*50) * n20msFrames + (1 - _flagG729mode);
+
+ // The following might be revised if we get G.729.1 Annex C (support for DTX).
+ do
+ {
+ *bitStreamLenByte = WebRtcG7291_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
+ (WebRtc_Word16*)bitStream, _myRate, n20msFrames);
+
+ // Increment the read index. This tells the caller how far
+ // we have advanced in reading the audio buffer.
+ _inAudioIxRead += 160;
+
+ // sanity check
+ if(*bitStreamLenByte < 0)
+ {
+ // error has happened
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: Encode error for G729_1");
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ noEncodedSamples += 160;
+ } while(*bitStreamLenByte == 0);
+
+
+ // This criteria will change if we have Annex C.
+ if(*bitStreamLenByte != byteLengthFrame)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: Encode error for G729_1");
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+
+ if(noEncodedSamples != _frameLenSmpl)
+ {
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ return *bitStreamLenByte;
+}
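+
+// Worked example of the byteLengthFrame formula above, assuming the defaults
+// set in the constructor (_myRate = 32000, _flagG729mode = 0) and a 20 ms
+// frame (_frameLenSmpl = 320 samples at 16 kHz, so n20msFrames = 1):
+//
+//   byteLengthFrame = 32000 / (8 * 50) * 1 + (1 - 0)
+//                   = 80 + 1
+//                   = 81 bytes
+//
+// The extra byte carries the rate information and disappears when the
+// encoder runs in G.729-compatibility mode (_flagG729mode = 1).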
+
+
+WebRtc_Word16
+ACMG729_1::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // Set the bit rate and initialize.
+ _myRate = codecParams->codecInstant.rate;
+ return SetBitRateSafe( (WebRtc_UWord32)_myRate);
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ if (WebRtcG7291_DecoderInit(_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: init decoder failed for G729_1");
+ return -1;
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMG729_1::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodeDef: Decoder uninitialized for G729_1");
+ return -1;
+ }
+
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_G729_FUNCTION."
+ // Then call NetEQ to add the codec to it's
+ // database.
+ SET_CODEC_PAR((codecDef), kDecoderG729_1, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ SET_G729_1_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMG729_1::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalCreateEncoder()
+{
+ if (WebRtcG7291_Create(&_encoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: create encoder failed for G729_1");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMG729_1::DestructEncoderSafe()
+{
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcG7291_Free(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+
+WebRtc_Word16
+ACMG729_1::InternalCreateDecoder()
+{
+ if (WebRtcG7291_Create(&_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: create decoder failed for G729_1");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMG729_1::DestructDecoderSafe()
+{
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcG7291_Free(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+
+void
+ACMG729_1::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ //WebRtcG7291_Free((G729_1_inst_t*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMG729_1::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec: given payload-type does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderG729_1);
+}
+
+WebRtc_Word16
+ACMG729_1::SetBitRateSafe(
+ const WebRtc_Word32 rate)
+{
+ //allowed rates: { 8000, 12000, 14000, 16000, 18000, 20000,
+ // 22000, 24000, 26000, 28000, 30000, 32000};
+ switch(rate)
+ {
+ case 8000:
+ {
+ _myRate = 8000;
+ break;
+ }
+ case 12000:
+ {
+ _myRate = 12000;
+ break;
+ }
+ case 14000:
+ {
+ _myRate = 14000;
+ break;
+ }
+ case 16000:
+ {
+ _myRate = 16000;
+ break;
+ }
+ case 18000:
+ {
+ _myRate = 18000;
+ break;
+ }
+ case 20000:
+ {
+ _myRate = 20000;
+ break;
+ }
+ case 22000:
+ {
+ _myRate = 22000;
+ break;
+ }
+ case 24000:
+ {
+ _myRate = 24000;
+ break;
+ }
+ case 26000:
+ {
+ _myRate = 26000;
+ break;
+ }
+ case 28000:
+ {
+ _myRate = 28000;
+ break;
+ }
+ case 30000:
+ {
+ _myRate = 30000;
+ break;
+ }
+ case 32000:
+ {
+ _myRate = 32000;
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetBitRateSafe: Invalid rate G729_1");
+ return -1;
+ break;
+ }
+ }
+
+ // Re-init with new rate
+ if (WebRtcG7291_EncoderInit(_encoderInstPtr, _myRate, _flag8kHz, _flagG729mode) >= 0)
+ {
+ _encoderParams.codecInstant.rate = _myRate;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
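+
+// Design note: the switch above enumerates the allowed G.729.1 rates
+// { 8000, 12000, 14000, ..., 32000 } bps. An equivalent, more compact
+// validity check (shown only as a sketch) would be:
+//
+//   bool validRate = (rate == 8000) ||
+//                    ((rate >= 12000) && (rate <= 32000) && (rate % 2000 == 0));
+//
+// The explicit switch keeps the allowed set obvious at a glance.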
+
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_g7291.h b/src/modules/audio_coding/main/source/acm_g7291.h
new file mode 100644
index 0000000..0454b01
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_g7291.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_G729_1_H
+#define ACM_G729_1_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+// forward declaration
+struct G729_1_inst_t_;
+
+class ACMG729_1: public ACMGenericCodec
+{
+public:
+ ACMG729_1(WebRtc_Word16 codecID);
+ ~ACMG729_1();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ //WebRtc_Word16 EnableDTX();
+ //
+ //WebRtc_Word16 DisableDTX();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ G729_1_inst_t_* _encoderInstPtr;
+ G729_1_inst_t_* _decoderInstPtr;
+
+ WebRtc_UWord16 _myRate;
+ WebRtc_Word16 _flag8kHz;
+ WebRtc_Word16 _flagG729mode;
+
+};
+
+} // namespace webrtc
+
+#endif // ACM_G729_1_H
+
diff --git a/src/modules/audio_coding/main/source/acm_generic_codec.cc b/src/modules/audio_coding/main/source/acm_generic_codec.cc
new file mode 100644
index 0000000..ce0c93c
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_generic_codec.cc
@@ -0,0 +1,1551 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <string.h>
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_generic_codec.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_vad.h"
+#include "webrtc_cng.h"
+
+namespace webrtc
+{
+
+// Enum for CNG
+enum
+{
+ kMaxPLCParamsCNG = WEBRTC_CNG_MAX_LPC_ORDER,
+ kNewCNGNumPLCParams = 8
+};
+
+#define ACM_SID_INTERVAL_MSEC 100
+
+// We set some of the variables to invalid values as a check that
+// proper initialization has happened. Another approach is to
+// initialize to a default codec that we are sure is always included.
+ACMGenericCodec::ACMGenericCodec():
+_inAudioIxWrite(0),
+_inAudioIxRead(0),
+_inTimestampIxWrite(0),
+_inAudio(NULL),
+_inTimestamp(NULL),
+_frameLenSmpl(-1), // invalid value
+_noChannels(1),
+_codecID(-1), // invalid value
+_noMissedSamples(0),
+_encoderExist(false),
+_decoderExist(false),
+_encoderInitialized(false),
+_decoderInitialized(false),
+_registeredInNetEq(false),
+_hasInternalDTX(false),
+_ptrVADInst(NULL),
+_vadEnabled(false),
+_vadMode(VADNormal),
+_dtxEnabled(false),
+_ptrDTXInst(NULL),
+_numLPCParams(kNewCNGNumPLCParams),
+_sentCNPrevious(false),
+_isMaster(true),
+_netEqDecodeLock(NULL),
+_codecWrapperLock(*RWLockWrapper::CreateRWLock()),
+_lastEncodedTimestamp(0),
+_lastTimestamp(0),
+_isAudioBuffFresh(true),
+_uniqueID(0)
+{
+ _lastTimestamp = 0xD87F3F9F;
+ //NullifyCodecInstance();
+}
+ACMGenericCodec::~ACMGenericCodec()
+{
+ // Check all the members which are pointers and
+ // if they are not NULL delete/free them.
+
+ if(_ptrVADInst != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+
+ if (_inAudio != NULL)
+ {
+ delete [] _inAudio;
+ _inAudio = NULL;
+ }
+
+ if (_inTimestamp != NULL)
+ {
+ delete [] _inTimestamp;
+ _inTimestamp = NULL;
+ }
+ if(_ptrDTXInst != NULL)
+ {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ delete &_codecWrapperLock;
+}
+
+WebRtc_Word32
+ACMGenericCodec::Add10MsData(
+ const WebRtc_UWord32 timestamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl,
+ const WebRtc_UWord8 audioChannel)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return Add10MsDataSafe(timestamp, data, lengthSmpl, audioChannel);
+}
+
+WebRtc_Word32
+ACMGenericCodec::Add10MsDataSafe(
+ const WebRtc_UWord32 timestamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 lengthSmpl,
+ const WebRtc_UWord8 audioChannel)
+{
+ // The codec expects to get data at the correct sampling rate.
+ // Get the sampling frequency of the codec.
+ WebRtc_UWord16 plFreqHz;
+
+ if(EncoderSampFreq(plFreqHz) < 0)
+ {
+ // _codecID is not correct, perhaps the codec is not initialized yet.
+ return -1;
+ }
+
+ // Sanity check, if the length of the input corresponds to 10 ms.
+ if((plFreqHz / 100) != lengthSmpl)
+ {
+ // This is not 10 ms of audio, given the sampling frequency of the
+ // codec
+ return -1;
+ }
+ if(_lastTimestamp == timestamp)
+ {
+ // Same timestamp as the last time, overwrite.
+ if((_inAudioIxWrite >= lengthSmpl) && (_inTimestampIxWrite > 0))
+ {
+ _inAudioIxWrite -= lengthSmpl;
+ _inTimestampIxWrite--;
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "Adding 10ms with previous timestamp, \
+overwriting the previous 10ms");
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "Adding 10ms with previous timestamp, this will sound bad");
+ }
+ }
+
+ _lastTimestamp = timestamp;
+
+ if ((_inAudioIxWrite + lengthSmpl*audioChannel) > AUDIO_BUFFER_SIZE_W16)
+ {
+ // Get the number of samples to be overwritten
+ WebRtc_Word16 missedSamples = _inAudioIxWrite + lengthSmpl*audioChannel -
+ AUDIO_BUFFER_SIZE_W16;
+
+ // Move the data (overwrite the old data)
+ memmove(_inAudio, _inAudio + missedSamples,
+ (AUDIO_BUFFER_SIZE_W16 - lengthSmpl*audioChannel)*sizeof(WebRtc_Word16));
+ // Copy the new data
+ memcpy(_inAudio + (AUDIO_BUFFER_SIZE_W16 - lengthSmpl*audioChannel), data,
+ lengthSmpl*audioChannel * sizeof(WebRtc_Word16));
+
+ // Get the number of 10 ms blocks which are overwritten
+ WebRtc_Word16 missed10MsecBlocks =
+ (WebRtc_Word16)((missedSamples/audioChannel * 100) / plFreqHz);
+
+ // Move the timestamps
+ memmove(_inTimestamp, _inTimestamp + missed10MsecBlocks,
+ (_inTimestampIxWrite - missed10MsecBlocks) * sizeof(WebRtc_UWord32));
+ _inTimestampIxWrite -= missed10MsecBlocks;
+ _inTimestamp[_inTimestampIxWrite] = timestamp;
+ _inTimestampIxWrite++;
+
+ // Buffer is full
+ _inAudioIxWrite = AUDIO_BUFFER_SIZE_W16;
+ IncreaseNoMissedSamples(missedSamples);
+ _isAudioBuffFresh = false;
+ return -missedSamples;
+ }
+ memcpy(_inAudio + _inAudioIxWrite, data, lengthSmpl*audioChannel * sizeof(WebRtc_Word16));
+ _inAudioIxWrite += lengthSmpl*audioChannel;
+
+ assert(_inTimestampIxWrite < TIMESTAMP_BUFFER_SIZE_W32);
+ assert(_inTimestampIxWrite >= 0);
+
+ _inTimestamp[_inTimestampIxWrite] = timestamp;
+ _inTimestampIxWrite++;
+ _isAudioBuffFresh = false;
+ return 0;
+}
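+
+// Caller-side sketch of the 10 ms rule enforced above: 'lengthSmpl' must be
+// exactly one hundredth of the encoder sampling frequency or the call is
+// rejected. The variable names below are illustrative only:
+//
+//   // e.g. a 16 kHz mono codec expects 160 samples per 10 ms call
+//   const WebRtc_UWord16 samplesPer10Ms = 16000 / 100;
+//   WebRtc_Word16 block[160];
+//   // ... fill 'block' with 10 ms of audio ...
+//   codec->Add10MsData(rtpTimestamp, block, samplesPer10Ms, 1);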
+
+WebRtc_Word16
+ACMGenericCodec::Encode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType)
+{
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return EncodeSafe(bitStream, bitStreamLenByte,
+ timeStamp, encodingType);
+}
+
+
+WebRtc_Word16
+ACMGenericCodec::EncodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType)
+{
+ // Do we have enough data to encode?
+ // we wait until we have a full frame to encode.
+ if(_inAudioIxWrite < _frameLenSmpl*_noChannels)
+ {
+ // There is not enough audio
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ // Doesn't really matter what this parameter is set to.
+ *encodingType = kNoEncoding;
+ return 0;
+ }
+
+ // Not all codecs accept the whole frame to be pushed into the
+ // encoder at once. "myBasicCodingBlockSmpl" is the basic coding
+ // block size, in samples, that the encoder accepts per call
+ // (0 means the codec encodes a whole frame in one call).
+ const WebRtc_Word16 myBasicCodingBlockSmpl =
+ ACMCodecDB::_basicCodingBlockSmpl[_codecID];
+ if((myBasicCodingBlockSmpl < 0) ||
+ (!_encoderInitialized) ||
+ (!_encoderExist))
+ {
+ // This should not happen
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncodeSafe: error, basic coding sample block is negative");
+ return -1;
+ }
+
+ // This makes the internal encoder read from the beginning of the buffer
+ _inAudioIxRead = 0;
+ *timeStamp = _inTimestamp[0];
+
+ // Process the audio through VAD/DTX; the per-10 ms decisions end up in
+ // _vadLabel. If VAD is disabled all labels are set to ONE (active).
+ WebRtc_Word16 status = 0;
+ WebRtc_Word16 dtxProcessedSamples = 0;
+
+ status = ProcessFrameVADDTX(bitStream, bitStreamLenByte,
+ &dtxProcessedSamples);
+
+ if(status < 0)
+ {
+ *timeStamp = 0;
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ }
+ else
+ {
+ if(dtxProcessedSamples > 0)
+ {
+ // DTX has processed some samples; a bit-stream may or may not
+ // have been generated. We should not do any encoding (normally
+ // there will not be enough data).
+
+ // Setting the following makes the move of audio data
+ // and timestamps happen correctly.
+ _inAudioIxRead = dtxProcessedSamples;
+ // This lets the owner of ACMGenericCodec know that the
+ // generated bit-stream is DTX, so the correct payload type is used.
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
+ if (sampFreqHz == 8000) {
+ *encodingType = kPassiveDTXNB;
+ } else if (sampFreqHz == 16000) {
+ *encodingType = kPassiveDTXWB;
+ } else if (sampFreqHz == 32000) {
+ *encodingType = kPassiveDTXSWB;
+ } else {
+ status = -1;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncodeSafe: Wrong sampling frequency for DTX.");
+ }
+
+ // Transport empty frame if we have an empty bitstream
+ if ((*bitStreamLenByte == 0)
+ && (_sentCNPrevious || ((_inAudioIxWrite - _inAudioIxRead) <= 0))
+ )
+ {
+ // Makes sure we transmit an empty frame
+ *bitStreamLenByte = 1;
+ *encodingType = kNoEncoding;
+ }
+ _sentCNPrevious = true;
+ }
+ else
+ {
+ _sentCNPrevious = false;
+ // This lets the caller of the method know whether the frame is
+ // active or non-active. The caller knows that the stream is
+ // encoded by this codec and can use the info for any registered
+ // callbacks.
+ if(myBasicCodingBlockSmpl == 0)
+ {
+ // This codec can handle all allowed frame sizes as basic
+ // coding block
+ status = InternalEncode(bitStream, bitStreamLenByte);
+
+ if(status < 0)
+ {
+ // TODO:
+ // Maybe reset the encoder to be fresh for the next
+ // frame.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncodeSafe: error in internalEncode");
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ }
+ }
+ else
+ {
+ // A basic coding block is defined for this codec, so we loop
+ // over the audio in steps of the basic coding block. It is
+ // not necessary that each iteration generates a bit-stream.
+ WebRtc_Word16 tmpBitStreamLenByte;
+
+ // Reset the variables which will be incremented in the loop
+ *bitStreamLenByte = 0;
+ bool done = false;
+ while(!done)
+ {
+ status = InternalEncode(&bitStream[*bitStreamLenByte],
+ &tmpBitStreamLenByte);
+ *bitStreamLenByte += tmpBitStreamLenByte;
+
+ // Guard Against errors and too large payloads
+ if((status < 0) ||
+ (*bitStreamLenByte > MAX_PAYLOAD_SIZE_BYTE))
+ {
+ // An error has happened; even if we are in the middle of a full
+ // frame we have to exit. Whatever bits are in the buffer are
+ // probably corrupted, so we ignore them before exiting.
+ *bitStreamLenByte = 0;
+ *encodingType = kNoEncoding;
+ // We might have come here because of the second
+ // condition.
+ status = -1;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding,
+ _uniqueID, "EncodeSafe: error in InternalEncode");
+ // break from the loop
+ break;
+ }
+
+ done = _inAudioIxRead >= _frameLenSmpl;
+ }
+ }
+ if(status >= 0)
+ {
+ *encodingType = (_vadLabel[0] == 1)?
+ kActiveNormalEncoded:kPassiveNormalEncoded;
+ // Transport empty frame if we have an empty bitstream
+ if ((*bitStreamLenByte == 0) && ((_inAudioIxWrite - _inAudioIxRead) <= 0))
+ {
+ // Makes sure we transmit an empty frame
+ *bitStreamLenByte = 1;
+ *encodingType = kNoEncoding;
+ }
+ }
+ }
+ }
+
+ // Move the timestamp buffer according to the number of 10 ms blocks
+ // which were read.
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
+
+ WebRtc_Word16 num10MsecBlocks =
+ (WebRtc_Word16)((_inAudioIxRead/_noChannels * 100) / sampFreqHz);
+ if(_inTimestampIxWrite > num10MsecBlocks)
+ {
+ memmove(_inTimestamp, _inTimestamp + num10MsecBlocks,
+ (_inTimestampIxWrite - num10MsecBlocks) * sizeof(WebRtc_Word32));
+ }
+ _inTimestampIxWrite -= num10MsecBlocks;
+
+ // We have to move the audio that is not encoded to the beginning
+ // of the buffer and accordingly adjust the read and write indices.
+ if(_inAudioIxRead < _inAudioIxWrite)
+ {
+ memmove(_inAudio, &_inAudio[_inAudioIxRead],
+ (_inAudioIxWrite - _inAudioIxRead)*sizeof(WebRtc_Word16));
+ }
+
+ _inAudioIxWrite -= _inAudioIxRead;
+
+ _inAudioIxRead = 0;
+ _lastEncodedTimestamp = *timeStamp;
+ return (status < 0) ? (-1):(*bitStreamLenByte);
+}
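+
+// A minimal sketch of how the owner of this object is expected to drive the
+// encoder, based on the checks above (local variable names are illustrative):
+//
+//   WebRtc_UWord8 bitStream[MAX_PAYLOAD_SIZE_BYTE];
+//   WebRtc_Word16 lenBytes;
+//   WebRtc_UWord32 timeStamp;
+//   WebRtcACMEncodingType encodingType;
+//
+//   codec->Add10MsData(rtpTimestamp, audio10Ms, samplesPer10Ms, 1);
+//   WebRtc_Word16 result = codec->Encode(bitStream, &lenBytes,
+//                                        &timeStamp, &encodingType);
+//   // result == 0 with encodingType == kNoEncoding means a full frame has
+//   // not accumulated yet; a positive result is the payload length in bytes.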
+
+WebRtc_Word16
+ACMGenericCodec::Decode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return DecodeSafe(bitStream, bitStreamLenByte, audio,
+ audioSamples, speechType);
+}
+
+bool
+ACMGenericCodec::EncoderInitialized()
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return _encoderInitialized;
+}
+
+bool
+ACMGenericCodec::DecoderInitialized()
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return _decoderInitialized;
+}
+
+
+WebRtc_Word32
+ACMGenericCodec::RegisterInNetEq(
+ ACMNetEQ* netEq,
+ const CodecInst& codecInst)
+{
+ WebRtcNetEQ_CodecDef codecDef;
+ WriteLockScoped wl(_codecWrapperLock);
+
+ if(CodecDef(codecDef, codecInst) < 0)
+ {
+ // Failed to register
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "RegisterInNetEq: error, failed to register");
+ _registeredInNetEq = false;
+ return -1;
+ }
+ else
+ {
+ if(netEq->AddCodec(&codecDef, _isMaster) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "RegisterInNetEq: error, failed to add codec");
+ _registeredInNetEq = false;
+ return -1;
+ }
+ // Registered
+ _registeredInNetEq = true;
+ return 0;
+ }
+}
+
+WebRtc_Word16
+ACMGenericCodec::EncoderParams(
+ WebRtcACMCodecParams* encParams)
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return EncoderParamsSafe(encParams);
+}
+
+WebRtc_Word16
+ACMGenericCodec::EncoderParamsSafe(
+ WebRtcACMCodecParams* encParams)
+{
+ // Codec parameters are valid only if the encoder is initialized
+ if(_encoderInitialized)
+ {
+ WebRtc_Word32 currentRate;
+ memcpy(encParams, &_encoderParams, sizeof(WebRtcACMCodecParams));
+ currentRate = encParams->codecInstant.rate;
+ CurrentRate(currentRate);
+ encParams->codecInstant.rate = currentRate;
+ return 0;
+ }
+ else
+ {
+ encParams->codecInstant.plname[0] = '\0';
+ encParams->codecInstant.pltype = -1;
+ encParams->codecInstant.pacsize = 0;
+ encParams->codecInstant.rate = 0;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncoderParamsSafe: error, encoder not initialized");
+ return -1;
+ }
+}
+
+bool
+ACMGenericCodec::DecoderParams(
+ WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType)
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return DecoderParamsSafe(decParams, payloadType);
+}
+
+bool
+ACMGenericCodec::DecoderParamsSafe(
+ WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType)
+{
+ // Decoder parameters are valid only if decoder is initialized
+ if(_decoderInitialized)
+ {
+ if(payloadType == _decoderParams.codecInstant.pltype)
+ {
+ memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
+ return true;
+ }
+ }
+
+ decParams->codecInstant.plname[0] = '\0';
+ decParams->codecInstant.pltype = -1;
+ decParams->codecInstant.pacsize = 0;
+ decParams->codecInstant.rate = 0;
+ return false;
+}
+
+WebRtc_Word16
+ACMGenericCodec::ResetEncoder()
+{
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return ResetEncoderSafe();
+}
+
+WebRtc_Word16
+ACMGenericCodec::ResetEncoderSafe()
+{
+ if(!_encoderExist || !_encoderInitialized)
+ {
+ // We don't reset if the encoder doesn't exist or isn't initialized yet.
+ return 0;
+ }
+
+ _inAudioIxWrite = 0;
+ _inAudioIxRead = 0;
+ _inTimestampIxWrite = 0;
+ _noMissedSamples = 0;
+ _isAudioBuffFresh = true;
+ memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ memset(_inTimestamp, 0, TIMESTAMP_BUFFER_SIZE_W32 * sizeof(WebRtc_Word32));
+
+ // Store DTX/VAD params
+ bool enableVAD = _vadEnabled;
+ bool enableDTX = _dtxEnabled;
+ ACMVADMode mode = _vadMode;
+
+ // Reset the encoder
+ if(InternalResetEncoder() < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "ResetEncoderSafe: error in reset encoder");
+ return -1;
+ }
+
+ // Disable DTX & VAD this deletes the states
+ // we like to have fresh start
+ DisableDTX();
+ DisableVAD();
+
+ // Set DTX/VAD
+ return SetVADSafe(enableDTX, enableVAD, mode);
+}
+
+WebRtc_Word16
+ACMGenericCodec::InternalResetEncoder()
+{
+ // For most of the codecs it is sufficient to
+ // call their internal initialization.
+ // There are some exceptions.
+ // ----
+ // For iSAC we don't want to lose BWE history,
+ // so for iSAC we have to override this function.
+ // ----
+ return InternalInitEncoder(&_encoderParams);
+}
+
+WebRtc_Word16
+ACMGenericCodec::InitEncoder(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization)
+{
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ return InitEncoderSafe(codecParams, forceInitialization);
+}
+
+WebRtc_Word16
+ACMGenericCodec::InitEncoderSafe(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization)
+{
+ // Check if we got a valid set of parameters
+ WebRtc_Word16 mirrorID;
+ WebRtc_Word16 codecNumber =
+ ACMCodecDB::CodecNumber(&(codecParams->codecInstant), mirrorID);
+
+ if(codecNumber < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: error, codec number negative");
+ return -1;
+ }
+ // Check if the parameters are for this codec
+ if((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID))
+ {
+ // The current codec is not the same as the one given by codecParams
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: current codec is not the same as the one given by codecParams");
+ return -1;
+ }
+
+ if(!CanChangeEncodingParam(codecParams->codecInstant))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: cannot change encoding parameters");
+ return -1;
+ }
+
+ if(_encoderInitialized && !forceInitialization)
+ {
+ // The encoder is already initialized
+ return 0;
+ }
+ WebRtc_Word16 status;
+ if(!_encoderExist)
+ {
+ _encoderInitialized = false;
+ status = CreateEncoder();
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: cannot create encoder");
+ return -1;
+ }
+ else
+ {
+ _encoderExist = true;
+ }
+ }
+ _frameLenSmpl = (codecParams->codecInstant).pacsize;
+ status = InternalInitEncoder(codecParams);
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitEncoderSafe: error in init encoder");
+ _encoderInitialized = false;
+ return -1;
+ }
+ else
+ {
+ memcpy(&_encoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+ _encoderInitialized = true;
+ if(_inAudio == NULL)
+ {
+ _inAudio = new WebRtc_Word16[AUDIO_BUFFER_SIZE_W16];
+ if(_inAudio == NULL)
+ {
+ return -1;
+ }
+ memset(_inAudio, 0, AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ }
+ if(_inTimestamp == NULL)
+ {
+ _inTimestamp = new WebRtc_UWord32[TIMESTAMP_BUFFER_SIZE_W32];
+ if(_inTimestamp == NULL)
+ {
+ return -1;
+ }
+ memset(_inTimestamp, 0, sizeof(WebRtc_UWord32) *
+ TIMESTAMP_BUFFER_SIZE_W32);
+ }
+ _isAudioBuffFresh = true;
+ }
+ status = SetVADSafe(codecParams->enableDTX, codecParams->enableVAD,
+ codecParams->vadMode);
+
+ _noChannels = codecParams->codecInstant.channels;
+
+ return status;
+}
+
+bool
+ACMGenericCodec::CanChangeEncodingParam(
+ CodecInst& /*codecInst*/)
+{
+ return true;
+}
+
+WebRtc_Word16
+ACMGenericCodec::InitDecoder(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization)
+{
+ WriteLockScoped lockCodc(_codecWrapperLock);
+ WriteLockScoped lockNetEq(*_netEqDecodeLock);
+ return InitDecoderSafe(codecParams, forceInitialization);
+}
+
+WebRtc_Word16
+ACMGenericCodec::InitDecoderSafe(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization)
+{
+ WebRtc_Word16 mirrorID;
+ // Check if we got a valid set of parameters
+ WebRtc_Word16 codecNumber =
+ ACMCodecDB::ReceiverCodecNumber(codecParams->codecInstant, mirrorID);
+
+ if(codecNumber < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: error, invalid codec number");
+ return -1;
+ }
+ // Check if the parameters are for this codec
+ if((_codecID >= 0) && (_codecID != codecNumber) && (_codecID != mirrorID))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: current codec is not the same as the one given "
+ "by codecParams");
+ // The current codec is not the same as the one given by codecParams
+ return -1;
+ }
+
+
+ if(_decoderInitialized && !forceInitialization)
+ {
+ // The decoder is already initialized
+ return 0;
+ }
+
+ WebRtc_Word16 status;
+ if(!_decoderExist)
+ {
+ _decoderInitialized = false;
+ status = CreateDecoder();
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: cannot create decoder");
+ return -1;
+ }
+ else
+ {
+ _decoderExist = true;
+ }
+ }
+
+ status = InternalInitDecoder(codecParams);
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InitDecoderSafe: cannot init decoder");
+ _decoderInitialized = false;
+ return -1;
+ }
+ else
+ {
+ // Store the parameters
+ SaveDecoderParamSafe(codecParams);
+ _decoderInitialized = true;
+ }
+ return 0;
+}
+
+WebRtc_Word16
+ACMGenericCodec::ResetDecoder(WebRtc_Word16 payloadType)
+{
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ WriteLockScoped lockNetEq(*_netEqDecodeLock);
+ return ResetDecoderSafe(payloadType);
+}
+
+WebRtc_Word16
+ACMGenericCodec::ResetDecoderSafe(WebRtc_Word16 payloadType)
+{
+ WebRtcACMCodecParams decoderParams;
+ if(!_decoderExist || !_decoderInitialized)
+ {
+ return 0;
+ }
+ // Initialization of the decoder should work for all
+ // codecs. If there is a codec that has to keep some
+ // state, we need to define a virtual function and
+ // override it in that codec.
+ DecoderParamsSafe(&decoderParams, (WebRtc_UWord8) payloadType);
+ return InternalInitDecoder(&decoderParams);
+}
+
+void
+ACMGenericCodec::ResetNoMissedSamples()
+{
+ WriteLockScoped cs(_codecWrapperLock);
+ _noMissedSamples = 0;
+}
+
+void
+ACMGenericCodec::IncreaseNoMissedSamples(
+ const WebRtc_Word16 noSamples)
+{
+ _noMissedSamples += noSamples;
+}
+
+// Get the number of missed samples, this can be public
+WebRtc_UWord32
+ACMGenericCodec::NoMissedSamples() const
+{
+ ReadLockScoped cs(_codecWrapperLock);
+ return _noMissedSamples;
+}
+void
+ACMGenericCodec::DestructEncoder()
+{
+ WriteLockScoped wl(_codecWrapperLock);
+
+ // Disable VAD and delete the instance
+ if(_ptrVADInst != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ _vadEnabled = false;
+ _vadMode = VADNormal;
+
+ // Disable DTX and delete the instance
+ _dtxEnabled = false;
+ if(_ptrDTXInst != NULL)
+ {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ _numLPCParams = kNewCNGNumPLCParams;
+
+ DestructEncoderSafe();
+}
+
+void
+ACMGenericCodec::DestructDecoder()
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ _decoderParams.codecInstant.pltype = -1;
+ DestructDecoderSafe();
+}
+
+WebRtc_Word16
+ACMGenericCodec::SetBitRate(
+ const WebRtc_Word32 bitRateBPS)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return SetBitRateSafe(bitRateBPS);
+}
+
+WebRtc_Word16
+ACMGenericCodec::SetBitRateSafe(
+ const WebRtc_Word32 bitRateBPS)
+{
+ // If the codec can change the bit-rate, this function
+ // should be overridden; otherwise the only acceptable
+ // value is the one in the database.
+ CodecInst codecParams;
+ if(ACMCodecDB::Codec(_codecID, &codecParams) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetBitRateSafe: error in ACMCodecDB::Codec");
+ return -1;
+ }
+ if(codecParams.rate != bitRateBPS)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetBitRateSafe: rate value is not acceptable");
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+WebRtc_Word32
+ACMGenericCodec::GetEstimatedBandwidth()
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return GetEstimatedBandwidthSafe();
+}
+
+WebRtc_Word32
+ACMGenericCodec::GetEstimatedBandwidthSafe()
+{
+ // All codecs but iSAC will return -1
+ return -1;
+}
+
+WebRtc_Word32
+ACMGenericCodec::SetEstimatedBandwidth(
+ WebRtc_Word32 estimatedBandwidth)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return SetEstimatedBandwidthSafe(estimatedBandwidth);
+}
+
+WebRtc_Word32
+ACMGenericCodec::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 /*estimatedBandwidth*/)
+{
+ // All codecs but iSAC will return -1
+ return -1;
+}
+
+WebRtc_Word32
+ACMGenericCodec::GetRedPayload(
+ WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ return GetRedPayloadSafe(redPayload, payloadBytes);
+}
+
+WebRtc_Word32
+ACMGenericCodec::GetRedPayloadSafe(
+ WebRtc_UWord8* /* redPayload */,
+ WebRtc_Word16* /* payloadBytes */)
+{
+ return -1; // Do nothing by default
+}
+
+WebRtc_Word16
+ACMGenericCodec::CreateEncoder()
+{
+ WebRtc_Word16 status = 0;
+ if(!_encoderExist)
+ {
+ status = InternalCreateEncoder();
+ // We just created the codec and obviously it is not initialized
+ _encoderInitialized = false;
+ }
+
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CreateEncoder: error in internal create encoder");
+ _encoderExist = false;
+ }
+ else
+ {
+ _encoderExist = true;
+ }
+ return status;
+}
+
+WebRtc_Word16
+ACMGenericCodec::CreateDecoder()
+{
+ WebRtc_Word16 status = 0;
+ if(!_decoderExist)
+ {
+ status = InternalCreateDecoder();
+ // Decoder just created and obviously it is not initialized
+ _decoderInitialized = false;
+ }
+
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CreateDecoder: error in internal create decoder");
+ _decoderExist = false;
+ }
+ else
+ {
+ _decoderExist = true;
+ }
+ return status;
+}
+
+
+void ACMGenericCodec::DestructEncoderInst(void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WriteLockScoped lockCodec(_codecWrapperLock);
+ ReadLockScoped lockNetEq(*_netEqDecodeLock);
+ InternalDestructEncoderInst(ptrInst);
+ }
+}
+
+
+WebRtc_Word16
+ACMGenericCodec::AudioBuffer(
+ WebRtcACMAudioBuff& audioBuff)
+{
+ ReadLockScoped cs(_codecWrapperLock);
+ memcpy(audioBuff.inAudio, _inAudio,
+ AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ audioBuff.inAudioIxRead = _inAudioIxRead;
+ audioBuff.inAudioIxWrite = _inAudioIxWrite;
+ memcpy(audioBuff.inTimestamp, _inTimestamp,
+ TIMESTAMP_BUFFER_SIZE_W32*sizeof(WebRtc_UWord32));
+ audioBuff.inTimestampIxWrite = _inTimestampIxWrite;
+ audioBuff.lastTimestamp = _lastTimestamp;
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMGenericCodec::SetAudioBuffer(
+ WebRtcACMAudioBuff& audioBuff)
+{
+ WriteLockScoped cs(_codecWrapperLock);
+ memcpy(_inAudio, audioBuff.inAudio,
+ AUDIO_BUFFER_SIZE_W16 * sizeof(WebRtc_Word16));
+ _inAudioIxRead = audioBuff.inAudioIxRead;
+ _inAudioIxWrite = audioBuff.inAudioIxWrite;
+ memcpy(_inTimestamp, audioBuff.inTimestamp,
+ TIMESTAMP_BUFFER_SIZE_W32*sizeof(WebRtc_UWord32));
+ _inTimestampIxWrite = audioBuff.inTimestampIxWrite;
+ _lastTimestamp = audioBuff.lastTimestamp;
+ _isAudioBuffFresh = false;
+ return 0;
+}
+
+
+WebRtc_UWord32
+ACMGenericCodec::LastEncodedTimestamp() const
+{
+ ReadLockScoped cs(_codecWrapperLock);
+ return _lastEncodedTimestamp;
+}
+
+
+WebRtc_UWord32
+ACMGenericCodec::EarliestTimestamp() const
+{
+ ReadLockScoped cs(_codecWrapperLock);
+ return _inTimestamp[0];
+}
+
+
+WebRtc_Word16
+ACMGenericCodec::SetVAD(
+ const bool enableDTX,
+ const bool enableVAD,
+ const ACMVADMode mode)
+{
+ WriteLockScoped cs(_codecWrapperLock);
+ return SetVADSafe(enableDTX, enableVAD, mode);
+}
+
+
+WebRtc_Word16
+ACMGenericCodec::SetVADSafe(
+ const bool enableDTX,
+ const bool enableVAD,
+ const ACMVADMode mode)
+{
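+    // Summary of the logic below:
+    //  - DTX requested, codec has internal DTX : the codec's own DTX is used;
+    //    WebRtc VAD is enabled only if "enableVAD" is true.
+    //  - DTX requested, no internal DTX        : WebRtc CN/DTX is used and
+    //    WebRtc VAD is enabled regardless of "enableVAD".
+    //  - DTX not requested                     : DTX is disabled and VAD
+    //    follows "enableVAD".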
+ if(enableDTX)
+ {
+ // Make G729 AnnexB a special case
+ if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729") && !_hasInternalDTX)
+ {
+ if (ACMGenericCodec::EnableDTX() < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetVADSafe: error in enable DTX");
+ return -1;
+ }
+ }
+ else
+ {
+ if(EnableDTX() < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetVADSafe: error in enable DTX");
+ return -1;
+ }
+ }
+
+ if(_hasInternalDTX)
+ {
+            // The codec has internal DTX, so we don't really need WebRtc VAD.
+            // However, we let the user turn it on if they need call-backs on
+            // silence. Store the VAD mode for future use even if VAD is off.
+ _vadMode = mode;
+ return (enableVAD)? EnableVAD(mode):DisableVAD();
+ }
+ else
+ {
+            // The codec does not have internal DTX, so enabling DTX requires
+            // an active VAD. 'enableDTX == true' overrides the VAD setting.
+ if(EnableVAD(mode) < 0)
+ {
+ // If we cannot create VAD we have to disable DTX
+ if(!_vadEnabled)
+ {
+ DisableDTX();
+ }
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "SetVADSafe: error in enable VAD");
+ return -1;
+ }
+
+            // Return '1' to let the caller know that VAD was turned on, even
+            // though the function was called with 'enableVAD == false'.
+ if (enableVAD == false) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+ }
+ else
+ {
+ // Make G729 AnnexB a special case
+ if (!STR_CASE_CMP(_encoderParams.codecInstant.plname, "G729") && !_hasInternalDTX)
+ {
+ ACMGenericCodec::DisableDTX();
+ }
+ else
+ {
+ DisableDTX();
+ }
+ return (enableVAD)? EnableVAD(mode):DisableVAD();
+ }
+}
+
+WebRtc_Word16
+ACMGenericCodec::EnableDTX()
+{
+ if(_hasInternalDTX)
+ {
+        // We should not be here if we have internal DTX;
+        // this function should be overridden by the derived
+        // class in that case.
+ return -1;
+ }
+ if(!_dtxEnabled)
+ {
+ if(WebRtcCng_CreateEnc(&_ptrDTXInst) < 0)
+ {
+            _ptrDTXInst = NULL;
+ return -1;
+ }
+ WebRtc_UWord16 freqHz;
+ EncoderSampFreq(freqHz);
+ if(WebRtcCng_InitEnc(_ptrDTXInst, (WebRtc_Word16)freqHz,
+ ACM_SID_INTERVAL_MSEC, _numLPCParams) < 0)
+ {
+            // Could not initialize; free the memory and return -1.
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ return -1;
+ }
+ _dtxEnabled = true;
+ }
+ return 0;
+}
+
+WebRtc_Word16
+ACMGenericCodec::DisableDTX()
+{
+ if(_hasInternalDTX)
+ {
+        // We should not be here if we have internal DTX;
+        // this function should be overridden by the derived
+        // class in that case.
+ return -1;
+ }
+ if(_ptrDTXInst != NULL)
+ {
+ WebRtcCng_FreeEnc(_ptrDTXInst);
+ _ptrDTXInst = NULL;
+ }
+ _dtxEnabled = false;
+ return 0;
+}
+
+WebRtc_Word16
+ACMGenericCodec::EnableVAD(
+ ACMVADMode mode)
+{
+ if((mode < VADNormal) || (mode > VADVeryAggr))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in VAD mode range");
+ return -1;
+ }
+
+ if(!_vadEnabled)
+ {
+ if(WebRtcVad_Create(&_ptrVADInst) < 0)
+ {
+ _ptrVADInst = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in create VAD");
+ return -1;
+ }
+ if(WebRtcVad_Init(_ptrVADInst) < 0)
+ {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: error in init VAD");
+ return -1;
+ }
+ }
+
+ // Set the vad mode to the given value
+ if(WebRtcVad_set_mode(_ptrVADInst, mode) < 0)
+ {
+        // We failed to set the mode and have to return -1. If we
+        // already have a working VAD (_vadEnabled == true) we leave
+        // it running; otherwise, the instance that was just created
+        // is freed below.
+ if(!_vadEnabled)
+ {
+            // We just created the instance but cannot set the mode,
+            // so we have to free the memory.
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableVAD: failed to set the VAD mode");
+ return -1;
+ }
+ _vadMode = mode;
+ _vadEnabled = true;
+ return 0;
+}
+
+WebRtc_Word16
+ACMGenericCodec::DisableVAD()
+{
+ if(_ptrVADInst != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst);
+ _ptrVADInst = NULL;
+ }
+ _vadEnabled = false;
+ return 0;
+}
+
+WebRtc_Word32
+ACMGenericCodec::ReplaceInternalDTX(
+ const bool replaceInternalDTX)
+{
+ WriteLockScoped cs(_codecWrapperLock);
+ return ReplaceInternalDTXSafe(replaceInternalDTX);
+}
+
+WebRtc_Word32
+ACMGenericCodec::ReplaceInternalDTXSafe(
+ const bool /* replaceInternalDTX */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMGenericCodec::IsInternalDTXReplaced(
+ bool* internalDTXReplaced)
+{
+ WriteLockScoped cs(_codecWrapperLock);
+ return IsInternalDTXReplacedSafe(internalDTXReplaced);
+}
+
+WebRtc_Word32
+ACMGenericCodec::IsInternalDTXReplacedSafe(
+ bool* internalDTXReplaced)
+{
+ *internalDTXReplaced = false;
+ return 0;
+}
+
+WebRtc_Word16
+ACMGenericCodec::ProcessFrameVADDTX(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16* samplesProcessed)
+{
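+    // The frame is split into at most two VAD blocks (see the calculation
+    // below): 10-30 ms frames are handled as a single block, a 40 ms frame
+    // as two 20 ms blocks, and 50/60 ms frames as a 30 ms block plus the
+    // remainder. For example, a 60 ms frame at 16 kHz gives two blocks of
+    // 480 samples each.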
+ if(!_vadEnabled)
+ {
+        // VAD not enabled, set all vadLabel[] to 1 (speech detected)
+ for(WebRtc_Word16 n = 0; n < MAX_FRAME_SIZE_10MSEC; n++)
+ {
+ _vadLabel[n] = 1;
+ }
+ *samplesProcessed = 0;
+ return 0;
+ }
+ WebRtc_UWord16 freqHz;
+ EncoderSampFreq(freqHz);
+
+    // Calculate the number of samples in a 10 ms block, and the number of ms in one frame
+ WebRtc_Word16 samplesIn10Msec = (WebRtc_Word16)(freqHz / 100);
+ WebRtc_Word32 frameLenMsec = (((WebRtc_Word32)_frameLenSmpl * 1000) / freqHz);
+ WebRtc_Word16 status;
+ WebRtc_Word16 vadFlag = 0;
+
+ // Vector for storing maximum 30 ms of mono audio at 32 kHz
+ WebRtc_Word16 audio[960];
+
+ // Calculate number of VAD-blocks to process, and number of samples in each block.
+ int noSamplesToProcess[2];
+ if (frameLenMsec == 40)
+ {
+ // 20 ms in each VAD block
+ noSamplesToProcess[0] = noSamplesToProcess[1] = 2*samplesIn10Msec;
+ }
+ else
+ {
+        // For 10-30 ms frame sizes, the second VAD block will be zero ms long;
+        // for 50 and 60 ms, the first VAD block will be 30 ms.
+ noSamplesToProcess[0] = (frameLenMsec > 30)? 3*samplesIn10Msec : _frameLenSmpl;
+ noSamplesToProcess[1] = _frameLenSmpl-noSamplesToProcess[0];
+ }
+
+ int offSet = 0;
+ int loops = (noSamplesToProcess[1]>0) ? 2 : 1;
+ for (int i=0; i<loops; i++) {
+ // If stereo, calculate mean of the two channels
+ if(_noChannels == 2) {
+ for (int j=0; j<noSamplesToProcess[i]; j++) {
+ audio[j] = (_inAudio[(offSet+j)*2]+_inAudio[(offSet+j)*2+1])/2;
+ }
+ offSet = noSamplesToProcess[0];
+ } else {
+            // Mono; copy the data from _inAudio to work on.
+ memcpy(audio, _inAudio, sizeof(WebRtc_Word16)*noSamplesToProcess[i]);
+ }
+
+ // Call VAD
+ status = WebRtcVad_Process(_ptrVADInst, (WebRtc_Word16)freqHz,
+ audio, noSamplesToProcess[i]);
+ if (status)
+ {
+ vadFlag = 1;
+ }
+
+ _vadLabel[i] = status;
+
+ if(status < 0)
+ {
+            // This forces the data to be removed from the buffer.
+ *samplesProcessed += noSamplesToProcess[i];
+ return -1;
+ }
+
+        // If the VAD decision is non-active, update DTX. NOTE! We only do this if the
+        // first part of a frame gets the VAD decision "inactive". Otherwise DTX might
+        // say it is time to transmit a SID frame, but we will encode the whole frame,
+        // because the first part is active.
+ *samplesProcessed = 0;
+ if((status == 0) && (i==0) && _dtxEnabled && !_hasInternalDTX)
+ {
+ WebRtc_Word16 bitStreamLen;
+ WebRtc_Word16 num10MsecFrames = noSamplesToProcess[i] / samplesIn10Msec;
+ *bitStreamLenByte = 0;
+ for(WebRtc_Word16 n = 0; n < num10MsecFrames; n++)
+ {
+ // This block is (passive) && (vad enabled)
+ status = WebRtcCng_Encode(_ptrDTXInst, &audio[n*samplesIn10Msec],
+ samplesIn10Msec, bitStream, &bitStreamLen, 0);
+ if (status < 0) {
+ return -1;
+ }
+
+ *samplesProcessed += samplesIn10Msec*_noChannels;
+
+ // bitStreamLen will only be > 0 once per 100 ms
+ *bitStreamLenByte += bitStreamLen;
+ }
+
+
+ // Check if all samples got processed by the DTX
+ if(*samplesProcessed != noSamplesToProcess[i]*_noChannels) {
+ // Set to zero since something went wrong. Shouldn't happen.
+ *samplesProcessed = 0;
+ }
+ }
+
+ if(*samplesProcessed > 0)
+ {
+ // The block contains inactive speech, and is processed by DTX.
+ // Discontinue running VAD.
+ break;
+ }
+ }
+
+ return status;
+}
+
+WebRtc_Word16
+ACMGenericCodec::SamplesLeftToEncode()
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return (_frameLenSmpl <= _inAudioIxWrite)?
+ 0:(_frameLenSmpl - _inAudioIxWrite);
+}
+
+WebRtc_Word32
+ACMGenericCodec::UnregisterFromNetEq(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ if(!_registeredInNetEq)
+ {
+ return 0;
+ }
+ if(UnregisterFromNetEqSafe(netEq, payloadType) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "UnregisterFromNetEq: error, cannot unregister from NetEq");
+ _registeredInNetEq = true;
+ return -1;
+ }
+ else
+ {
+ _registeredInNetEq = false;
+ return 0;
+ }
+}
+
+void
+ACMGenericCodec::SetUniqueID(
+ const WebRtc_UWord32 id)
+{
+ _uniqueID = id;
+}
+
+bool
+ACMGenericCodec::IsAudioBufferFresh() const
+{
+ ReadLockScoped rl(_codecWrapperLock);
+ return _isAudioBuffFresh;
+}
+
+// This function is overridden by codec-specific implementations for some codecs
+WebRtc_Word16
+ACMGenericCodec::EncoderSampFreq(WebRtc_UWord16& sampFreqHz)
+{
+ WebRtc_Word32 f;
+ f = ACMCodecDB::CodecFreq(_codecID);
+ if(f < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EncoderSampFreq: codec frequency is negative");
+ return -1;
+ }
+ else
+ {
+ sampFreqHz = (WebRtc_UWord16)f;
+ return 0;
+ }
+}
+
+
+WebRtc_Word32
+ACMGenericCodec::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 /* initFrameSizeMsec */,
+ const WebRtc_UWord16 /* initRateBitPerSec */,
+ const bool /* enforceFrameSize */)
+{
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to config iSAC bandwidth estimator.");
+ return -1;
+}
+
+WebRtc_Word32
+ACMGenericCodec::SetISACMaxRate(
+ const WebRtc_UWord32 /* maxRateBitPerSec */)
+{
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to set iSAC max rate.");
+ return -1;
+}
+
+WebRtc_Word32
+ACMGenericCodec::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 /* maxPayloadLenBytes */)
+{
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+ "The send-codec is not iSAC, failed to set iSAC max payload-size.");
+ return -1;
+}
+
+
+void
+ACMGenericCodec::SaveDecoderParam(
+ const WebRtcACMCodecParams* codecParams)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ SaveDecoderParamSafe(codecParams);
+}
+
+
+void
+ACMGenericCodec::SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* codecParams)
+{
+ memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+}
+
+WebRtc_Word16
+ACMGenericCodec::UpdateEncoderSampFreq(
+ WebRtc_UWord16 /* encoderSampFreqHz */)
+{
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+        "A change in sampling frequency was requested while the \
+current send-codec supports only one sampling rate.");
+ return -1;
+}
+
+
+void
+ACMGenericCodec::SetIsMaster(
+ bool isMaster)
+{
+ WriteLockScoped wl(_codecWrapperLock);
+ _isMaster = isMaster;
+}
+
+
+
+WebRtc_Word16
+ACMGenericCodec::REDPayloadISAC(
+ const WebRtc_Word32 /* isacRate */,
+ const WebRtc_Word16 /* isacBwEstimate */,
+ WebRtc_UWord8* /* payload */,
+ WebRtc_Word16* /* payloadLenBytes */)
+{
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error: REDPayloadISAC is an iSAC specific function");
+ return -1;
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_generic_codec.h b/src/modules/audio_coding/main/source/acm_generic_codec.h
new file mode 100644
index 0000000..23e16a6
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_generic_codec.h
@@ -0,0 +1,1333 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_GENERIC_CODEC_H
+#define ACM_GENERIC_CODEC_H
+
+#include "acm_common_defs.h"
+#include "audio_coding_module_typedefs.h"
+#include "rw_lock_wrapper.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+
+#define MAX_FRAME_SIZE_10MSEC 6
+
+// forward declaration
+struct WebRtcVadInst;
+struct WebRtcCngEncInst;
+
+namespace webrtc
+{
+
+// forward declaration
+struct CodecInst;
+class ACMNetEQ;
+
+class ACMGenericCodec
+{
+public:
+ ///////////////////////////////////////////////////////////////////////////
+ // Constructor of the class
+ //
+ ACMGenericCodec();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Destructor of the class.
+ //
+ virtual ~ACMGenericCodec();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // ACMGenericCodec* CreateInstance();
+ // The function will be used for FEC. It is not implemented yet.
+ //
+ virtual ACMGenericCodec* CreateInstance() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 Encode()
+ // The function is called to perform an encoding of the audio stored in
+    // the audio buffer. An encoding is performed only if enough audio, i.e.
+    // equal to the frame-size of the codec, is available. The audio frame will
+    // be processed by VAD and CN/DTX if required. There are a few different cases.
+ //
+ // A) Neither VAD nor DTX is active; the frame is encoded by the encoder.
+ //
+ // B) VAD is enabled but not DTX; in this case the audio is processed by VAD
+ // and encoded by the encoder. The "*encodingType" will be either
+ // "activeNormalEncode" or "passiveNormalEncode" if frame is active or
+ // passive, respectively.
+ //
+ // C) DTX is enabled; if the codec has internal VAD/DTX we just encode the
+ // frame by the encoder. Otherwise, the frame is passed through VAD and
+ // if identified as passive, then it will be processed by CN/DTX. If the
+ // frame is active it will be encoded by the encoder.
+ //
+ // This function acquires the appropriate locks and calls EncodeSafe() for
+ // the actual processing.
+ //
+ // Outputs:
+ // -bitStream : a buffer where bit-stream will be written to.
+ // -bitStreamLenByte : contains the length of the bit-stream in
+ // bytes.
+ // -timeStamp : contains the RTP timestamp, this is the
+ // sampling time of the first sample encoded
+ // (measured in number of samples).
+ // -encodingType : contains the type of encoding applied on the
+ // audio samples. The alternatives are
+ // (c.f. acm_common_types.h)
+ // -kNoEncoding:
+    //                             there was not enough data to encode, or
+    //                             an error occurred that prevented
+    //                             encoding.
+ // -kActiveNormalEncoded:
+ // the audio frame is active and encoded by
+ // the given codec.
+ // -kPassiveNormalEncoded:
+ // the audio frame is passive but coded with
+ // the given codec (NO DTX).
+    //                          -kPassiveDTXWB:
+    //                             the audio frame is passive and was
+    //                             encoded with wide-band CN.
+    //                          -kPassiveDTXNB:
+    //                             the audio frame is passive and was
+    //                             encoded with narrow-band CN.
+ //
+ // Return value:
+    //   -1 if an error occurred, otherwise the length of the bit-stream in
+ // bytes.
+ //
+ WebRtc_Word16 Encode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType);
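+    // A minimal usage sketch (illustrative only, not taken from this module;
+    // the buffer size and the "codec" pointer are assumptions): feed audio
+    // with Add10MsData() until SamplesLeftToEncode() returns 0, then call
+    // Encode() and hand the bit-stream to the packetization callback, e.g.
+    //
+    //   WebRtc_UWord8 bitStream[1000];  // size chosen only for illustration
+    //   WebRtc_Word16 lenBytes;
+    //   WebRtc_UWord32 rtpTimestamp;
+    //   WebRtcACMEncodingType encodingType;
+    //   if(codec->Encode(bitStream, &lenBytes, &rtpTimestamp,
+    //                    &encodingType) > 0)
+    //   {
+    //       // forward "bitStream"/"lenBytes" with "rtpTimestamp"
+    //   }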
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 Decode()
+ // This function is used to decode a given bit-stream, without engaging
+ // NetEQ.
+ //
+ // This function acquires the appropriate locks and calls DecodeSafe() for
+ // the actual processing. Please note that this is not functional yet.
+ //
+ // Inputs:
+ // -bitStream : a buffer where bit-stream will be read.
+ // -bitStreamLenByte : the length of the bit-stream in bytes.
+ //
+ // Outputs:
+    //   -audio              : pointer to a buffer where the audio will be written.
+    //   -audioSamples       : number of audio samples produced by decoding the
+    //                         given bit-stream.
+ // -speechType : speech type (for future use).
+ //
+ // Return value:
+ // -1 if failed to decode,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 Decode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool EncoderInitialized();
+ //
+ // Return value:
+ // True if the encoder is successfully initialized,
+ // false otherwise.
+ //
+ bool EncoderInitialized();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool DecoderInitialized();
+ //
+ // Return value:
+ // True if the decoder is successfully initialized,
+ // false otherwise.
+ //
+ bool DecoderInitialized();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EncoderParams()
+ // It is called to get encoder parameters. It will call
+ // EncoderParamsSafe() in turn.
+ //
+ // Output:
+    //   -encParams          : a buffer where the encoder parameters are
+    //                         written to. If the encoder is not
+    //                         initialized this buffer is filled with
+    //                         invalid values.
+ // Return value:
+ // -1 if the encoder is not initialized,
+ // 0 otherwise.
+ //
+ //
+ WebRtc_Word16 EncoderParams(
+ WebRtcACMCodecParams *encParams);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+    // bool DecoderParams(...)
+ // It is called to get decoder parameters. It will call DecoderParamsSafe()
+ // in turn.
+ //
+ // Output:
+    //   -decParams          : a buffer where the decoder parameters are
+    //                         written to. If the decoder is not initialized
+    //                         this buffer is filled with invalid values.
+ //
+ // Return value:
+    //   false if the decoder is not initialized,
+    //   true otherwise.
+ //
+ //
+ bool DecoderParams(
+ WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InitEncoder(...)
+ // This function is called to initialize the encoder with the given
+ // parameters.
+ //
+ // Input:
+ // -codecParams : parameters of encoder.
+ // -forceInitialization: if false the initialization is invoked only if
+ // the encoder is not initialized. If true the
+ // encoder is forced to (re)initialize.
+ //
+ // Return value:
+ // 0 if could initialize successfully,
+ // -1 if failed to initialize.
+ //
+ //
+ WebRtc_Word16 InitEncoder(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InitDecoder()
+ // This function is called to initialize the decoder with the given
+ // parameters. (c.f. acm_common_defs.h & common_types.h for the
+ // definition of the structure)
+ //
+ // Input:
+ // -codecParams : parameters of decoder.
+ // -forceInitialization: if false the initialization is invoked only
+ // if the decoder is not initialized. If true
+    //                         the decoder is forced to (re)initialize.
+ //
+ // Return value:
+ // 0 if could initialize successfully,
+ // -1 if failed to initialize.
+ //
+ //
+ WebRtc_Word16 InitDecoder(
+ WebRtcACMCodecParams* codecParams,
+ bool forceInitialization);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 RegisterInNetEq(...)
+ // This function is called to register the decoder in NetEq, with the given
+ // payload-type.
+ //
+ // Inputs:
+ // -netEq : pointer to NetEq Instance
+    //   -codecInst          : instance with the settings of the codec
+ //
+    // Return values:
+    //   -1 if failed to register,
+    //   0 if successfully registered.
+ //
+ WebRtc_Word32 RegisterInNetEq(
+ ACMNetEQ* netEq,
+ const CodecInst& codecInst);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 Add10MsData(...)
+ // This function is called to add 10 ms of audio to the audio buffer of
+ // the codec.
+ //
+ // Inputs:
+    //   -timeStamp          : the timestamp of the 10 ms audio. The timestamp
+    //                         is the sampling time of the first sample,
+    //                         measured in number of samples.
+    //   -data               : a buffer that contains the audio. The codec
+    //                         expects to get the audio at the correct sampling
+    //                         frequency.
+ // -length : the length of the audio buffer
+ // -audioChannel : 0 for mono, 1 for stereo (not supported yet)
+ //
+ // Return values:
+ // -1 if failed
+ // 0 otherwise.
+ //
+ WebRtc_Word32 Add10MsData(
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 length,
+ const WebRtc_UWord8 audioChannel);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 NoMissedSamples()
+    // This function returns the number of samples which have been overwritten
+    // in the audio buffer. Audio samples are overwritten when Add10MsData() is
+    // called while the input audio buffer is already full. (We might remove
+    // this function if it is not used.)
+ //
+ // Return Value:
+ // Number of samples which are overwritten.
+ //
+ WebRtc_UWord32 NoMissedSamples() const;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void ResetNoMissedSamples()
+ // This function resets the number of overwritten samples to zero.
+ // (We might remove this function if we remove NoMissedSamples())
+ //
+ void ResetNoMissedSamples();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetBitRate()
+ // The function is called to set the encoding rate.
+ //
+ // Input:
+ // -bitRateBPS : encoding rate in bits per second
+ //
+ // Return value:
+    //   -1 if failed to set the rate, due to an invalid input or because the
+    //      given codec is not rate-adjustable.
+ // 0 if the rate is adjusted successfully
+ //
+ WebRtc_Word16 SetBitRate(const WebRtc_Word32 bitRateBPS);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // DestructEncoderInst()
+    // This API is used in conferencing. It will free the memory that is pointed
+    // to by "ptrInst". "ptrInst" is a pointer to an encoder instance, created
+    // and filled up by calling EncoderInst(...).
+ //
+ // Inputs:
+ // -ptrInst : pointer to an encoder instance to be deleted.
+ //
+ //
+ void DestructEncoderInst(
+ void* ptrInst);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 AudioBuffer()
+    // This is used when synchronization of codecs is required. There are cases
+    // where the audio buffers of two codecs have to be synchronized. By calling
+    // this function one can get the audio buffer and other related parameters,
+    // such as timestamps.
+ //
+ // Output:
+ // -audioBuff : a pointer to WebRtcACMAudioBuff where the audio
+ // buffer of this codec will be written to.
+ //
+ // Return value:
+ // -1 if fails to copy the audio buffer,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 AudioBuffer(
+ WebRtcACMAudioBuff& audioBuff);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 EarliestTimestamp()
+ // Returns the timestamp of the first 10 ms in audio buffer. This is used
+ // to identify if a synchronization of two encoders is required.
+ //
+ // Return value:
+ // timestamp of the first 10 ms audio in the audio buffer.
+ //
+ WebRtc_UWord32 EarliestTimestamp() const;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetAudioBuffer()
+ // This function is called to set the audio buffer and the associated
+ // parameters to a given value.
+ //
+ // Return value:
+ // -1 if fails to copy the audio buffer,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 SetAudioBuffer(WebRtcACMAudioBuff& audioBuff);
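+    // A typical synchronization sketch (illustrative only): call AudioBuffer()
+    // on the codec whose state should be mirrored and pass the result to
+    // SetAudioBuffer() on the other codec, so that both encoders continue
+    // from the same buffered audio and timestamps.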
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SetVAD()
+ // This is called to set VAD & DTX. If the codec has internal DTX that will
+ // be used. If DTX is enabled and the codec does not have internal DTX,
+ // WebRtc-VAD will be used to decide if the frame is active. If DTX is
+    // disabled but VAD is enabled, the audio is passed through VAD to label it
+    // as active or passive, but the frame is encoded normally. However, the
+    // bit-stream is labeled properly so that ACM::Process() can use this
+ // information. In case of failure, the previous states of the VAD & DTX
+ // are kept.
+ //
+ // Inputs:
+ // -enableDTX : if true DTX will be enabled otherwise the DTX is
+ // disabled. If codec has internal DTX that will be
+ // used, otherwise WebRtc-CNG is used. In the latter
+ // case VAD is automatically activated.
+ // -enableVAD : if true WebRtc-VAD is enabled, otherwise VAD is
+ // disabled, except for the case that DTX is enabled
+ // but codec doesn't have internal DTX. In this case
+ // VAD is enabled regardless of the value of
+ // "enableVAD."
+ // -mode : this specifies the aggressiveness of VAD.
+ //
+ // Return value
+ // -1 if failed to set DTX & VAD as specified,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 SetVAD(
+ const bool enableDTX = true,
+ const bool enableVAD = false,
+ const ACMVADMode mode = VADNormal);
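+    // For example (illustrative only): SetVAD(true, false, VADNormal) on a
+    // codec without internal DTX enables WebRtc CN/DTX and force-enables VAD;
+    // in that case the call returns 1 to signal that VAD was turned on even
+    // though "enableVAD" was false.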
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ReplaceInternalDTX()
+ // This is called to replace the codec internal DTX with WebRtc DTX.
+    // This is only valid for G729, where the user has the possibility to replace
+ // AnnexB with WebRtc DTX. For other codecs this function has no effect.
+ //
+ // Input:
+ // -replaceInternalDTX : if true the internal DTX is replaced with WebRtc.
+ //
+ // Return value
+ // -1 if failed to replace internal DTX,
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 ReplaceInternalDTX(const bool replaceInternalDTX);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 IsInternalDTXReplaced()
+    // This is called to check if the codec-internal DTX has been replaced by
+    // WebRtc DTX. This is only valid for G729, where the user has the
+    // possibility to replace AnnexB with WebRtc DTX. For other codecs this
+    // function has no effect.
+ //
+ // Output:
+ // -internalDTXReplaced : if true the internal DTX is replaced with WebRtc.
+ //
+ // Return value
+    //   -1 if the check failed or replacement of the internal DTX is not feasible,
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 IsInternalDTXReplaced(bool* internalDTXReplaced);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void SetNetEqDecodeLock()
+ // Passes the NetEq lock to the codec.
+ //
+ // Input:
+ // -netEqDecodeLock : pointer to the lock associated with NetEQ of ACM.
+ //
+ void SetNetEqDecodeLock(
+ RWLockWrapper* netEqDecodeLock)
+ {
+ _netEqDecodeLock = netEqDecodeLock;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // bool HasInternalDTX()
+ // Used to check if the codec has internal DTX.
+ //
+ // Return value:
+ // true if the codec has an internal DTX, e.g. G729,
+ // false otherwise.
+ //
+ bool HasInternalDTX() const
+ {
+ return _hasInternalDTX;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 GetEstimatedBandwidth()
+ // Used to get decoder estimated bandwidth. Only iSAC will provide a value.
+ //
+ //
+ // Return value:
+ // -1 if fails to get decoder estimated bandwidth,
+ // >0 estimated bandwidth in bits/sec.
+ //
+ WebRtc_Word32 GetEstimatedBandwidth();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 SetEstimatedBandwidth()
+    // Used to set the estimated bandwidth sent out-of-band from the other side.
+    // Only iSAC has use for the value.
+ //
+ // Input:
+ // -estimatedBandwidth: estimated bandwidth in bits/sec
+ //
+ // Return value:
+ // -1 if fails to set estimated bandwidth,
+ // 0 on success.
+ //
+ WebRtc_Word32 SetEstimatedBandwidth(WebRtc_Word32 estimatedBandwidth);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 GetRedPayload()
+ // Used to get codec specific RED payload (if such is implemented).
+ // Currently only done in iSAC.
+ //
+ // Outputs:
+ // -redPayload : a pointer to the data for RED payload.
+ // -payloadBytes : number of bytes in RED payload.
+ //
+ // Return value:
+ // -1 if fails to get codec specific RED,
+ // 0 if succeeded.
+ //
+ WebRtc_Word32 GetRedPayload(
+ WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 ResetEncoder()
+ // By calling this function you would re-initialize the encoder with the
+ // current parameters. All the settings, e.g. VAD/DTX, frame-size... should
+ // remain unchanged. (In case of iSAC we don't want to lose BWE history.)
+ //
+ // Return value
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ResetEncoder();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+    // WebRtc_Word16 ResetDecoder()
+ // By calling this function you would re-initialize the decoder with the
+ // current parameters.
+ //
+ // Return value
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ResetDecoder(
+ WebRtc_Word16 payloadType);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void DestructEncoder()
+ // This function is called to delete the encoder instance, if possible, to
+ // have a fresh start. For codecs where encoder and decoder share the same
+ // instance we cannot delete the encoder and instead we will initialize the
+ // encoder. We also delete VAD and DTX if they have been created.
+ //
+ void DestructEncoder();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void DestructDecoder()
+ // This function is called to delete the decoder instance, if possible, to
+ // have a fresh start. For codecs where encoder and decoder share the same
+    // instance we cannot delete the decoder and instead we will initialize the
+    // decoder. Before deleting the decoder instance it has to be removed from the
+ // NetEq list.
+ //
+ void DestructDecoder();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 SamplesLeftToEncode()
+ // Returns the number of samples required to be able to do encoding.
+ //
+ // Return value:
+ // Number of samples.
+ //
+ WebRtc_Word16 SamplesLeftToEncode();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_UWord32 LastEncodedTimestamp()
+ // Returns the timestamp of the last frame it encoded.
+ //
+ // Return value:
+ // Timestamp.
+ //
+ WebRtc_UWord32 LastEncodedTimestamp() const;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 UnregisterFromNetEq()
+ // To remove the codec from NetEQ. If the codec (or the decoder instance)
+ // is going to be deleted, first the codec has to be removed from NetEq
+ // by calling this function.
+ //
+ // Input:
+ // -netEq : pointer to a NetEq instance that the codec
+ // has to be unregistered from.
+ //
+    // Return value:
+ // -1 if failed to unregister the codec,
+ // 0 if the codec is successfully unregistered.
+ //
+ WebRtc_Word32 UnregisterFromNetEq(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // SetUniqueID()
+    // Set a unique ID for the codec to be used for tracing and debugging.
+ //
+ // Input
+ // -id : A number to identify the codec.
+ //
+ void SetUniqueID(
+ const WebRtc_UWord32 id);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // IsAudioBufferFresh()
+    // Specifies whether any audio has ever been injected into this codec.
+ //
+ // Return value
+    //   -true: no audio has been fed into this codec,
+    //   -false: audio has already been fed to the codec.
+ //
+ bool IsAudioBufferFresh() const;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // UpdateDecoderSampFreq()
+    // For most of the codecs this function does nothing. It must be
+    // implemented for those codecs where one codec instance serves as the
+    // decoder for different flavors of the codec. One example is iSAC. There,
+    // iSAC 16 kHz and iSAC 32 kHz are treated as two different codecs with
+    // different payload types, however, there is only one iSAC instance to
+    // decode. The reason for that is that we would like to decode and encode
+    // with the same codec instance for the bandwidth estimator to work.
+    //
+    // Each time we receive a new payload type, we call this function to
+    // prepare the decoder associated with the new payload. Normally, decoders
+    // don't have to do anything. For iSAC the decoder has to change its
+    // sampling rate. The input parameter specifies the current flavor of the
+    // codec in the codec database. For instance, if we just got a SWB payload
+    // then the input parameter is ACMCodecDB::isacswb.
+ //
+ // Input:
+ // -codecId : the ID of the codec associated with the
+ // payload type that we just received.
+ //
+ // Return value:
+ // 0 if succeeded in updating the decoder.
+ // -1 if failed to update.
+ //
+ virtual WebRtc_Word16 UpdateDecoderSampFreq(
+ WebRtc_Word16 /* codecId */)
+ {
+ return 0;
+ }
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // UpdateEncoderSampFreq()
+ // Call this function to update the encoder sampling frequency. This
+ // is for codecs where one payload-name supports several encoder sampling
+ // frequencies. Otherwise, to change the sampling frequency we need to
+    // register a new codec. ACM will consider that as registration of a new
+ // codec, not a change in parameter. For iSAC, switching from WB to SWB
+ // is treated as a change in parameter. Therefore, we need this function.
+ //
+ // Input:
+ // -encoderSampFreqHz : encoder sampling frequency.
+ //
+ // Return value:
+ // -1 if failed, or if this is meaningless for the given codec.
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 UpdateEncoderSampFreq(
+ WebRtc_UWord16 encoderSampFreqHz);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // EncoderSampFreq()
+ // Get the sampling frequency that the encoder (WebRtc wrapper) expects.
+ //
+ // Output:
+ // -sampFreqHz : sampling frequency, in Hertz, which the encoder
+ // should be fed with.
+ //
+ // Return value:
+ // -1 if failed to output sampling rate.
+ // 0 if the sample rate is returned successfully.
+ //
+ virtual WebRtc_Word16 EncoderSampFreq(
+ WebRtc_UWord16& sampFreqHz);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word32 ConfigISACBandwidthEstimator()
+    // Call this function to configure the bandwidth estimator of iSAC.
+    // During the adaptation of the bit-rate, iSAC automatically adjusts the
+    // frame-size (either 30 or 60 ms) to save on the RTP header. The initial
+    // frame-size can be specified by the first argument. The configuration
+    // also concerns the initial estimate of the bandwidth: the estimator
+    // starts from this point and converges to the actual bottleneck. This is
+    // given by the second parameter. Furthermore, it is also possible to
+    // control the adaptation of the frame-size, which is specified by the
+    // last parameter.
+ //
+ // Input:
+    //   -initFrameSizeMsec  : initial frame-size in milliseconds. For iSAC-wb
+    //                         30 ms and 60 ms (default) are acceptable values,
+    //                         and for iSAC-swb 30 ms is the only acceptable
+    //                         value. Zero indicates the default value.
+ // -initRateBitPerSec : initial estimate of the bandwidth. Values
+ // between 10000 and 58000 are acceptable.
+ // -enforceFrameSize : if true, the frame-size will not be adapted.
+ //
+ // Return value:
+ // -1 if failed to configure the bandwidth estimator,
+ // 0 if the configuration was successfully applied.
+ //
+ virtual WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize);
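+    // For instance (illustrative values within the documented ranges), a
+    // wideband iSAC sender could call
+    // ConfigISACBandwidthEstimator(30, 32000, false) to start with 30 ms
+    // frames and an initial estimate of 32000 bits/sec while still allowing
+    // frame-size adaptation.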
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // SetISACMaxPayloadSize()
+ // Set the maximum payload size of iSAC packets. No iSAC payload,
+ // regardless of its frame-size, may exceed the given limit. For
+    // an iSAC payload of size B bits and frame-size T sec we have
+ // (B < maxPayloadLenBytes * 8) and (B/T < maxRateBitPerSec), c.f.
+ // SetISACMaxRate().
+ //
+ // Input:
+ // -maxPayloadLenBytes : maximum payload size in bytes.
+ //
+ // Return value:
+    //   -1 if failed to set the maximum payload-size.
+    //   0 if the given limit is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // SetISACMaxRate()
+ // Set the maximum instantaneous rate of iSAC. For a payload of B bits
+    // with a frame-size of T sec the instantaneous rate is B/T bits per
+ // second. Therefore, (B/T < maxRateBitPerSec) and
+ // (B < maxPayloadLenBytes * 8) are always satisfied for iSAC payloads,
+ // c.f SetISACMaxPayloadSize().
+ //
+ // Input:
+ // -maxRateBitPerSec : maximum instantaneous bit-rate given in bits/sec.
+ //
+ // Return value:
+ // -1 if failed to set the maximum rate.
+ // 0 if the maximum rate is set successfully.
+ //
+ virtual WebRtc_Word32 SetISACMaxRate(
+ const WebRtc_UWord32 maxRateBitPerSec);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+    // SaveDecoderParam()
+    // Save the parameters of the decoder.
+ //
+ // Input:
+ // -codecParams : pointer to a struct where the parameters of
+    //                         the decoder are stored.
+ //
+ void SaveDecoderParam(
+ const WebRtcACMCodecParams* codecParams);
+
+
+ WebRtc_Word32 FrameSize()
+ {
+ return _frameLenSmpl;
+ }
+
+ void SetIsMaster(bool isMaster);
+
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // REDPayloadISAC()
+    // This is an iSAC-specific function. The function is called to get a RED
+    // payload from a default-encoder.
+    //
+    // Inputs:
+    //   -isacRate           : the target rate of the main payload. A RED
+    //                         payload is generated according to the rate of
+    //                         the main payload. Note that we are not specifying
+    //                         the rate of the RED payload, but the main payload.
+    //   -isacBwEstimate     : bandwidth information that should be inserted in
+    //                         the RED payload.
+    //
+    // Output:
+    //   -payload            : pointer to a buffer where the RED payload will
+    //                         be written to.
+    //   -payloadLenBytes    : a place-holder to write the length of the RED
+    //                         payload in bytes.
+    //
+    // Return value:
+    //   -1 if an error occurs, otherwise the length of the payload (in bytes)
+    //       is returned.
+ //
+ //
+ virtual WebRtc_Word16 REDPayloadISAC(
+ const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes);
+
+protected:
+ ///////////////////////////////////////////////////////////////////////////
+ // All the functions with FunctionNameSafe(...) contain the actual
+ // implementation of FunctionName(...). FunctionName() acquires an
+ // appropriate lock and calls FunctionNameSafe() to do the actual work.
+ // Therefore, for the description of functionality, input/output arguments
+ // and return value we refer to FunctionName()
+ //
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See Encode() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+ WebRtc_Word16 EncodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timeStamp,
+ WebRtcACMEncodingType* encodingType);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See Decode() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+ virtual WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+    // See Add10MsData() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 Add10MsDataSafe(
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16* data,
+ const WebRtc_UWord16 length,
+ const WebRtc_UWord8 audioChannel);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See RegisterInNetEq() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst) = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+    // See EncoderParams() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 EncoderParamsSafe(
+ WebRtcACMCodecParams *encParams);
+
+ ///////////////////////////////////////////////////////////////////////////
+    // See DecoderParams() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ // Note:
+    //   Any class where a single instance handles several flavors of the
+    //   same codec, so that several payload types are associated with
+    //   the same instance, has to implement this function.
+    //
+    //   Currently only iSAC implements it. A single iSAC instance is
+    //   used for decoding both WB & SWB streams. At a given moment both WB &
+    //   SWB can be registered as receive codecs. Hence two payloads are
+    //   associated with a single codec instance.
+ //
+ virtual bool DecoderParamsSafe(
+ WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See ResetEncoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 ResetEncoderSafe();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See InitEncoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 InitEncoderSafe(
+ WebRtcACMCodecParams *codecParams,
+ bool forceInitialization);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See InitDecoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 InitDecoderSafe(
+ WebRtcACMCodecParams *codecParams,
+ bool forceInitialization);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See ResetDecoder() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ WebRtc_Word16 ResetDecoderSafe(
+ WebRtc_Word16 payloadType);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See DestructEncoder() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual void DestructEncoderSafe() = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See DestructDecoder() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual void DestructDecoderSafe() = 0;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetBitRate() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ // Any codec that can change the bit-rate has to implement this.
+ //
+ virtual WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 bitRateBPS);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See GetEstimatedBandwidth() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 GetEstimatedBandwidthSafe();
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetEstimatedBandwidth() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 SetEstimatedBandwidthSafe(WebRtc_Word32 estimatedBandwidth);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See GetRedPayload() for the description of function, input(s)/output(s)
+ // and return value.
+ //
+ virtual WebRtc_Word32 GetRedPayloadSafe(
+ WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See SetVAD() for the description of function, input(s)/output(s) and
+ // return value.
+ //
+ WebRtc_Word16 SetVADSafe(
+ const bool enableDTX = true,
+ const bool enableVAD = false,
+ const ACMVADMode mode = VADNormal);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See ReplaceInternalDTX() for the description of function, input and
+ // return value.
+ //
+ virtual WebRtc_Word32 ReplaceInternalDTXSafe(
+ const bool replaceInternalDTX);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See IsInternalDTXReplaced() for the description of function, input and
+ // return value.
+ //
+ virtual WebRtc_Word32 IsInternalDTXReplacedSafe(
+ bool* internalDTXReplaced);
+
+ ///////////////////////////////////////////////////////////////////////////
+ // See UnregisterFromNetEq() for the description of function,
+ // input(s)/output(s) and return value.
+ //
+ virtual WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 CreateEncoder()
+ // Creates the encoder instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 CreateEncoder();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 CreateDecoder()
+ // Creates the decoder instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 CreateDecoder();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EnableVAD();
+ // Enables VAD with the given mode. The VAD instance will be created if
+    // it does not exist.
+ //
+ // Input:
+ // -mode : VAD mode c.f. audio_coding_module_typedefs.h for
+ // the options.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 EnableVAD(ACMVADMode mode);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 DisableVAD()
+ // Disables VAD.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 DisableVAD();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 EnableDTX()
+    // Enables DTX. This method should be overridden by codecs which have
+ // internal DTX.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 EnableDTX();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 DisableDTX()
+    // Disables usage of DTX. This method should be overridden by codecs which
+ // have internal DTX.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 DisableDTX();
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalEncode()
+ // This is a codec-specific function called in EncodeSafe() to actually
+ // encode a frame of audio.
+ //
+ // Outputs:
+ // -bitStream : pointer to a buffer where the bit-stream is
+ // written to.
+    //   -bitStreamLenByte   : the length of the bit-stream in bytes; a negative
+    //                         value indicates an error.
+ //
+ // Return value:
+ // -1 if failed,
+ // otherwise the length of the bit-stream is returned.
+ //
+ virtual WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalInitEncoder()
+ // This is a codec-specific function called in InitEncoderSafe(), it has to
+ // do all codec-specific operation to initialize the encoder given the
+ // encoder parameters.
+ //
+ // Input:
+ // -codecParams : pointer to a structure that contains parameters to
+ // initialize encoder.
+ // Set codecParam->CodecInst.rate to -1 for
+ // iSAC to operate in adaptive mode.
+ // (to do: if frame-length is -1 frame-length will be
+ // automatically adjusted, otherwise, given
+ // frame-length is forced)
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalInitDecoder()
+ // This is a codec-specific function called in InitDecoderSafe(), it has to
+ // do all codec-specific operation to initialize the decoder given the
+ // decoder parameters.
+ //
+ // Input:
+ // -codecParams : pointer to a structure that contains parameters to
+    //                         initialize the decoder.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void IncreaseNoMissedSamples()
+ // This method is called to increase the number of samples that are
+ // overwritten in the audio buffer.
+ //
+ // Input:
+ // -noSamples : the number of overwritten samples is incremented
+ // by this value.
+ //
+ void IncreaseNoMissedSamples(
+ const WebRtc_Word16 noSamples);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalCreateEncoder()
+    // This is a codec-specific method called in CreateEncoder(); it is
+    // supposed to perform all codec-specific operations to create the encoder
+    // instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalCreateEncoder() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalCreateDecoder()
+    // This is a codec-specific method called in CreateDecoder(); it is
+    // supposed to perform all codec-specific operations to create the decoder
+    // instance.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalCreateDecoder() = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // void InternalDestructEncoderInst()
+ // This is a codec-specific method, used in conferencing, called from
+    // DestructEncoderInst(). The input argument is a pointer to the encoder
+    // instance (the codec instance for codecs where encoder and decoder share
+    // the same instance). This method is called to free the memory that "ptrInst" is
+ // pointing to.
+ //
+ // Input:
+ // -ptrInst : pointer to encoder instance.
+ //
+    //
+ virtual void InternalDestructEncoderInst(
+ void* ptrInst) = 0;
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 InternalResetEncoder()
+    // This method is called to reset the state of the encoder. However, the
+    // current parameters, e.g. frame-length, should remain as they are. For
+    // most of the codecs a re-initialization of the encoder is what needs to
+    // be done. But for iSAC we would like to keep the BWE history, so we cannot
+    // re-initialize. As soon as such an API is implemented in iSAC this method
+    // has to be overridden in the ACMISAC class.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ virtual WebRtc_Word16 InternalResetEncoder();
+
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WebRtc_Word16 ProcessFrameVADDTX()
+ // This function is called when a full frame of audio is available. It will
+    // break the audio frame into blocks such that each block can be processed
+    // by VAD & CN/DTX. If a frame is divided into two blocks there are two
+    // cases. First, if the first block is active, the second block is not
+    // processed by CN/DTX but only by VAD, and we return to the caller with
+    // "*samplesProcessed" set to zero; the audio frame will then be encoded
+    // by the encoder. Second, if the first block is inactive, it is processed
+    // by CN/DTX, we stop processing the next block, and we return to the
+    // caller, which is EncodeSafe(), with "*samplesProcessed" equal to the
+    // number of samples in the first block.
+ //
+ // Output:
+ // -bitStream : pointer to a buffer where DTX frame, if
+ // generated, will be written to.
+ // -bitStreamLenByte : contains the length of bit-stream in bytes, if
+ // generated. Zero if no bit-stream is generated.
+    //   -samplesProcessed   : contains the number of samples that CN has
+    //                         actually processed. Those samples processed by CN
+    //                         will, obviously, not be encoded by the encoder. If
+    //                         it contains zero, the frame has been identified
+    //                         as active by VAD. Note that "*samplesProcessed"
+    //                         might be non-zero while "*bitStreamLenByte" is
+    //                         zero.
+ //
+ // Return value:
+ // -1 if failed,
+ // 0 if succeeded.
+ //
+ WebRtc_Word16 ProcessFrameVADDTX(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16* samplesProcessed);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CanChangeEncodingParam()
+    // Check if the codec parameters can be changed. In conferencing, codec
+    // parameters normally cannot be changed. The exception is the bit-rate of
+    // iSAC.
+    //
+    // Return value:
+    //   -true  if codec parameters are allowed to change,
+    //   -false otherwise.
+ //
+ virtual bool CanChangeEncodingParam(CodecInst& codecInst);
+
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CurrentRate()
+ // Call to get the current encoding rate of the encoder. This function
+    // should be overridden for codecs which automatically change their
+ // target rate. One example is iSAC. The output of the function is the
+ // current target rate.
+ //
+ // Output:
+ // -rateBitPerSec : the current target rate of the codec.
+ //
+ virtual void CurrentRate(
+ WebRtc_Word32& /* rateBitPerSec */)
+ {
+
+ return;
+ }
+
+ virtual void SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* codecParams);
+
+
+    // &_inAudio[_inAudioIxWrite] always points to where new audio can be
+    // written to.
+ WebRtc_Word16 _inAudioIxWrite;
+
+ // &_inAudio[_inAudioIxRead] points to where audio has to be read from
+ WebRtc_Word16 _inAudioIxRead;
+
+ WebRtc_Word16 _inTimestampIxWrite;
+
+    // Where the audio is stored before encoding.
+    // To save memory the following buffer can be allocated
+    // dynamically for 80 ms depending on the sampling frequency
+ // of the codec.
+ WebRtc_Word16* _inAudio;
+ WebRtc_UWord32* _inTimestamp;
+
+ WebRtc_Word16 _frameLenSmpl;
+ WebRtc_UWord16 _noChannels;
+
+    // Identifies this codec in the static database of the supported codecs.
+ WebRtc_Word16 _codecID;
+
+    // This accounts for the number of samples that were not encoded.
+    // The case is rare: samples are either missed due to an overwrite
+    // in the input buffer or due to an encoding error.
+ WebRtc_UWord32 _noMissedSamples;
+
+    // True if the encoder instance is created
+ bool _encoderExist;
+ bool _decoderExist;
+    // True if the encoder instance is initialized
+ bool _encoderInitialized;
+ bool _decoderInitialized;
+
+ bool _registeredInNetEq;
+
+ // VAD/DTX
+ bool _hasInternalDTX;
+ WebRtcVadInst* _ptrVADInst;
+ bool _vadEnabled;
+ ACMVADMode _vadMode;
+ WebRtc_Word16 _vadLabel[MAX_FRAME_SIZE_10MSEC];
+ bool _dtxEnabled;
+ WebRtcCngEncInst* _ptrDTXInst;
+ WebRtc_UWord8 _numLPCParams;
+ bool _sentCNPrevious;
+ bool _isMaster;
+
+ WebRtcACMCodecParams _encoderParams;
+ WebRtcACMCodecParams _decoderParams;
+
+    // Used as a global lock for all available decoders
+ // so that no decoder is used when NetEQ decodes.
+ RWLockWrapper* _netEqDecodeLock;
+ // Used to lock wrapper internal data
+ // such as buffers and state variables.
+ RWLockWrapper& _codecWrapperLock;
+
+ WebRtc_UWord32 _lastEncodedTimestamp;
+ WebRtc_UWord32 _lastTimestamp;
+ bool _isAudioBuffFresh;
+ WebRtc_UWord32 _uniqueID;
+};
+
+} // namespace webrtc
+
+#endif // ACM_GENERIC_CODEC_H
diff --git a/src/modules/audio_coding/main/source/acm_gsmfr.cc b/src/modules/audio_coding/main/source/acm_gsmfr.cc
new file mode 100644
index 0000000..0730408
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_gsmfr.cc
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_gsmfr.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_GSMFR
+    // NOTE! GSM-FR is not included in the open-source package. Modify this file or your
+    // codec API to match the function calls and the name of the GSM-FR API file used.
+ // #include "gsmfr_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_GSMFR
+
+ACMGSMFR::ACMGSMFR(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMGSMFR::~ACMGSMFR()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::EnableDTX()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::DisableDTX()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMGSMFR::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMGSMFR::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMGSMFR::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMGSMFR::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMGSMFR::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real GSM FR wrapper
+extern WebRtc_Word16 WebRtcGSMFR_CreateEnc(GSMFR_encinst_t_** inst);
+extern WebRtc_Word16 WebRtcGSMFR_CreateDec(GSMFR_decinst_t_** inst);
+extern WebRtc_Word16 WebRtcGSMFR_FreeEnc(GSMFR_encinst_t_* inst);
+extern WebRtc_Word16 WebRtcGSMFR_FreeDec(GSMFR_decinst_t_* inst);
+extern WebRtc_Word16 WebRtcGSMFR_Encode(GSMFR_encinst_t_* encInst, WebRtc_Word16* input,
+ WebRtc_Word16 len, WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcGSMFR_EncoderInit(GSMFR_encinst_t_* encInst, WebRtc_Word16 mode);
+extern WebRtc_Word16 WebRtcGSMFR_Decode(GSMFR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcGSMFR_DecodeBwe(GSMFR_decinst_t_* decInst, WebRtc_Word16* input);
+extern WebRtc_Word16 WebRtcGSMFR_DecodePlc(GSMFR_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcGSMFR_DecoderInit(GSMFR_decinst_t_* decInst);
+
+ACMGSMFR::ACMGSMFR(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL)
+{
+ _codecID = codecID;
+ _hasInternalDTX = true;
+ return;
+}
+
+
+ACMGSMFR::~ACMGSMFR()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcGSMFR_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcGSMFR_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ *bitStreamLenByte = WebRtcGSMFR_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream);
+    // increment the read index; this tells the caller how far
+    // we have advanced in reading the audio buffer
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::EnableDTX()
+{
+ if(_dtxEnabled)
+ {
+ return 0;
+ }
+ else if(_encoderExist)
+ {
+ if(WebRtcGSMFR_EncoderInit(_encoderInstPtr, 1) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "EnableDTX: cannot init encoder for GSMFR");
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+WebRtc_Word16
+ACMGSMFR::DisableDTX()
+{
+ if(!_dtxEnabled)
+ {
+ return 0;
+ }
+ else if(_encoderExist)
+ {
+ if(WebRtcGSMFR_EncoderInit(_encoderInstPtr, 0) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "DisableDTX: cannot init encoder for GSMFR");
+ return -1;
+ }
+ _dtxEnabled = false;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+    if (WebRtcGSMFR_EncoderInit(_encoderInstPtr, ((codecParams->enableDTX)? 1:0)) < 0)
+    {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "InternalInitEncoder: cannot init encoder for GSMFR");
+        return -1;
+    }
+    return 0;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ if (WebRtcGSMFR_DecoderInit(_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: cannot init decoder for GSMFR");
+ return -1;
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMGSMFR::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodecDef: decoder is not initialized for GSMFR");
+ return -1;
+ }
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_GSMFR_FUNCTIONS."
+    // Then call NetEQ to add the codec to its
+    // database.
+ SET_CODEC_PAR((codecDef), kDecoderGSMFR, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ SET_GSMFR_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMGSMFR::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalCreateEncoder()
+{
+ if (WebRtcGSMFR_CreateEnc(&_encoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: cannot create instance for GSMFR encoder");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMGSMFR::DestructEncoderSafe()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcGSMFR_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ _encoderExist = false;
+ _encoderInitialized = false;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::InternalCreateDecoder()
+{
+ if (WebRtcGSMFR_CreateDec(&_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: cannot create instance for GSMFR decoder");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMGSMFR::DestructDecoderSafe()
+{
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcGSMFR_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+
+void
+ACMGSMFR::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcGSMFR_FreeEnc((GSMFR_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMGSMFR::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "Cannot unregister codec %s: payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderGSMFR);
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_gsmfr.h b/src/modules/audio_coding/main/source/acm_gsmfr.h
new file mode 100644
index 0000000..ead5ea7
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_gsmfr.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_GSMFR_H
+#define ACM_GSMFR_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+// forward declaration
+struct GSMFR_encinst_t_;
+struct GSMFR_decinst_t_;
+
+class ACMGSMFR : public ACMGenericCodec
+{
+public:
+ ACMGSMFR(WebRtc_Word16 codecID);
+ ~ACMGSMFR();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 EnableDTX();
+
+ WebRtc_Word16 DisableDTX();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ GSMFR_encinst_t_* _encoderInstPtr;
+ GSMFR_decinst_t_* _decoderInstPtr;
+};
+
+} // namespace webrtc
+
+#endif // ACM_GSMFR_H
+
diff --git a/src/modules/audio_coding/main/source/acm_ilbc.cc b/src/modules/audio_coding/main/source/acm_ilbc.cc
new file mode 100644
index 0000000..28e69e7
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_ilbc.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_ilbc.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_ILBC
+ #include "ilbc.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_ILBC
+
+ACMILBC::ACMILBC(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMILBC::~ACMILBC()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMILBC::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMILBC::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMILBC::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMILBC::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMILBC::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMILBC::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+WebRtc_Word16
+ACMILBC::SetBitRateSafe(const WebRtc_Word32 /* rate */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMILBC::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+
+ACMILBC::ACMILBC(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL)
+{
+ _codecID = codecID;
+ return;
+}
+
+
+ACMILBC::~ACMILBC()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ *bitStreamLenByte = WebRtcIlbcfix_Encode(_encoderInstPtr,
+ &_inAudio[_inAudioIxRead], _frameLenSmpl, (WebRtc_Word16*)bitStream);
+ if (*bitStreamLenByte < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: error in encode for ILBC");
+ return -1;
+ }
+    // increment the read index; this tells the caller how far
+    // we have advanced in reading the audio buffer
+ _inAudioIxRead += _frameLenSmpl;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMILBC::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // initialize with a correct processing block length
+ if((160 == (codecParams->codecInstant).pacsize) ||
+ (320 == (codecParams->codecInstant).pacsize))
+ {
+ // processing block of 20ms
+ return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
+ }
+ else if((240 == (codecParams->codecInstant).pacsize) ||
+ (480 == (codecParams->codecInstant).pacsize))
+ {
+ // processing block of 30ms
+ return WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitEncoder: invalid processing block");
+ return -1;
+ }
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalInitDecoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // initialize with a correct processing block length
+ if((160 == (codecParams->codecInstant).pacsize) ||
+ (320 == (codecParams->codecInstant).pacsize))
+ {
+ // processing block of 20ms
+ return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 20);
+ }
+ else if((240 == (codecParams->codecInstant).pacsize) ||
+ (480 == (codecParams->codecInstant).pacsize))
+ {
+ // processing block of 30ms
+ return WebRtcIlbcfix_DecoderInit(_decoderInstPtr, 30);
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: invalid processing block");
+ return -1;
+ }
+}
+
+
+WebRtc_Word32
+ACMILBC::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "CodecDef: decoder not initialized for ILBC");
+ return -1;
+ }
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_ILBC_FUNCTIONS."
+    // Then return the structure back to NetEQ to add the codec to its
+    // database.
+ SET_CODEC_PAR((codecDef), kDecoderILBC, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ SET_ILBC_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMILBC::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalCreateEncoder()
+{
+ if (WebRtcIlbcfix_EncoderCreate(&_encoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: cannot create instance for ILBC encoder");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMILBC::DestructEncoderSafe()
+{
+ _encoderInitialized = false;
+ _encoderExist = false;
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcIlbcfix_EncoderFree(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+
+WebRtc_Word16
+ACMILBC::InternalCreateDecoder()
+{
+ if (WebRtcIlbcfix_DecoderCreate(&_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: cannot create instance for ILBC decoder");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMILBC::DestructDecoderSafe()
+{
+ _decoderInitialized = false;
+ _decoderExist = false;
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcIlbcfix_DecoderFree(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+
+void
+ACMILBC::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcIlbcfix_EncoderFree((iLBC_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMILBC::SetBitRateSafe(const WebRtc_Word32 rate)
+{
+    // Check that the rate is valid: iLBC supports 13300 bps (30 ms frames)
+    // and 15200 bps (20 ms frames).
+ if (rate == 13300)
+ {
+ WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 30);
+ }
+ else if (rate == 15200)
+ {
+ WebRtcIlbcfix_EncoderInit(_encoderInstPtr, 20);
+ }
+ else
+ {
+ return -1;
+ }
+ _encoderParams.codecInstant.rate = rate;
+
+ return 0;
+}
+
+WebRtc_Word16
+ACMILBC::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "Cannot unregister codec %s: given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderILBC);
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_ilbc.h b/src/modules/audio_coding/main/source/acm_ilbc.h
new file mode 100644
index 0000000..c35c2ce
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_ilbc.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_ILBC_H
+#define ACM_ILBC_H
+
+#include "acm_generic_codec.h"
+
+// forward declaration
+struct iLBC_encinst_t_;
+struct iLBC_decinst_t_;
+
+namespace webrtc
+{
+
+class ACMILBC : public ACMGenericCodec
+{
+public:
+ ACMILBC(WebRtc_Word16 codecID);
+ ~ACMILBC();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ iLBC_encinst_t_* _encoderInstPtr;
+ iLBC_decinst_t_* _decoderInstPtr;
+};
+
+} // namespace webrtc
+
+#endif //ACM_ILBC_H
+
diff --git a/src/modules/audio_coding/main/source/acm_isac.cc b/src/modules/audio_coding/main/source/acm_isac.cc
new file mode 100644
index 0000000..d5ec891
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_isac.cc
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_isac.h"
+#include "acm_neteq.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+
+#ifdef WEBRTC_CODEC_ISAC
+ #include "acm_isac_macros.h"
+ #include "isac.h"
+#endif
+
+#ifdef WEBRTC_CODEC_ISACFX
+ #include "acm_isac_macros.h"
+ #include "isacfix.h"
+#endif
+
+namespace webrtc
+{
+
+// We need this; otherwise we cannot use the forward declaration
+// in the header file.
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+struct ACMISACInst
+{
+ ACM_ISAC_STRUCT *inst;
+};
+#endif
+
+#define ISAC_MIN_RATE 10000
+#define ISAC_MAX_RATE 56000
+
+
+// How the scaling is computed. iSAC computes a gain based on the
+// bottleneck. It uses the following expression for that gain:
+//
+// G(BN_kbps) = pow(10, (a + b * BN_kbps + c * BN_kbps * BN_kbps) / 20.0)
+//              / 3.4641;
+//
+// where, for a 30 ms frame length, we have
+//
+//     a = -23; b = 0.48; c = 0;
+//
+// Since the default encoder operates at 32 kbps, the scale is
+//
+// S(BN_kbps) = G(BN_kbps) / G(32);
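+//
+// As a worked check of the table below (illustrative arithmetic only): the
+// constant term a and the divisor 3.4641 cancel in the ratio, and with c = 0
+// the scale reduces to
+//
+// S(BN_kbps) = pow(10, b * (BN_kbps - 32) / 20.0)
+//
+// so, for instance, S(30) = pow(10, 0.48 * (30 - 32) / 20.0) ~= 0.8954 and
+// S(23) = pow(10, 0.48 * (23 - 32) / 20.0) ~= 0.6081, which match the
+// isacScale[] entries for 30000 and 23000 bps.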
+
+#define ISAC_NUM_SUPPORTED_RATES 9
+const WebRtc_UWord16 isacSuportedRates[ISAC_NUM_SUPPORTED_RATES] = {
+ 32000, 30000, 26000, 23000, 21000,
+ 19000, 17000, 15000, 12000};
+
+const float isacScale[ISAC_NUM_SUPPORTED_RATES] = {
+ 1.0f, 0.8954f, 0.7178f, 0.6081f, 0.5445f,
+ 0.4875f, 0.4365f, 0.3908f, 0.3311f};
+
+// Tables for bandwidth estimates
+#define NR_ISAC_BANDWIDTHS 24
+const WebRtc_Word32 isacRatesWB[NR_ISAC_BANDWIDTHS] =
+{
+ 10000, 11100, 12300, 13700, 15200, 16900,
+ 18800, 20900, 23300, 25900, 28700, 31900,
+ 10100, 11200, 12400, 13800, 15300, 17000,
+ 18900, 21000, 23400, 26000, 28800, 32000};
+
+
+const WebRtc_Word32 isacRatesSWB[NR_ISAC_BANDWIDTHS] =
+{
+ 10000, 11000, 12400, 13800, 15300, 17000,
+ 18900, 21000, 23200, 25400, 27600, 29800,
+ 32000, 34100, 36300, 38500, 40700, 42900,
+ 45100, 47300, 49500, 51700, 53900, 56000,
+};
+
+#if (!defined(WEBRTC_CODEC_ISAC) && !defined(WEBRTC_CODEC_ISACFX))
+
+ACMISAC::ACMISAC(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMISAC::~ACMISAC()
+{
+ return;
+}
+
+
+ACMGenericCodec*
+ACMISAC::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalEncode(
+ WebRtc_UWord8* /* bitstream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMISAC::DestructDecoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMISAC::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word32
+ACMISAC::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+void
+ACMISAC::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+WebRtc_Word16
+ACMISAC::DeliverCachedIsacData(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_UWord32* /* timestamp */,
+ WebRtcACMEncodingType* /* encodingType */,
+ const WebRtc_UWord16 /* isacRate */,
+ const WebRtc_UWord8 /* isacBWestimate */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::Transcode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_Word16 /* qBWE */,
+ WebRtc_Word32 /* scale */,
+ bool /* isRED */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMISAC::SetBitRateSafe(
+ WebRtc_Word32 /* bitRate */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::GetEstimatedBandwidthSafe()
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 /* estimatedBandwidth */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::GetRedPayloadSafe(
+ WebRtc_UWord8* /* redPayload */,
+ WebRtc_Word16* /* payloadBytes */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+bool
+ACMISAC::IsValidDefaultEncoder()
+{
+ return false;
+}
+
+WebRtc_Word16
+ACMISAC::UpdateDecoderSampFreq(
+ WebRtc_Word16 /* codecId */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMISAC::UpdateEncoderSampFreq(
+ WebRtc_UWord16 /* encoderSampFreqHz */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMISAC::EncoderSampFreq(
+ WebRtc_UWord16& /* sampFreqHz */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 /* initFrameSizeMsec */,
+ const WebRtc_UWord16 /* initRateBitPerSec */,
+ const bool /* enforceFrameSize */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 /* maxPayloadLenBytes */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMISAC::SetISACMaxRate(
+ const WebRtc_UWord32 /* maxRateBitPerSec */)
+{
+ return -1;
+}
+
+
+void
+ACMISAC::UpdateFrameLen()
+{
+ return;
+}
+
+void
+ACMISAC::CurrentRate(
+ WebRtc_Word32& /*rateBitPerSec */)
+{
+ return;
+}
+
+bool
+ACMISAC::DecoderParamsSafe(
+ WebRtcACMCodecParams* /* decParams */,
+ const WebRtc_UWord8 /* payloadType */)
+{
+ return false;
+}
+
+void
+ACMISAC::SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* /* codecParams */)
+{
+ return;
+}
+
+WebRtc_Word16
+ACMISAC::REDPayloadISAC(
+ const WebRtc_Word32 /* isacRate */,
+ const WebRtc_Word16 /* isacBwEstimate */,
+ WebRtc_UWord8* /* payload */,
+ WebRtc_Word16* /* payloadLenBytes */)
+{
+ return -1;
+}
+
+
+#else //===================== Actual Implementation =======================
+
+
+
+#ifdef WEBRTC_CODEC_ISACFX
+
+enum IsacSamplingRate
+{
+ kIsacWideband = 16,
+ kIsacSuperWideband = 32
+};
+
+static float
+ACMISACFixTranscodingScale(
+ WebRtc_UWord16 rate)
+{
+ // find the scale for transcoding, the scale is rounded
+ // downward
+ float scale = -1;
+ for(WebRtc_Word16 n=0; n < ISAC_NUM_SUPPORTED_RATES; n++)
+ {
+ if(rate >= isacSuportedRates[n])
+ {
+ scale = isacScale[n];
+ break;
+ }
+ }
+ return scale;
+}
+
+static void
+ACMISACFixGetSendBitrate(
+ ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32* bottleNeck)
+{
+ *bottleNeck = WebRtcIsacfix_GetUplinkBw(inst);
+}
+
+static WebRtc_Word16
+ACMISACFixGetNewBitstream(
+ ACM_ISAC_STRUCT* inst,
+ WebRtc_Word16 BWEIndex,
+ WebRtc_Word16 jitterIndex,
+ WebRtc_Word32 rate,
+ WebRtc_Word16* bitStream,
+ bool isRED)
+{
+ if (isRED)
+ {
+ // RED not supported with iSACFIX
+ return -1;
+ }
+ float scale = ACMISACFixTranscodingScale((WebRtc_UWord16)rate);
+ return WebRtcIsacfix_GetNewBitStream(inst, BWEIndex, scale, bitStream);
+}
+
+
+static WebRtc_Word16
+ACMISACFixGetSendBWE(
+ ACM_ISAC_STRUCT* inst,
+ WebRtc_Word16* rateIndex,
+ WebRtc_Word16* /* dummy */)
+{
+ WebRtc_Word16 localRateIndex;
+ WebRtc_Word16 status = WebRtcIsacfix_GetDownLinkBwIndex(inst, &localRateIndex);
+ if(status < 0)
+ {
+ return -1;
+ }
+ else
+ {
+ *rateIndex = localRateIndex;
+ return 0;
+ }
+}
+
+static WebRtc_Word16
+ACMISACFixControlBWE(
+ ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32 rateBPS,
+ WebRtc_Word16 frameSizeMs,
+ WebRtc_Word16 enforceFrameSize)
+{
+ return WebRtcIsacfix_ControlBwe(inst, (WebRtc_Word16)rateBPS,
+ frameSizeMs, enforceFrameSize);
+}
+
+static WebRtc_Word16
+ACMISACFixControl(
+ ACM_ISAC_STRUCT* inst,
+ WebRtc_Word32 rateBPS,
+ WebRtc_Word16 frameSizeMs)
+{
+ return WebRtcIsacfix_Control(inst, (WebRtc_Word16)rateBPS,
+ frameSizeMs);
+}
+
+static IsacSamplingRate
+ACMISACFixGetEncSampRate(
+ ACM_ISAC_STRUCT* /* inst */)
+{
+ return kIsacWideband;
+}
+
+
+static IsacSamplingRate
+ACMISACFixGetDecSampRate(
+ ACM_ISAC_STRUCT* /* inst */)
+{
+ return kIsacWideband;
+}
+
+#endif
+
+
+
+
+
+
+ACMISAC::ACMISAC(
+ WebRtc_Word16 codecID):
+_codecInstPtr(NULL)
+{
+ _codecInstPtr = new ACMISACInst;
+ if (_codecInstPtr == NULL)
+ {
+ return;
+ }
+ _codecInstPtr->inst = NULL;
+ _codecID = codecID;
+ _enforceFrameSize = false;
+ // by default a 16 kHz iSAC is created.
+ _samplesIn10MsAudio = 160;
+
+    // Initialize values that could otherwise be used uninitialized
+ _decoderParams.codecInstant.pltype = -1;
+ _decoderParams32kHz.codecInstant.pltype = -1;
+}
+
+
+ACMISAC::~ACMISAC()
+{
+ if (_codecInstPtr != NULL)
+ {
+ if(_codecInstPtr->inst != NULL)
+ {
+ ACM_ISAC_FREE(_codecInstPtr->inst);
+ _codecInstPtr->inst = NULL;
+ }
+ delete _codecInstPtr;
+ _codecInstPtr = NULL;
+ }
+ return;
+}
+
+
+ACMGenericCodec*
+ACMISAC::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+    // iSAC takes 10 ms of audio every time we call the encoder; therefore,
+    // it should be treated like codecs with a non-zero 'basic coding block',
+    // and the following 'while-loop' should not be necessary.
+    // However, due to a mistake in the codec the frame-size might change
+    // at the first 10 ms pushed into iSAC if the bit-rate is low; this is
+    // sort of a bug in iSAC. To address this we treat iSAC as follows.
+
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+ *bitStreamLenByte = 0;
+ while((*bitStreamLenByte == 0) && (_inAudioIxRead < _frameLenSmpl))
+ {
+ if(_inAudioIxRead > _inAudioIxWrite)
+ {
+ // something is wrong.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                "The actual frame-size of iSAC appears to be larger than expected. All audio \
+pushed in but no bit-stream is generated.");
+ return -1;
+ }
+ *bitStreamLenByte = ACM_ISAC_ENCODE(_codecInstPtr->inst,
+ &_inAudio[_inAudioIxRead], (WebRtc_Word16*)bitstream);
+        // increment the read index; this tells the caller how far
+        // we have advanced in reading the audio buffer
+ _inAudioIxRead += _samplesIn10MsAudio;
+ }
+ if(*bitStreamLenByte == 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _uniqueID,
+            "iSAC has encoded the whole frame but no bit-stream is generated.");
+ }
+
+    // a packet has been generated; iSAC, if set in adaptive mode, may change
+    // the frame length and we would like to update the bottleneck value as
+    // well, although updating the bottleneck is not crucial
+ if((*bitStreamLenByte > 0) && (_isacCodingMode == ADAPTIVE))
+ {
+ //_frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
+ }
+ UpdateFrameLen();
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMISAC::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // if rate is set to -1 then iSAC has to be in adaptive mode
+ if(codecParams->codecInstant.rate == -1)
+ {
+ _isacCodingMode = ADAPTIVE;
+ }
+
+ // sanity check that rate is in acceptable range
+ else if((codecParams->codecInstant.rate >= ISAC_MIN_RATE) &&
+ (codecParams->codecInstant.rate <= ISAC_MAX_RATE))
+ {
+ _isacCodingMode = CHANNEL_INDEPENDENT;
+ _isacCurrentBN = codecParams->codecInstant.rate;
+ }
+ else
+ {
+ return -1;
+ }
+
+ // we need to set the encoder sampling frequency.
+ if(UpdateEncoderSampFreq((WebRtc_UWord16)codecParams->codecInstant.plfreq) < 0)
+ {
+ return -1;
+ }
+ if(ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0)
+ {
+ return -1;
+ }
+
+ // apply the frame-size and rate if operating in
+ // channel-independent mode
+ if(_isacCodingMode == CHANNEL_INDEPENDENT)
+ {
+ if(ACM_ISAC_CONTROL(_codecInstPtr->inst,
+ codecParams->codecInstant.rate,
+ codecParams->codecInstant.pacsize /
+ (codecParams->codecInstant.plfreq / 1000)) < 0)
+ {
+ return -1;
+ }
+ }
+ else
+ {
+        // We need this for the adaptive case and it has to be called
+        // after initialization
+ ACM_ISAC_GETSENDBITRATE(
+ _codecInstPtr->inst, &_isacCurrentBN);
+ }
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ return 0;
+}
+
+WebRtc_Word16
+ACMISAC::InternalInitDecoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+
+ // set decoder sampling frequency.
+ if(codecParams->codecInstant.plfreq == 32000)
+ {
+ UpdateDecoderSampFreq(ACMCodecDB::isacswb);
+ }
+ else
+ {
+ UpdateDecoderSampFreq(ACMCodecDB::isac);
+ }
+
+    // In one-way communication we may never register a send-codec.
+    // However, we want the BWE to work properly, so it has to be
+    // initialized. The BWE is initialized when the iSAC encoder is
+    // initialized; therefore, we need this.
+ if(!_encoderInitialized)
+ {
+        // Since we don't require a valid rate or a valid packet size when initializing
+        // the decoder, we set valid values before initializing the encoder
+ codecParams->codecInstant.rate = ISACWB_DEFAULT_RATE;
+ codecParams->codecInstant.pacsize = ISACSWB_PAC_SIZE;
+ if(InternalInitEncoder(codecParams) < 0)
+ {
+ return -1;
+ }
+ _encoderInitialized = true;
+ }
+
+ return ACM_ISAC_DECODERINIT(_codecInstPtr->inst);
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalCreateDecoder()
+{
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+ WebRtc_Word16 status = ACM_ISAC_CREATE (&(_codecInstPtr->inst));
+
+ // specific to codecs with one instance for encoding and decoding
+ _encoderInitialized = false;
+ if(status < 0)
+ {
+ _encoderExist = false;
+ }
+ else
+ {
+ _encoderExist = true;
+ }
+ return status;
+}
+
+
+void
+ACMISAC::DestructDecoderSafe()
+{
+    // A codec with a shared instance cannot be deleted here.
+ _decoderInitialized = false;
+ return;
+}
+
+
+WebRtc_Word16
+ACMISAC::InternalCreateEncoder()
+{
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+ WebRtc_Word16 status = ACM_ISAC_CREATE(&(_codecInstPtr->inst));
+
+ // specific to codecs with one instance for encoding and decoding
+ _decoderInitialized = false;
+ if(status < 0)
+ {
+ _decoderExist = false;
+ }
+ else
+ {
+ _decoderExist = true;
+ }
+ return status;
+}
+
+
+void
+ACMISAC::DestructEncoderSafe()
+{
+    // A codec with a shared instance cannot be deleted here.
+ _encoderInitialized = false;
+ return;
+}
+
+
+WebRtc_Word32
+ACMISAC::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ // Sanity checks
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+ if (!_decoderInitialized || !_decoderExist)
+ {
+        // TODO: log the error
+ return -1;
+ }
+ // Fill up the structure by calling
+ // "SET_CODEC_PAR" & "SET_ISAC_FUNCTION."
+ // Then call NetEQ to add the codec to it's
+ // database.
+ if(codecInst.plfreq == 16000)
+ {
+ SET_CODEC_PAR((codecDef), kDecoderISAC, codecInst.pltype,
+ _codecInstPtr->inst, 16000);
+#ifdef WEBRTC_CODEC_ISAC
+ SET_ISAC_FUNCTIONS((codecDef));
+#else
+ SET_ISACfix_FUNCTIONS((codecDef));
+#endif
+ }
+ else
+ {
+#ifdef WEBRTC_CODEC_ISAC
+ SET_CODEC_PAR((codecDef), kDecoderISACswb, codecInst.pltype,
+ _codecInstPtr->inst, 32000);
+ SET_ISACSWB_FUNCTIONS((codecDef));
+#else
+ return -1;
+#endif
+ }
+
+ return 0;
+}
+
+
+void
+ACMISAC::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ ACM_ISAC_FREE((ACM_ISAC_STRUCT *)ptrInst);
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMISAC::Transcode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16 qBWE,
+ WebRtc_Word32 rate,
+ bool isRED)
+{
+ WebRtc_Word16 jitterInfo = 0;
+ // transcode from a higher rate to lower rate
+ // sanity check
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+
+ *bitStreamLenByte = ACM_ISAC_GETNEWBITSTREAM(_codecInstPtr->inst,
+ qBWE, jitterInfo, rate, (WebRtc_Word16*)bitStream, (isRED)? 1:0);
+
+ if(*bitStreamLenByte < 0)
+ {
+ // error happened
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+ else
+ {
+ return *bitStreamLenByte;
+ }
+}
+
+WebRtc_Word16
+ACMISAC::SetBitRateSafe(
+ WebRtc_Word32 bitRate)
+{
+ if (_codecInstPtr == NULL)
+ {
+ return -1;
+ }
+ WebRtc_UWord16 encoderSampFreq;
+ EncoderSampFreq(encoderSampFreq);
+ bool reinit = false;
+ // change the BN of iSAC
+ if(bitRate == -1)
+ {
+ // ADAPTIVE MODE
+ // Check if it was already in adaptive mode
+ if(_isacCodingMode != ADAPTIVE)
+ {
+ // was not in adaptive, then set the mode to adaptive
+ // and flag for re-initialization
+ _isacCodingMode = ADAPTIVE;
+ reinit = true;
+ }
+ }
+    // Sanity check that the rate is valid
+ else if((bitRate >= ISAC_MIN_RATE) &&
+ (bitRate <= ISAC_MAX_RATE))
+ {
+        // check if it was in channel-independent mode before
+ if(_isacCodingMode != CHANNEL_INDEPENDENT)
+ {
+ // was not in channel independent, set the mode to
+ // channel-independent and flag for re-initialization
+ _isacCodingMode = CHANNEL_INDEPENDENT;
+ reinit = true;
+ }
+ // store the bottleneck
+ _isacCurrentBN = (WebRtc_UWord16)bitRate;
+ }
+ else
+ {
+        // invalid rate
+ return -1;
+ }
+
+ WebRtc_Word16 status = 0;
+ if(reinit)
+ {
+ // initialize and check if it is successful
+ if(ACM_ISAC_ENCODERINIT(_codecInstPtr->inst, _isacCodingMode) < 0)
+ {
+ // failed initialization
+ return -1;
+ }
+ }
+ if(_isacCodingMode == CHANNEL_INDEPENDENT)
+ {
+
+ status = ACM_ISAC_CONTROL(_codecInstPtr->inst, _isacCurrentBN,
+ (encoderSampFreq == 32000)? 30:(_frameLenSmpl / 16));
+ if(status < 0)
+ {
+ status = -1;
+ }
+ }
+
+ // Update encoder parameters
+ _encoderParams.codecInstant.rate = bitRate;
+
+ UpdateFrameLen();
+ return status;
+}
+
+
+WebRtc_Word32
+ACMISAC::GetEstimatedBandwidthSafe()
+{
+ WebRtc_Word16 bandwidthIndex;
+ WebRtc_Word16 delayIndex;
+ IsacSamplingRate sampRate;
+
+ // Get bandwidth information
+ ACM_ISAC_GETSENDBWE(_codecInstPtr->inst, &bandwidthIndex, &delayIndex);
+
+    // Validity check of the index
+    if ((bandwidthIndex < 0) || (bandwidthIndex >= NR_ISAC_BANDWIDTHS))
+ {
+ return -1;
+ }
+
+ // Check sample frequency
+ sampRate = ACM_ISAC_GETDECSAMPRATE(_codecInstPtr->inst);
+ if(sampRate == kIsacWideband)
+ {
+ return isacRatesWB[bandwidthIndex];
+ }
+ else
+ {
+ return isacRatesSWB[bandwidthIndex];
+ }
+}
+
+WebRtc_Word32
+ACMISAC::SetEstimatedBandwidthSafe(
+ WebRtc_Word32 estimatedBandwidth)
+{
+ IsacSamplingRate sampRate;
+ WebRtc_Word16 bandwidthIndex;
+
+ // Check sample frequency and choose appropriate table
+ sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
+
+ if(sampRate == kIsacWideband)
+ {
+ // Search through the WB rate table to find the index
+
+ bandwidthIndex = NR_ISAC_BANDWIDTHS/2 - 1;
+ for (int i=0; i<(NR_ISAC_BANDWIDTHS/2); i++)
+ {
+ if (estimatedBandwidth == isacRatesWB[i])
+ {
+ bandwidthIndex = i;
+ break;
+ } else if (estimatedBandwidth == isacRatesWB[i+NR_ISAC_BANDWIDTHS/2])
+ {
+ bandwidthIndex = i + NR_ISAC_BANDWIDTHS/2;
+ break;
+ } else if (estimatedBandwidth < isacRatesWB[i])
+ {
+ bandwidthIndex = i;
+ break;
+ }
+ }
+ }
+ else
+ {
+ // Search through the SWB rate table to find the index
+ bandwidthIndex = NR_ISAC_BANDWIDTHS - 1;
+ for (int i=0; i<NR_ISAC_BANDWIDTHS; i++)
+ {
+ if(estimatedBandwidth <= isacRatesSWB[i])
+ {
+ bandwidthIndex = i;
+ break;
+ }
+ }
+ }
+
+ // Set iSAC Bandwidth Estimate
+ ACM_ISAC_SETBWE(_codecInstPtr->inst, bandwidthIndex);
+
+ return 0;
+}
+
+WebRtc_Word32
+ACMISAC::GetRedPayloadSafe(
+#if (!defined(WEBRTC_CODEC_ISAC))
+ WebRtc_UWord8* /* redPayload */,
+ WebRtc_Word16* /* payloadBytes */)
+{
+ return -1;
+#else
+ WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes)
+{
+
+ WebRtc_Word16 bytes = WebRtcIsac_GetRedPayload(_codecInstPtr->inst, (WebRtc_Word16*)redPayload);
+ if (bytes < 0)
+ {
+ return -1;
+ }
+ *payloadBytes = bytes;
+ return 0;
+#endif
+}
+
+
+WebRtc_Word16
+ACMISAC::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType == _decoderParams.codecInstant.pltype)
+ {
+ return netEq->RemoveCodec(kDecoderISAC);
+ }
+ else if(payloadType == _decoderParams32kHz.codecInstant.pltype)
+ {
+ return netEq->RemoveCodec(kDecoderISACswb);
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "Cannot unregister codec %s: given payload-type %d does not match \
+the stored payload type %d or %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype,
+ _decoderParams32kHz.codecInstant.pltype);
+
+ return -1;
+ }
+}
+
+
+WebRtc_Word16
+ACMISAC::UpdateDecoderSampFreq(
+#ifdef WEBRTC_CODEC_ISAC
+ WebRtc_Word16 codecId)
+{
+ if(ACMCodecDB::isac == codecId)
+ {
+ return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacWideband);
+ }
+ else if(ACMCodecDB::isacswb == codecId)
+ {
+ return WebRtcIsac_SetDecSampRate(_codecInstPtr->inst, kIsacSuperWideband);
+ }
+ else
+ {
+ return -1;
+ }
+
+#else
+ WebRtc_Word16 /* codecId */)
+{
+ return 0;
+#endif
+}
+
+
+WebRtc_Word16
+ACMISAC::UpdateEncoderSampFreq(
+#ifdef WEBRTC_CODEC_ISAC
+ WebRtc_UWord16 encoderSampFreqHz)
+{
+ WebRtc_UWord16 currentSampRateHz;
+ EncoderSampFreq(currentSampRateHz);
+
+ if(currentSampRateHz != encoderSampFreqHz)
+ {
+ if((encoderSampFreqHz != 16000) && (encoderSampFreqHz != 32000))
+ {
+ return -1;
+ }
+ else
+ {
+ _inAudioIxRead = 0;
+ _inAudioIxWrite = 0;
+ _inTimestampIxWrite = 0;
+ if(encoderSampFreqHz == 16000)
+ {
+ if(WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacWideband) < 0)
+ {
+ return -1;
+ }
+ _samplesIn10MsAudio = 160;
+ }
+ else
+ {
+
+ if(WebRtcIsac_SetEncSampRate(_codecInstPtr->inst, kIsacSuperWideband) < 0)
+ {
+ return -1;
+ }
+ _samplesIn10MsAudio = 320;
+ }
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ _encoderParams.codecInstant.pacsize = _frameLenSmpl;
+ _encoderParams.codecInstant.plfreq = encoderSampFreqHz;
+ return 0;
+ }
+ }
+#else
+    WebRtc_UWord16 /* encoderSampFreqHz */)
+{
+#endif
+ return 0;
+}
+
+WebRtc_Word16
+ACMISAC::EncoderSampFreq(
+ WebRtc_UWord16& sampFreqHz)
+{
+ IsacSamplingRate sampRate;
+ sampRate = ACM_ISAC_GETENCSAMPRATE(_codecInstPtr->inst);
+ if(sampRate == kIsacSuperWideband)
+ {
+ sampFreqHz = 32000;
+ }
+ else
+ {
+ sampFreqHz = 16000;
+ }
+ return 0;
+}
+
+WebRtc_Word32
+ACMISAC::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize)
+{
+ WebRtc_Word16 status;
+ {
+ WebRtc_UWord16 sampFreqHz;
+ EncoderSampFreq(sampFreqHz);
+        // @TODO: at 32 kHz we hardcode calling with 30 ms and enforce
+        // the frame-size, otherwise we might get an error. Revise if
+        // control-bwe is changed.
+ if(sampFreqHz == 32000)
+ {
+ status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst,
+ initRateBitPerSec, 30, 1);
+ }
+ else
+ {
+ status = ACM_ISAC_CONTROL_BWE(_codecInstPtr->inst,
+ initRateBitPerSec, initFrameSizeMsec, enforceFrameSize? 1:0);
+ }
+ }
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+            "Couldn't configure iSAC BWE.");
+ return -1;
+ }
+ UpdateFrameLen();
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &_isacCurrentBN);
+ return 0;
+}
+
+WebRtc_Word32
+ACMISAC::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes)
+{
+ return ACM_ISAC_SETMAXPAYLOADSIZE(_codecInstPtr->inst, maxPayloadLenBytes);
+}
+
+WebRtc_Word32
+ACMISAC::SetISACMaxRate(
+ const WebRtc_UWord32 maxRateBitPerSec)
+{
+ return ACM_ISAC_SETMAXRATE(_codecInstPtr->inst, maxRateBitPerSec);
+}
+
+
+void
+ACMISAC::UpdateFrameLen()
+{
+ _frameLenSmpl = ACM_ISAC_GETNEWFRAMELEN(_codecInstPtr->inst);
+ _encoderParams.codecInstant.pacsize = _frameLenSmpl;
+}
+
+void
+ACMISAC::CurrentRate(WebRtc_Word32& rateBitPerSec)
+{
+ if(_isacCodingMode == ADAPTIVE)
+ {
+ ACM_ISAC_GETSENDBITRATE(_codecInstPtr->inst, &rateBitPerSec);
+ }
+}
+
+
+bool
+ACMISAC::DecoderParamsSafe(
+ WebRtcACMCodecParams* decParams,
+ const WebRtc_UWord8 payloadType)
+{
+ if(_decoderInitialized)
+ {
+ if(payloadType == _decoderParams.codecInstant.pltype)
+ {
+ memcpy(decParams, &_decoderParams, sizeof(WebRtcACMCodecParams));
+ return true;
+ }
+ if(payloadType == _decoderParams32kHz.codecInstant.pltype)
+ {
+ memcpy(decParams, &_decoderParams32kHz,
+ sizeof(WebRtcACMCodecParams));
+ return true;
+ }
+ }
+ return false;
+}
+
+void
+ACMISAC::SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* codecParams)
+{
+ // set decoder sampling frequency.
+ if(codecParams->codecInstant.plfreq == 32000)
+ {
+ memcpy(&_decoderParams32kHz, codecParams, sizeof(WebRtcACMCodecParams));
+ }
+ else
+ {
+ memcpy(&_decoderParams, codecParams, sizeof(WebRtcACMCodecParams));
+ }
+}
+
+
+WebRtc_Word16
+ACMISAC::REDPayloadISAC(
+ const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes)
+{
+ WebRtc_Word16 status;
+ ReadLockScoped rl(_codecWrapperLock);
+ status = Transcode(payload, payloadLenBytes, isacBwEstimate, isacRate, true);
+ return status;
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_isac.h b/src/modules/audio_coding/main/source/acm_isac.h
new file mode 100644
index 0000000..681c1a4
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_isac.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_ISAC_H
+#define ACM_ISAC_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+struct ACMISACInst;
+
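+// ADAPTIVE: the target rate is chosen by iSAC's own bandwidth estimator
+// (selected by setting the rate to -1). CHANNEL_INDEPENDENT: a fixed,
+// user-specified bottleneck rate is used (see ACMISAC::SetBitRateSafe()
+// and ACMISAC::InternalInitEncoder() in acm_isac.cc).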
+enum iSACCodingMode {ADAPTIVE, CHANNEL_INDEPENDENT};
+
+
+class ACMISAC : public ACMGenericCodec
+{
+public:
+ ACMISAC(WebRtc_Word16 codecID);
+ ~ACMISAC();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 DeliverCachedIsacData(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_UWord32* timestamp,
+ WebRtcACMEncodingType* encodingType,
+ const WebRtc_UWord16 isacRate,
+ const WebRtc_UWord8 isacBWestimate);
+
+ WebRtc_Word16 DeliverCachedData(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */,
+ WebRtc_UWord32* /* timestamp */,
+ WebRtcACMEncodingType* /* encodingType */)
+ {
+ return -1;
+ }
+
+ WebRtc_Word16 UpdateDecoderSampFreq(
+ WebRtc_Word16 codecId);
+
+ WebRtc_Word16 UpdateEncoderSampFreq(
+ WebRtc_UWord16 sampFreqHz);
+
+ WebRtc_Word16 EncoderSampFreq(
+ WebRtc_UWord16& sampFreqHz);
+
+ WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize);
+
+ WebRtc_Word32 SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes);
+
+ WebRtc_Word32 SetISACMaxRate(
+ const WebRtc_UWord32 maxRateBitPerSec);
+
+ WebRtc_Word16 REDPayloadISAC(
+ const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenBytes);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 bitRate);
+
+ WebRtc_Word32 GetEstimatedBandwidthSafe();
+
+ WebRtc_Word32 SetEstimatedBandwidthSafe(WebRtc_Word32 estimatedBandwidth);
+
+ WebRtc_Word32 GetRedPayloadSafe(
+ WebRtc_UWord8* redPayload,
+ WebRtc_Word16* payloadBytes);
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 Transcode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte,
+ WebRtc_Word16 qBWE,
+ WebRtc_Word32 rate,
+ bool isRED);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void CurrentRate(WebRtc_Word32& rateBitPerSec);
+
+ void UpdateFrameLen();
+
+ bool DecoderParamsSafe(
+ WebRtcACMCodecParams *decParams,
+ const WebRtc_UWord8 payloadType);
+
+ void SaveDecoderParamSafe(
+ const WebRtcACMCodecParams* codecParams);
+
+ ACMISACInst* _codecInstPtr;
+
+ bool _isEncInitialized;
+ iSACCodingMode _isacCodingMode;
+ bool _enforceFrameSize;
+ WebRtc_Word32 _isacCurrentBN;
+ WebRtc_UWord16 _samplesIn10MsAudio;
+ WebRtcACMCodecParams _decoderParams32kHz;
+};
+
+} // namespace webrtc
+
+#endif // ACM_ISAC_H
diff --git a/src/modules/audio_coding/main/source/acm_isac_macros.h b/src/modules/audio_coding/main/source/acm_isac_macros.h
new file mode 100644
index 0000000..030164c
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_isac_macros.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_ISAC_MACROS_H
+#define ACM_ISAC_MACROS_H
+
+#include "engine_configurations.h"
+
+namespace webrtc
+{
+
+#ifdef WEBRTC_CODEC_ISAC
+# define ACM_ISAC_CREATE WebRtcIsac_Create
+# define ACM_ISAC_FREE WebRtcIsac_Free
+# define ACM_ISAC_ENCODERINIT WebRtcIsac_EncoderInit
+# define ACM_ISAC_ENCODE WebRtcIsac_Encode
+# define ACM_ISAC_DECODERINIT WebRtcIsac_DecoderInit
+# define ACM_ISAC_DECODE_BWE WebRtcIsac_UpdateBwEstimate
+# define ACM_ISAC_DECODE_B WebRtcIsac_Decode
+# define ACM_ISAC_DECODEPLC WebRtcIsac_DecodePlc
+# define ACM_ISAC_CONTROL WebRtcIsac_Control
+# define ACM_ISAC_CONTROL_BWE WebRtcIsac_ControlBwe
+# define ACM_ISAC_GETFRAMELEN WebRtcIsac_ReadFrameLen
+# define ACM_ISAC_VERSION WebRtcIsac_version
+# define ACM_ISAC_GETERRORCODE WebRtcIsac_GetErrorCode
+# define ACM_ISAC_GETSENDBITRATE WebRtcIsac_GetUplinkBw
+# define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsac_SetMaxPayloadSize
+# define ACM_ISAC_SETMAXRATE WebRtcIsac_SetMaxRate
+# define ACM_ISAC_GETNEWBITSTREAM WebRtcIsac_GetNewBitStream
+# define ACM_ISAC_GETSENDBWE WebRtcIsac_GetDownLinkBwIndex
+# define ACM_ISAC_SETBWE WebRtcIsac_UpdateUplinkBw
+# define ACM_ISAC_GETBWE WebRtcIsac_ReadBwIndex
+# define ACM_ISAC_GETNEWFRAMELEN WebRtcIsac_GetNewFrameLen
+# define ACM_ISAC_STRUCT ISACStruct
+# define ACM_ISAC_GETENCSAMPRATE WebRtcIsac_EncSampRate
+# define ACM_ISAC_GETDECSAMPRATE WebRtcIsac_DecSampRate
+#endif
+
+#ifdef WEBRTC_CODEC_ISACFX
+# define ACM_ISAC_CREATE WebRtcIsacfix_Create
+# define ACM_ISAC_FREE WebRtcIsacfix_Free
+# define ACM_ISAC_ENCODERINIT WebRtcIsacfix_EncoderInit
+# define ACM_ISAC_ENCODE WebRtcIsacfix_Encode
+# define ACM_ISAC_DECODERINIT WebRtcIsacfix_DecoderInit
+# define ACM_ISAC_DECODE_BWE WebRtcIsacfix_UpdateBwEstimate
+# define ACM_ISAC_DECODE_B WebRtcIsacfix_Decode
+# define ACM_ISAC_DECODEPLC WebRtcIsacfix_DecodePlc
+# define ACM_ISAC_CONTROL ACMISACFixControl // local Impl
+# define ACM_ISAC_CONTROL_BWE ACMISACFixControlBWE // local Impl
+# define ACM_ISAC_GETFRAMELEN WebRtcIsacfix_ReadFrameLen
+# define ACM_ISAC_VERSION WebRtcIsacfix_version
+# define ACM_ISAC_GETERRORCODE WebRtcIsacfix_GetErrorCode
+# define ACM_ISAC_GETSENDBITRATE ACMISACFixGetSendBitrate // local Impl
+# define ACM_ISAC_SETMAXPAYLOADSIZE WebRtcIsacfix_SetMaxPayloadSize
+# define ACM_ISAC_SETMAXRATE WebRtcIsacfix_SetMaxRate
+# define ACM_ISAC_GETNEWBITSTREAM ACMISACFixGetNewBitstream // local Impl
+# define ACM_ISAC_GETSENDBWE ACMISACFixGetSendBWE // local Impl
+# define ACM_ISAC_SETBWE WebRtcIsacfix_UpdateUplinkBw
+# define ACM_ISAC_GETBWE WebRtcIsacfix_ReadBwIndex
+# define ACM_ISAC_GETNEWFRAMELEN WebRtcIsacfix_GetNewFrameLen
+# define ACM_ISAC_STRUCT ISACFIX_MainStruct
+# define ACM_ISAC_GETENCSAMPRATE ACMISACFixGetEncSampRate // local Impl
+# define ACM_ISAC_GETDECSAMPRATE ACMISACFixGetDecSampRate // local Impl
+#endif
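+
+// Example of how these mappings are used elsewhere in the ACM (illustrative
+// note): a call such as ACM_ISAC_ENCODERINIT(inst, mode) expands to
+// WebRtcIsac_EncoderInit(inst, mode) in the floating-point build
+// (WEBRTC_CODEC_ISAC) and to WebRtcIsacfix_EncoderInit(inst, mode) in the
+// fixed-point build (WEBRTC_CODEC_ISACFX).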
+
+} // namespace webrtc
+
+#endif // ACM_ISAC_MACROS_H
+
diff --git a/src/modules/audio_coding/main/source/acm_neteq.cc b/src/modules/audio_coding/main/source/acm_neteq.cc
new file mode 100644
index 0000000..d829db1
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_neteq.cc
@@ -0,0 +1,1370 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// 'conversion' conversion from 'type1' to 'type2', possible loss of data
+#pragma warning(disable: 4267)
+
+#include <stdlib.h> // malloc
+
+#include "acm_neteq.h"
+#include "common_types.h"
+#include "critical_section_wrapper.h"
+#include "rw_lock_wrapper.h"
+#include "signal_processing_library.h"
+#include "tick_util.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_internal.h"
+
+namespace webrtc
+{
+
+#define RTP_HEADER_SIZE 12
+#define NETEQ_INIT_FREQ 8000
+#define NETEQ_INIT_FREQ_KHZ (NETEQ_INIT_FREQ/1000)
+#define NETEQ_ERR_MSG_LEN_BYTE (WEBRTC_NETEQ_MAX_ERROR_NAME + 1)
+
+
+ACMNetEQ::ACMNetEQ()
+:
+_id(0),
+_currentSampFreqKHz(NETEQ_INIT_FREQ_KHZ),
+_avtPlayout(false),
+_playoutMode(voice),
+_netEqCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+_vadStatus(false),
+_vadMode(VADNormal),
+_decodeLock(RWLockWrapper::CreateRWLock()),
+_numSlaves(0),
+_receivedStereo(false),
+_masterSlaveInfo(NULL),
+_previousAudioActivity(AudioFrame::kVadUnknown),
+_callbackCritSect(CriticalSectionWrapper::CreateCriticalSection())
+{
+ for(int n = 0; n < MAX_NUM_SLAVE_NETEQ + 1; n++)
+ {
+ _isInitialized[n] = false;
+ _ptrVADInst[n] = NULL;
+ _inst[n] = NULL;
+ _instMem[n] = NULL;
+ _netEqPacketBuffer[n] = NULL;
+ }
+}
+
+ACMNetEQ::~ACMNetEQ()
+{
+ {
+ CriticalSectionScoped lock(*_netEqCritSect);
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if (_instMem[idx] != NULL)
+ {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ }
+ if (_netEqPacketBuffer[idx] != NULL)
+ {
+ free(_netEqPacketBuffer[idx]);
+ _netEqPacketBuffer[idx] = NULL;
+ }
+ if(_ptrVADInst[idx] != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
+ }
+ }
+ if(_masterSlaveInfo != NULL)
+ {
+ free(_masterSlaveInfo);
+ _masterSlaveInfo = NULL;
+ }
+ }
+ if(_netEqCritSect != NULL)
+ {
+ delete _netEqCritSect;
+ }
+
+ if(_decodeLock != NULL)
+ {
+ delete _decodeLock;
+ }
+
+ if(_callbackCritSect != NULL)
+ {
+ delete _callbackCritSect;
+ }
+}
+
+WebRtc_Word32
+ACMNetEQ::Init()
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(InitByIdxSafe(idx) < 0)
+ {
+ return -1;
+ }
+ // delete VAD instance and start fresh if required.
+ if(_ptrVADInst[idx] != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
+ }
+ if(_vadStatus)
+ {
+ // Has to enable VAD
+ if(EnableVADByIdxSafe(idx) < 0)
+ {
+ // Failed to enable VAD.
+ // Delete VAD instance, if it is created
+ if(_ptrVADInst[idx] != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
+ }
+                // We are at initialization of NetEQ; if we failed to
+                // enable VAD, we delete the NetEQ instance.
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ _inst[idx] = NULL;
+ }
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ }
+ _isInitialized[idx] = true;
+ }
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::InitByIdxSafe(
+ const WebRtc_Word16 idx)
+{
+ int memorySizeBytes;
+ if (WebRtcNetEQ_AssignSize(&memorySizeBytes) != 0)
+ {
+ LogError("AssignSize", idx);
+ return -1;
+ }
+
+ if(_instMem[idx] != NULL)
+ {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ }
+ _instMem[idx] = malloc(memorySizeBytes);
+ if (_instMem[idx] == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not allocate memory for NetEq");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ if (WebRtcNetEQ_Assign(&_inst[idx], _instMem[idx]) != 0)
+ {
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ }
+ LogError("Assign", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not Assign");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ if (WebRtcNetEQ_Init(_inst[idx], NETEQ_INIT_FREQ) != 0)
+ {
+ if (_instMem[idx] != NULL) {
+ free(_instMem[idx]);
+ _instMem[idx] = NULL;
+ }
+ LogError("Init", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitByIdxSafe: NetEq Initialization error: could not initialize NetEq");
+ _isInitialized[idx] = false;
+ return -1;
+ }
+ _isInitialized[idx] = true;
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::EnableVADByIdxSafe(
+ const WebRtc_Word16 idx)
+{
+ if(_ptrVADInst[idx] == NULL)
+ {
+ if(WebRtcVad_Create(&_ptrVADInst[idx]) < 0)
+ {
+ _ptrVADInst[idx] = NULL;
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not create VAD");
+ return -1;
+ }
+ }
+
+ if(WebRtcNetEQ_SetVADInstance(_inst[idx], _ptrVADInst[idx],
+ (WebRtcNetEQ_VADInitFunction) WebRtcVad_Init,
+ (WebRtcNetEQ_VADSetmodeFunction) WebRtcVad_set_mode,
+ (WebRtcNetEQ_VADFunction) WebRtcVad_Process) < 0)
+ {
+ LogError("setVADinstance", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not set VAD instance");
+ return -1;
+ }
+
+ if(WebRtcNetEQ_SetVADMode(_inst[idx], _vadMode) < 0)
+ {
+ LogError("setVADmode", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "EnableVADByIdxSafe: NetEq Initialization error: could not set VAD mode");
+ return -1;
+ }
+ return 0;
+}
+
+
+
+
+WebRtc_Word32
+ACMNetEQ::AllocatePacketBuffer(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs)
+{
+    // The size outputs of WebRtcNetEQ_GetRecommendedBufferSize have to be
+    // int; otherwise the call does not compile without casts.
+
+ CriticalSectionScoped lock(*_netEqCritSect);
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, idx) < 0)
+ {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::AllocatePacketBufferByIdxSafe(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs,
+ const WebRtc_Word16 idx)
+{
+ int maxNoPackets;
+ int bufferSizeInBytes;
+
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AllocatePacketBufferByIdxSafe: NetEq is not initialized.");
+ return -1;
+ }
+ if (WebRtcNetEQ_GetRecommendedBufferSize(_inst[idx], usedCodecs, noOfCodecs,
+ kTCPLargeJitter , &maxNoPackets, &bufferSizeInBytes)
+ != 0)
+ {
+ LogError("GetRecommendedBufferSize", idx);
+ return -1;
+ }
+ if(_netEqPacketBuffer[idx] != NULL)
+ {
+ free(_netEqPacketBuffer[idx]);
+ _netEqPacketBuffer[idx] = NULL;
+ }
+
+ _netEqPacketBuffer[idx] = (WebRtc_Word16 *)malloc(bufferSizeInBytes);
+ if (_netEqPacketBuffer[idx] == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AllocatePacketBufferByIdxSafe: NetEq Initialization error: could not allocate "
+ "memory for NetEq Packet Buffer");
+ return -1;
+
+ }
+ if (WebRtcNetEQ_AssignBuffer(_inst[idx], maxNoPackets, _netEqPacketBuffer[idx],
+ bufferSizeInBytes) != 0)
+ {
+ if (_netEqPacketBuffer[idx] != NULL) {
+ free(_netEqPacketBuffer[idx]);
+ _netEqPacketBuffer[idx] = NULL;
+ }
+ LogError("AssignBuffer", idx);
+ return -1;
+ }
+ return 0;
+}
+
+
+
+
+WebRtc_Word32
+ACMNetEQ::SetExtraDelay(
+ const WebRtc_Word32 delayInMS)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetExtraDelay: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_SetExtraDelay(_inst[idx], delayInMS) < 0)
+ {
+ LogError("SetExtraDelay", idx);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMNetEQ::SetAVTPlayout(
+ const bool enable)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if (_avtPlayout != enable)
+ {
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetAVTPlayout: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_SetAVTPlayout(_inst[idx], (enable) ? 1 : 0) < 0)
+ {
+ LogError("SetAVTPlayout", idx);
+ return -1;
+ }
+ }
+ }
+ _avtPlayout = enable;
+ return 0;
+}
+
+
+bool
+ACMNetEQ::AVTPlayout() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ return _avtPlayout;
+}
+
+WebRtc_Word32
+ACMNetEQ::CurrentSampFreqHz() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "CurrentSampFreqHz: NetEq is not initialized.");
+ return -1;
+ }
+ return (WebRtc_Word32)(1000*_currentSampFreqKHz);
+}
+
+
+WebRtc_Word32
+ACMNetEQ::SetPlayoutMode(
+ const AudioPlayoutMode mode)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(_playoutMode != mode)
+ {
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetPlayoutMode: NetEq is not initialized.");
+ return -1;
+ }
+
+ enum WebRtcNetEQPlayoutMode playoutMode;
+ switch(mode)
+ {
+ case voice:
+ playoutMode = kPlayoutOn;
+ break;
+ case fax:
+ playoutMode = kPlayoutFax;
+ break;
+ case streaming:
+ playoutMode = kPlayoutStreaming;
+ break;
+ default:
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetPlayoutMode: NetEq Error playout mode not recognized");
+ return -1;
+ break;
+ }
+ if(WebRtcNetEQ_SetPlayoutMode(_inst[idx], playoutMode) < 0)
+ {
+ LogError("SetPlayoutMode", idx);
+ return -1;
+ }
+ }
+ _playoutMode = mode;
+ }
+
+ return 0;
+}
+
+AudioPlayoutMode
+ACMNetEQ::PlayoutMode() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ return _playoutMode;
+}
+
+
+WebRtc_Word32
+ACMNetEQ::NetworkStatistics(
+ ACMNetworkStatistics* statistics) const
+{
+ WebRtcNetEQ_NetworkStatistics stats;
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "NetworkStatistics: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_GetNetworkStatistics(_inst[0], &stats) == 0)
+ {
+ statistics->currentAccelerateRate = stats.currentAccelerateRate;
+ statistics->currentBufferSize = stats.currentBufferSize;
+ statistics->currentDiscardRate = stats.currentDiscardRate;
+ statistics->currentExpandRate = stats.currentExpandRate;
+ statistics->currentPacketLossRate = stats.currentPacketLossRate;
+ statistics->currentPreemptiveRate = stats.currentPreemptiveRate;
+ statistics->preferredBufferSize = stats.preferredBufferSize;
+ return 0;
+ }
+ else
+ {
+ LogError("getNetworkStatistics", 0);
+ return -1;
+ }
+}
+
+
+WebRtc_Word32
+ACMNetEQ::JitterStatistics(
+ ACMJitterStatistics* jitterStatistics) const
+{
+ WebRtcNetEQ_JitterStatistics stats;
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "JitterStatistics: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_GetJitterStatistics(_inst[0], &stats) == 0)
+ {
+ jitterStatistics->accelerateMs = stats.accelerateMs;
+ jitterStatistics->avgPacketDelayMs = stats.avgPacketDelayMs;
+ jitterStatistics->numExpandTiny = stats.countExpandMoreThan120ms;
+ jitterStatistics->numExpandLong = stats.countExpandMoreThan2000ms;
+ jitterStatistics->numExpandSmall = stats.countExpandMoreThan250ms;
+ jitterStatistics->numExpandMedium = stats.countExpandMoreThan500ms;
+ jitterStatistics->countIAT1000ms = stats.countIAT1000ms;
+ jitterStatistics->countIAT2000ms = stats.countIAT2000ms;
+ jitterStatistics->countIAT500ms = stats.countIAT500ms;
+ jitterStatistics->flushedMs = stats.flushedMs;
+ jitterStatistics->generatedSilentMs = stats.generatedSilentMs;
+ jitterStatistics->interpolatedSilentMs = stats.interpolatedSilentMs;
+ jitterStatistics->interpolatedVoiceMs = stats.interpolatedVoiceMs;
+ jitterStatistics->jbAvgSize = stats.jbAvgSize;
+ jitterStatistics->jbChangeCount = stats.jbChangeCount;
+ jitterStatistics->jbMaxSize = stats.jbMaxSize;
+ jitterStatistics->jbMinSize = stats.jbMinSize;
+ jitterStatistics->lateLossMs = stats.lateLossMs;
+ jitterStatistics->longestExpandDurationMs = stats.longestExpandDurationMs;
+ jitterStatistics->longestIATms = stats.longestIATms;
+ jitterStatistics->maxPacketDelayMs = stats.maxPacketDelayMs;
+ jitterStatistics->minPacketDelayMs = stats.minPacketDelayMs;
+ return 0;
+ }
+ else
+ {
+ LogError("getJitterStatistics", 0);
+ return -1;
+ }
+}
+
+WebRtc_Word32
+ACMNetEQ::PreferredBufferSize(
+ WebRtc_UWord16* prefBufSize) const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ WebRtc_Word32 ok = WebRtcNetEQ_GetPreferredBufferSize(_inst[0], prefBufSize);
+ if((*prefBufSize == 0) || (*prefBufSize == 0xFFFF))
+ {
+ ok = -1;
+ LogError("getPreferredBufferSize", 0);
+ }
+ return ok;
+}
+
+WebRtc_Word32
+ACMNetEQ::ResetJitterStatistics() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(WebRtcNetEQ_ResetJitterStatistics(_inst[0]) < 0)
+ {
+ LogError("resetJitterStatistics", 0);
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+WebRtc_Word32
+ACMNetEQ::RecIn(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo)
+{
+ // translate to NetEq struct
+ WebRtcNetEQ_RTPInfo netEqRTPInfo;
+ netEqRTPInfo.payloadType = rtpInfo.header.payloadType;
+ netEqRTPInfo.sequenceNumber = rtpInfo.header.sequenceNumber;
+ netEqRTPInfo.timeStamp = rtpInfo.header.timestamp;
+ netEqRTPInfo.SSRC = rtpInfo.header.ssrc;
+ netEqRTPInfo.markerBit = rtpInfo.header.markerBit;
+
+ CriticalSectionScoped lock(*_netEqCritSect);
+    // Down-cast the time to (32-6) bits since we only care about
+    // the least significant bits; (32-6) bits cover 2^(32-6) = 67108864 ms.
+    // We mask out the 6 most significant bits of the 32-bit value so that we
+    // don't lose resolution in the multiplication below.
+ const WebRtc_UWord32 nowInMs = static_cast<WebRtc_UWord32>(
+ TickTime::MillisecondTimestamp() & 0x03ffffff);
+ WebRtc_UWord32 recvTimestamp = static_cast<WebRtc_UWord32>
+ (_currentSampFreqKHz * nowInMs);
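+    // Worked example of the masking above (illustrative only): with the mask
+    // 0x03ffffff, nowInMs is below 2^26 = 67108864. Even at 48 kHz the
+    // product 48 * 2^26 is roughly 3.2e9, which still fits in the unsigned
+    // 32-bit recvTimestamp.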
+
+ int status;
+
+ if(rtpInfo.type.Audio.channel == 1)
+ {
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq is not initialized.");
+ return -1;
+ }
+ // PUSH into Master
+ status = WebRtcNetEQ_RecInRTPStruct(_inst[0], &netEqRTPInfo,
+ (WebRtc_UWord8 *)incomingPayload, (WebRtc_Word16)payloadLength,
+ recvTimestamp);
+ if(status < 0)
+ {
+ LogError("RecInRTPStruct", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq, error in pushing in Master");
+ return -1;
+ }
+ }
+ else if(rtpInfo.type.Audio.channel == 2)
+ {
+ if(!_isInitialized[1])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq is not initialized.");
+ return -1;
+ }
+ // PUSH into Slave
+ status = WebRtcNetEQ_RecInRTPStruct(_inst[1], &netEqRTPInfo,
+ (WebRtc_UWord8 *)incomingPayload, (WebRtc_Word16)payloadLength,
+ recvTimestamp);
+ if(status < 0)
+ {
+ LogError("RecInRTPStruct", 1);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq, error in pushing in Slave");
+ return -1;
+ }
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecIn: NetEq, error invalid numbe of channels %d \
+(1, for Master stream, and 2, for slave stream, are valid values)",
+ rtpInfo.type.Audio.channel);
+ return -1;
+ }
+
+ return 0;
+}
+
+WebRtc_Word32
+ACMNetEQ::RecOut(
+ AudioFrame& audioFrame)
+{
+ enum WebRtcNetEQOutputType type;
+ WebRtc_Word16 payloadLenSample;
+ enum WebRtcNetEQOutputType typeMaster;
+ enum WebRtcNetEQOutputType typeSlave;
+
+ WebRtc_Word16 payloadLenSampleSlave;
+
+ CriticalSectionScoped lockNetEq(*_netEqCritSect);
+
+ if(!_receivedStereo)
+ {
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq is not initialized.");
+ return -1;
+ }
+ {
+ WriteLockScoped lockCodec(*_decodeLock);
+ if(WebRtcNetEQ_RecOut(_inst[0], &(audioFrame._payloadData[0]),
+ &payloadLenSample) != 0)
+ {
+ LogError("RecOut", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq, error in pulling out for mono case");
+
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
+ if(errorCode != 2003)
+ {
+ // Cannot recover; return an error
+ return -1;
+ }
+ }
+ }
+ WebRtcNetEQ_GetSpeechOutputType(_inst[0], &type);
+ audioFrame._audioChannel = 1;
+ }
+ else
+ {
+ if(!_isInitialized[0] || !_isInitialized[1])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq is not initialized.");
+ return -1;
+ }
+ WebRtc_Word16 payloadMaster[480];
+ WebRtc_Word16 payloadSlave[480];
+ {
+ WriteLockScoped lockCodec(*_decodeLock);
+ if(WebRtcNetEQ_RecOutMasterSlave(_inst[0], payloadMaster,
+ &payloadLenSample, _masterSlaveInfo, 1) != 0)
+ {
+ LogError("RecOutMasterSlave", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq, error in pulling out for master");
+
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
+ if(errorCode != 2003)
+ {
+ // Cannot recover; return an error
+ return -1;
+ }
+ }
+ if(WebRtcNetEQ_RecOutMasterSlave(_inst[1], payloadSlave,
+ &payloadLenSampleSlave, _masterSlaveInfo, 0) != 0)
+ {
+ LogError("RecOutMasterSlave", 1);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq, error in pulling out for slave");
+
+ // Check for errors that can be recovered from:
+ // RECOUT_ERROR_SAMPLEUNDERRUN = 2003
+ int errorCode = WebRtcNetEQ_GetErrorCode(_inst[0]);
+ if(errorCode != 2003)
+ {
+ // Cannot recover; return an error
+ return -1;
+ }
+ }
+ }
+ if(payloadLenSample != payloadLenSampleSlave)
+ {
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+ "RecOut: mismatch between the lenght of the decoded \
+audio by Master (%d samples) and Slave (%d samples).",
+ payloadLenSample, payloadLenSampleSlave);
+ if(payloadLenSample > payloadLenSampleSlave)
+ {
+ memset(&payloadSlave[payloadLenSampleSlave], 0,
+ (payloadLenSample - payloadLenSampleSlave) * sizeof(WebRtc_Word16));
+ }
+ }
+
+ for(WebRtc_Word16 n = 0; n < payloadLenSample; n++)
+ {
+ audioFrame._payloadData[n<<1] = payloadMaster[n];
+ audioFrame._payloadData[(n<<1)+1] = payloadSlave[n];
+ }
+ audioFrame._audioChannel = 2;
+
+ WebRtcNetEQ_GetSpeechOutputType(_inst[0], &typeMaster);
+ WebRtcNetEQ_GetSpeechOutputType(_inst[1], &typeSlave);
+ if((typeMaster == kOutputNormal) ||
+ (typeSlave == kOutputNormal))
+ {
+ type = kOutputNormal;
+ }
+ else
+ {
+ type = typeMaster;
+ }
+ }
+
+ audioFrame._payloadDataLengthInSamples = static_cast<WebRtc_UWord16>(payloadLenSample);
+ // NetEq always returns 10 ms of audio.
+ _currentSampFreqKHz = static_cast<float>(audioFrame._payloadDataLengthInSamples) / 10.0f;
+ audioFrame._frequencyInHz = audioFrame._payloadDataLengthInSamples * 100;
+ if(_vadStatus)
+ {
+ if(type == kOutputVADPassive)
+ {
+ audioFrame._vadActivity = AudioFrame::kVadPassive;
+ audioFrame._speechType = AudioFrame::kNormalSpeech;
+ }
+ else if(type == kOutputNormal)
+ {
+ audioFrame._vadActivity = AudioFrame::kVadActive;
+ audioFrame._speechType = AudioFrame::kNormalSpeech;
+ }
+ else if(type == kOutputPLC)
+ {
+ audioFrame._vadActivity = _previousAudioActivity;
+ audioFrame._speechType = AudioFrame::kPLC;
+ }
+ else if(type == kOutputCNG)
+ {
+ audioFrame._vadActivity = AudioFrame::kVadPassive;
+ audioFrame._speechType = AudioFrame::kCNG;
+ }
+ else
+ {
+ audioFrame._vadActivity = AudioFrame::kVadPassive;
+ audioFrame._speechType = AudioFrame::kPLCCNG;
+ }
+ }
+ else
+ {
+ // Always return kVadUnknown when receive VAD is inactive
+ audioFrame._vadActivity = AudioFrame::kVadUnknown;
+
+ if(type == kOutputNormal)
+ {
+ audioFrame._speechType = AudioFrame::kNormalSpeech;
+ }
+ else if(type == kOutputPLC)
+ {
+ audioFrame._speechType = AudioFrame::kPLC;
+ }
+ else if(type == kOutputPLCtoCNG)
+ {
+ audioFrame._speechType = AudioFrame::kPLCCNG;
+ }
+ else if(type == kOutputCNG)
+ {
+ audioFrame._speechType = AudioFrame::kCNG;
+ }
+ else
+ {
+ // type is kOutputVADPassive which
+ // we don't expect to get if _vadStatus is false
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+ "RecOut: NetEq returned kVadPassive while _vadStatus is false.");
+ audioFrame._vadActivity = AudioFrame::kVadUnknown;
+ audioFrame._speechType = AudioFrame::kNormalSpeech;
+ }
+ }
+ _previousAudioActivity = audioFrame._vadActivity;
+
+ return 0;
+}
+
+// When ACMGenericCodec has set the codec-specific parameters in codecDef,
+// it calls AddCodec() to add the new codec to the NetEQ database.
+// (An illustrative usage sketch follows this function.)
+WebRtc_Word32
+ACMNetEQ::AddCodec(
+ WebRtcNetEQ_CodecDef* codecDef,
+ bool toMaster)
+{
+ if (codecDef == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: error, codecDef is NULL");
+ return -1;
+ }
+ CriticalSectionScoped lock(*_netEqCritSect);
+
+ WebRtc_Word16 idx;
+ if(toMaster)
+ {
+ idx = 0;
+ }
+ else
+ {
+ idx = 1;
+ }
+
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_CodecDbAdd(_inst[idx], codecDef) < 0)
+ {
+ LogError("CodecDB_Add", idx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMNetEQ::AddCodec: NetEq, error in adding codec");
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
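+
+// Illustrative usage sketch (not part of this change): a codec wrapper is
+// expected to fill a WebRtcNetEQ_CodecDef with the NetEQ helper macros and
+// hand it to AddCodec(). The names netEq and payloadType below are
+// hypothetical placeholders.
+//
+//   WebRtcNetEQ_CodecDef codecDef;
+//   SET_CODEC_PAR((codecDef), kDecoderPCM16B, payloadType, NULL, 8000);
+//   SET_PCM16B_FUNCTIONS((codecDef));
+//   netEq->AddCodec(&codecDef, true);  // true => add to the Master NetEQ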
+
+// Creates a Word16 RTP packet out of a Word8 payload and an RTP info struct.
+// Must be byte-order safe. (See the illustrative header layout after this
+// function.)
+void
+ACMNetEQ::RTPPack(
+ WebRtc_Word16* rtpPacket,
+ const WebRtc_Word8* payload,
+ const WebRtc_Word32 payloadLengthW8,
+ const WebRtcRTPHeader& rtpInfo)
+{
+ WebRtc_Word32 idx = 0;
+ WEBRTC_SPL_SET_BYTE(rtpPacket, (WebRtc_Word8)0x80, idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, rtpInfo.header.payloadType, idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.sequenceNumber), 1), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.sequenceNumber), 0), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.timestamp), 3), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.timestamp), 2), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.timestamp), 1), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.timestamp), 0), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.ssrc), 3), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.ssrc), 2), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.ssrc), 1), idx);
+ idx++;
+
+ WEBRTC_SPL_SET_BYTE(rtpPacket, WEBRTC_SPL_GET_BYTE(
+ &(rtpInfo.header.ssrc), 0), idx);
+ idx++;
+
+ for (WebRtc_Word16 i=0; i < payloadLengthW8; i++)
+ {
+ WEBRTC_SPL_SET_BYTE(rtpPacket, payload[i], idx);
+ idx++;
+ }
+ if (payloadLengthW8 & 1)
+ {
+        // Our 16-bit buffer is one byte too large; set that
+        // last byte to zero.
+ WEBRTC_SPL_SET_BYTE(rtpPacket, 0x0, idx);
+ }
+}
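+
+// For reference, the 12-byte header written by RTPPack() above follows the
+// standard RTP layout (RFC 3550); this is a descriptive summary of the code,
+// not new behavior:
+//
+//   byte  0      : 0x80 (version 2; no padding, extension, CSRC or marker)
+//   byte  1      : payload type
+//   bytes 2-3    : sequence number, big-endian
+//   bytes 4-7    : timestamp, big-endian
+//   bytes 8-11   : SSRC, big-endian
+//   bytes 12-... : payload, zero-padded to an even number of bytes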
+
+
+bool
+ACMNetEQ::VADStatus() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ return _vadStatus;
+}
+
+
+WebRtc_Word16
+ACMNetEQ::SetVADStatus(
+ const bool status)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVADStatus: NetEq is not initialized.");
+ return -1;
+ }
+ if(_vadStatus && !status)
+ {
+            // We have been using VAD but want to stop. Calling the following
+            // function with NULL as the VAD instance switches off the
+            // post-decode VAD.
+ if(WebRtcNetEQ_SetVADInstance(_inst[idx], NULL,
+ (WebRtcNetEQ_VADInitFunction) WebRtcVad_Init,
+ (WebRtcNetEQ_VADSetmodeFunction) WebRtcVad_set_mode,
+ (WebRtcNetEQ_VADFunction) WebRtcVad_Process) < 0)
+ {
+ LogError("setVADinstance", idx);
+ return -1;
+ }
+ // Free VAD Memory
+ if(_ptrVADInst[idx] != NULL)
+ {
+ WebRtcVad_Free(_ptrVADInst[idx]);
+ _ptrVADInst[idx] = NULL;
+ }
+
+ // Set previous VAD status to UNKNOWN
+ _previousAudioActivity = AudioFrame::kVadUnknown;
+ }
+ else if(!_vadStatus && status)
+ {
+ // VAD was off and we have to turn it on
+ if(EnableVADByIdxSafe(idx) < 0)
+ {
+ return -1;
+ }
+
+ // Set previous VAD status to PASSIVE
+ _previousAudioActivity = AudioFrame::kVadPassive;
+ }
+ }
+ _vadStatus = status;
+ return 0;
+}
+
+
+ACMVADMode
+ACMNetEQ::VADMode() const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ return _vadMode;
+}
+
+
+WebRtc_Word16
+ACMNetEQ::SetVADMode(
+ const ACMVADMode mode)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if((mode < VADNormal) || (mode > VADVeryAggr))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVADMode: NetEq error: could not set VAD mode, mode is not supported");
+ return -1;
+ }
+ else
+ {
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVADMode: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_SetVADMode(_inst[idx], mode) < 0)
+ {
+ LogError("SetVADmode", idx);
+ return -1;
+ }
+ }
+ _vadMode = mode;
+ return 0;
+ }
+}
+
+
+WebRtc_Word32
+ACMNetEQ::FlushBuffers()
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "FlushBuffers: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_FlushBuffers(_inst[idx]) < 0)
+ {
+ LogError("FlushBuffers", idx);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMNetEQ::GetVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position)
+{
+ WebRtc_UWord32 len = position;
+ strncpy(&version[position], "NetEq\t\t", remainingBufferInBytes);
+ position = (WebRtc_UWord32)strlen(version);
+ remainingBufferInBytes -= (position - len);
+ len = position;
+
+ WebRtc_Word8 myVersion[100];
+ if(WebRtcNetEQ_GetVersion(myVersion) < 0)
+ {
+ return -1;
+ }
+
+ strncpy(&version[position], myVersion, remainingBufferInBytes);
+ position = (WebRtc_UWord32)strlen(version);
+ remainingBufferInBytes -= (position - len);
+ len = position;
+
+ strncpy(&version[position], "\n", remainingBufferInBytes);
+ position = (WebRtc_UWord32)strlen(version);
+ remainingBufferInBytes -= (position - len);
+ len = position;
+
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::RemoveCodec(
+ WebRtcNetEQDecoder codecIdx,
+ bool isStereo)
+{
+ // sanity check
+ if((codecIdx <= kDecoderReservedStart) ||
+ (codecIdx >= kDecoderReservedEnd))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RemoveCodec: NetEq error: could not Remove Codec, codec index out of range");
+ return -1;
+ }
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RemoveCodec: NetEq is not initialized.");
+ return -1;
+ }
+
+ if(WebRtcNetEQ_CodecDbRemove(_inst[0], codecIdx) < 0)
+ {
+ LogError("CodecDB_Remove", 0);
+ return -1;
+ }
+
+ if(isStereo)
+ {
+ if(WebRtcNetEQ_CodecDbRemove(_inst[1], codecIdx) < 0)
+ {
+ LogError("CodecDB_Remove", 1);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::Delay(
+ WebRtc_UWord16& currentDelayInMs) const
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Delay: NetEq is not initialized.");
+ return -1;
+ }
+    if(WebRtcNetEQ_GetCurrentDelay(_inst[0], &currentDelayInMs) < 0)
+ {
+ LogError("GetCurrentDelay", 0);
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+
+WebRtc_Word16
+ACMNetEQ::SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ for(WebRtc_Word16 idx = 0; idx < _numSlaves + 1; idx++)
+ {
+ if(!_isInitialized[idx])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetBackgroundNoiseMode: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_SetBGNMode(_inst[idx], (WebRtcNetEQBGNMode)mode) < 0)
+ {
+ LogError("SetBGNMode", idx);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+WebRtc_Word16
+ACMNetEQ::BackgroundNoiseMode(
+ ACMBackgroundNoiseMode& mode)
+{
+ WebRtcNetEQBGNMode myMode;
+ CriticalSectionScoped lock(*_netEqCritSect);
+ if(!_isInitialized[0])
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "BackgroundNoiseMode: NetEq is not initialized.");
+ return -1;
+ }
+ if(WebRtcNetEQ_GetBGNMode(_inst[0], &myMode) < 0)
+ {
+ LogError("WebRtcNetEQ_GetBGNMode", 0);
+ return -1;
+ }
+ else
+ {
+ mode = (ACMBackgroundNoiseMode)myMode;
+ }
+ return 0;
+}
+
+void
+ACMNetEQ::SetUniqueId(
+ WebRtc_Word32 id)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ _id = id;
+}
+
+
+void
+ACMNetEQ::LogError(
+ const WebRtc_Word8* neteqFuncName,
+ const WebRtc_Word16 idx) const
+{
+ WebRtc_Word8 errorName[NETEQ_ERR_MSG_LEN_BYTE];
+ WebRtc_Word8 myFuncName[50];
+ int neteqErrorCode = WebRtcNetEQ_GetErrorCode(_inst[idx]);
+ WebRtcNetEQ_GetErrorName(neteqErrorCode, errorName, NETEQ_ERR_MSG_LEN_BYTE - 1);
+ strncpy(myFuncName, neteqFuncName, 49);
+ errorName[NETEQ_ERR_MSG_LEN_BYTE - 1] = '\0';
+ myFuncName[49] = '\0';
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "NetEq-%d Error in function %s, error-code: %d, error-string: %s",
+ idx,
+ myFuncName,
+ neteqErrorCode,
+ errorName);
+}
+
+
+WebRtc_Word32
+ACMNetEQ::PlayoutTimestamp(
+ WebRtc_UWord32& timestamp)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+    if(WebRtcNetEQ_GetSpeechTimeStamp(_inst[0], &timestamp) < 0)
+ {
+ LogError("GetSpeechTimeStamp", 0);
+ return -1;
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+WebRtc_Word16
+ACMNetEQ::AddSlave(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ const WebRtc_Word16 slaveIdx = 1;
+ if(_numSlaves < 1)
+ {
+ // initialize the receiver, this also sets up VAD.
+ if(InitByIdxSafe(slaveIdx) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Initialize");
+ return -1;
+ }
+
+ // Allocate buffer.
+ if(AllocatePacketBufferByIdxSafe(usedCodecs, noOfCodecs, slaveIdx) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Allocate Packet Buffer");
+ return -1;
+ }
+
+ if(_masterSlaveInfo != NULL)
+ {
+ free(_masterSlaveInfo);
+ _masterSlaveInfo = NULL;
+ }
+ int msInfoSize = WebRtcNetEQ_GetMasterSlaveInfoSize();
+ _masterSlaveInfo = malloc(msInfoSize);
+
+ if(_masterSlaveInfo == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Allocate memory for Master-Slave Info");
+ return -1;
+ }
+
+        // We accept this as an initialized NetEQ; the rest is to synchronize
+        // the Slave with the Master.
+ _numSlaves = 1;
+ _isInitialized[slaveIdx] = true;
+
+ // Set Slave delay as Master delay
+ WebRtc_UWord16 currentDelayMs;
+        if(WebRtcNetEQ_GetCurrentDelay(_inst[0], &currentDelayMs) < 0)
+ {
+ LogError("GetCurrentDelay", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Get Current Delay from Master.");
+ return -1;
+ }
+ if(WebRtcNetEQ_SetExtraDelay(_inst[slaveIdx], currentDelayMs) < 0)
+ {
+ LogError("SetExtraDelay", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set delay");
+ return -1;
+ }
+
+ // Set AVT
+ if(WebRtcNetEQ_SetAVTPlayout(_inst[slaveIdx], (_avtPlayout) ? 1 : 0) < 0)
+ {
+ LogError("SetAVTPlayout", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set AVT playout.");
+ return -1;
+ }
+
+ // Set Background Noise
+ WebRtcNetEQBGNMode currentMode;
+        if(WebRtcNetEQ_GetBGNMode(_inst[0], &currentMode) < 0)
+ {
+ LogError("GetBGNMode", 0);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AAddSlave: AddSlave Failed, Could not Get BGN form Master.");
+ return -1;
+ }
+
+ if(WebRtcNetEQ_SetBGNMode(_inst[slaveIdx], (WebRtcNetEQBGNMode)currentMode) < 0)
+ {
+ LogError("SetBGNMode", slaveIdx);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not set BGN mode.");
+ return -1;
+ }
+
+ enum WebRtcNetEQPlayoutMode playoutMode;
+ switch(_playoutMode)
+ {
+ case voice:
+ playoutMode = kPlayoutOn;
+ break;
+ case fax:
+ playoutMode = kPlayoutFax;
+ break;
+ case streaming:
+ playoutMode = kPlayoutStreaming;
+ break;
+ default:
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: NetEq Error, playout mode not recognized");
+ return -1;
+ break;
+ }
+ if(WebRtcNetEQ_SetPlayoutMode(_inst[slaveIdx], playoutMode) < 0)
+ {
+ LogError("SetPlayoutMode", 1);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "AddSlave: AddSlave Failed, Could not Set Playout Mode.");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void
+ACMNetEQ::SetReceivedStereo(
+ bool receivedStereo)
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ _receivedStereo = receivedStereo;
+}
+
+WebRtc_UWord8
+ACMNetEQ::NumSlaves()
+{
+ CriticalSectionScoped lock(*_netEqCritSect);
+ return _numSlaves;
+}
+
+} // namespace webrtc
+
diff --git a/src/modules/audio_coding/main/source/acm_neteq.h b/src/modules/audio_coding/main/source/acm_neteq.h
new file mode 100644
index 0000000..0b983f8
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_neteq.h
@@ -0,0 +1,446 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_NETEQ_H
+#define ACM_NETEQ_H
+
+#include "audio_coding_module.h"
+#include "audio_coding_module_typedefs.h"
+#include "engine_configurations.h"
+#include "module_common_types.h"
+#include "typedefs.h"
+#include "webrtc_neteq.h"
+#include "webrtc_vad.h"
+
+namespace webrtc {
+
+class CriticalSectionWrapper;
+class RWLockWrapper;
+struct CodecInst;
+enum AudioPlayoutMode;
+enum ACMSpeechType;
+
+#define MAX_NUM_SLAVE_NETEQ 1
+
+class ACMNetEQ
+{
+public:
+ // Constructor of the class
+ ACMNetEQ();
+
+ // Destructor of the class.
+ ~ACMNetEQ();
+
+ //
+ // GetVersion()
+ // Fills the version array with the NetEQ version and updates the
+ // remainingBufferInBytes and position variables accordingly.
+ //
+ // Output:
+ // - version : An array to be filled with the version
+ // data.
+ //
+ // Input/Output:
+ // - remainingBuffInBytes : The number of free bytes at the end of
+ // the version array.
+ // - position : Position where the free space starts.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ returned an error.
+ //
+ static WebRtc_Word32 GetVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBuffInBytes,
+ WebRtc_UWord32& position);
+
+ //
+ // Init()
+ // Allocates memory for NetEQ and VAD and initializes them.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ or VAD returned an error or
+ // if out of memory.
+ //
+ WebRtc_Word32 Init();
+
+ //
+ // RecIn()
+ // Gives the payload to NetEQ.
+ //
+ // Input:
+ // - incomingPayload : Incoming audio payload.
+ // - payloadLength : Length of incoming audio payload.
+ // - rtpInfo : RTP header for the incoming payload containing
+ // information about payload type, sequence number,
+ // timestamp, ssrc and marker bit.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 RecIn(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo);
+
+ //
+ // RecOut()
+ // Asks NetEQ for 10 ms of decoded audio.
+ //
+ // Input:
+    //   -audioFrame        : an audio frame where output data and
+    //                            associated parameters are written to.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ returned an error.
+ //
+ WebRtc_Word32 RecOut(
+ AudioFrame& audioFrame);
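+
+    // Illustrative receive-path sketch (a typical usage assumption, not an
+    // API requirement; the names acmNetEq, payload, payloadLength, rtpInfo
+    // and frame are placeholders): RecIn() is fed every received RTP packet,
+    // while RecOut() is polled every 10 ms for playout audio.
+    //
+    //   // On packet arrival:
+    //   acmNetEq.RecIn(payload, payloadLength, rtpInfo);
+    //
+    //   // Every 10 ms, from the playout side:
+    //   AudioFrame frame;
+    //   acmNetEq.RecOut(frame);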
+
+ //
+ // AddCodec()
+ // Adds a new codec to the NetEQ codec database.
+ //
+ // Input:
+ // - codecDef : The codec to be added.
+ // - toMaster : true if the codec has to be added to Master
+ // NetEq, otherwise will be added to the Slave
+ // NetEQ.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 AddCodec(
+ WebRtcNetEQ_CodecDef *codecDef,
+ bool toMaster = true);
+
+ //
+ // AllocatePacketBuffer()
+ // Allocates the NetEQ packet buffer.
+ //
+ // Input:
+ // - usedCodecs : An array of the codecs to be used by NetEQ.
+ // - noOfCodecs : Number of codecs in usedCodecs.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 AllocatePacketBuffer(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs);
+
+ //
+ // SetExtraDelay()
+    // Sets an extra delay of delayInMS milliseconds in NetEQ.
+ //
+ // Input:
+ // - delayInMS : Extra delay in milliseconds.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetExtraDelay(
+ const WebRtc_Word32 delayInMS);
+
+ //
+ // SetAVTPlayout()
+ // Enable/disable playout of AVT payloads.
+ //
+ // Input:
+ // - enable : Enable if true, disable if false.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetAVTPlayout(
+ const bool enable);
+
+ //
+ // AVTPlayout()
+ // Get the current AVT playout state.
+ //
+ // Return value : True if AVT playout is enabled.
+ // False if AVT playout is disabled.
+ //
+ bool AVTPlayout() const;
+
+ //
+ // CurrentSampFreqHz()
+ // Get the current sampling frequency in Hz.
+ //
+ // Return value : Sampling frequency in Hz.
+ //
+ WebRtc_Word32 CurrentSampFreqHz() const;
+
+ //
+ // SetPlayoutMode()
+ // Sets the playout mode to voice or fax.
+ //
+ // Input:
+ // - mode : The playout mode to be used, voice,
+ // fax, or streaming.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 SetPlayoutMode(
+ const AudioPlayoutMode mode);
+
+ //
+ // PlayoutMode()
+ // Get the current playout mode.
+ //
+ // Return value : The current playout mode.
+ //
+ AudioPlayoutMode PlayoutMode() const;
+
+ //
+ // NetworkStatistics()
+ // Get the current network statistics from NetEQ.
+ //
+ // Output:
+ // - statistics : The current network statistics.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 NetworkStatistics(
+ ACMNetworkStatistics* statistics) const;
+
+ //
+ // JitterStatistics()
+ // Get the current jitter statistics from NetEQ.
+ //
+ // Output:
+ // - jitterStatistics : The current jitter statistics.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 JitterStatistics(
+ ACMJitterStatistics* jitterStatistics) const;
+
+ //
+ // PreferredBufferSize()
+ // Get the currently preferred buffer size from NetEQ.
+ //
+ // Output:
+ // - prefBufSize : The optimal buffer size for the current network
+ // conditions.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 PreferredBufferSize(
+ WebRtc_UWord16* prefBufSize) const;
+
+ //
+ // ResetJitterStatistics()
+ // Resets the NetEQ jitter statistics.
+ //
+ // Return value : 0 if ok.
+ // <0 if NetEQ returned an error.
+ //
+ WebRtc_Word32 ResetJitterStatistics() const;
+
+ //
+ // VADStatus()
+ // Get the current VAD status.
+ //
+ // Return value : True if VAD is enabled.
+ // False if VAD is disabled.
+ //
+ bool VADStatus() const;
+
+ //
+ // SetVADStatus()
+ // Enable/disable VAD.
+ //
+ // Input:
+    //   - status          : Enable if true, disable if false.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 SetVADStatus(
+ const bool status);
+
+ //
+ // VADMode()
+ // Get the current VAD Mode.
+ //
+ // Return value : The current VAD mode.
+ //
+ ACMVADMode VADMode() const;
+
+ //
+ // SetVADMode()
+ // Set the VAD mode.
+ //
+ // Input:
+ // - mode : The new VAD mode.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 SetVADMode(
+ const ACMVADMode mode);
+
+ //
+ // DecodeLock()
+ // Get the decode lock used to protect decoder instances while decoding.
+ //
+ // Return value : Pointer to the decode lock.
+ //
+ RWLockWrapper* DecodeLock() const
+ {
+ return _decodeLock;
+ }
+
+ //
+ // FlushBuffers()
+ // Flushes the NetEQ packet and speech buffers.
+ //
+ // Return value : 0 if ok.
+ // -1 if NetEQ returned an error.
+ //
+ WebRtc_Word32 FlushBuffers();
+
+ //
+ // RemoveCodec()
+ // Removes a codec from the NetEQ codec database.
+ //
+ // Input:
+ // - codecIdx : Codec to be removed.
+ //
+ // Return value : 0 if ok.
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 RemoveCodec(
+ WebRtcNetEQDecoder codecIdx,
+ bool isStereo = false);
+
+
+ //
+ // Delay()
+ // Get the length of the current audio buffer in milliseconds. That is
+ // approximately the playout delay, which can be used for lip-synch.
+ //
+ // Output:
+ // - currentDelayInMs : delay in audio buffer given in milliseconds
+ //
+ // return value : 0 if ok
+ // -1 if an error occurred.
+ //
+ WebRtc_Word16 Delay(
+ WebRtc_UWord16& currentDelayInMs) const;
+
+ //
+ // SetBackgroundNoiseMode()
+ // Set the mode of the background noise.
+ //
+ // Input:
+ // - mode : an enumerator specifying the mode of the
+ // background noise.
+ //
+ // Return value : 0 if succeeded,
+ // -1 if failed to set the mode.
+ //
+ WebRtc_Word16 SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode);
+
+ //
+ // BackgroundNoiseMode()
+    // Returns the mode of the background noise.
+ //
+ // Return value : The mode of background noise.
+ //
+ WebRtc_Word16 BackgroundNoiseMode(
+ ACMBackgroundNoiseMode& mode);
+
+ void SetUniqueId(
+ WebRtc_Word32 id);
+
+ WebRtc_Word32 PlayoutTimestamp(
+ WebRtc_UWord32& timestamp);
+
+ void SetReceivedStereo(
+ bool receivedStereo);
+
+ WebRtc_UWord8 NumSlaves();
+
+ enum JB {masterJB = 0, slaveJB = 1};
+
+ WebRtc_Word16 AddSlave(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs);
+
+private:
+ //
+ // RTPPack()
+    // Creates a Word16 RTP packet out of a Word8 payload and
+    // a WebRtcRTPHeader.
+ //
+ // Input:
+ // - payload : Payload to be packetized.
+ // - payloadLengthW8 : Length of the payload in bytes.
+ // - rtpInfo : RTP header struct.
+ //
+ // Output:
+ // - rtpPacket : The RTP packet.
+ //
+ static void RTPPack(
+ WebRtc_Word16* rtpPacket,
+ const WebRtc_Word8* payload,
+ const WebRtc_Word32 payloadLengthW8,
+ const WebRtcRTPHeader& rtpInfo);
+
+ void LogError(
+ const WebRtc_Word8* neteqFuncName,
+ const WebRtc_Word16 idx) const;
+
+ WebRtc_Word16 InitByIdxSafe(
+ const WebRtc_Word16 idx);
+
+ WebRtc_Word16 EnableVADByIdxSafe(
+ const WebRtc_Word16 idx);
+
+ WebRtc_Word16 AllocatePacketBufferByIdxSafe(
+ WebRtcNetEQDecoder* usedCodecs,
+ WebRtc_Word16 noOfCodecs,
+ const WebRtc_Word16 idx);
+
+ void* _inst[MAX_NUM_SLAVE_NETEQ + 1];
+ void* _instMem[MAX_NUM_SLAVE_NETEQ + 1];
+
+ WebRtc_Word16* _netEqPacketBuffer[MAX_NUM_SLAVE_NETEQ + 1];
+
+ WebRtc_Word32 _id;
+ float _currentSampFreqKHz;
+ bool _avtPlayout;
+ AudioPlayoutMode _playoutMode;
+ CriticalSectionWrapper* _netEqCritSect;
+
+ WebRtcVadInst* _ptrVADInst[MAX_NUM_SLAVE_NETEQ + 1];
+
+ bool _vadStatus;
+ ACMVADMode _vadMode;
+ RWLockWrapper* _decodeLock;
+ bool _isInitialized[MAX_NUM_SLAVE_NETEQ + 1];
+ WebRtc_UWord8 _numSlaves;
+ bool _receivedStereo;
+ void* _masterSlaveInfo;
+ AudioFrame::VADActivity _previousAudioActivity;
+
+ CriticalSectionWrapper* _callbackCritSect;
+};
+
+} //namespace webrtc
+
+#endif //ACM_NETEQ_H
diff --git a/src/modules/audio_coding/main/source/acm_opus.cc b/src/modules/audio_coding/main/source/acm_opus.cc
new file mode 100644
index 0000000..069175b
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_opus.cc
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "acm_opus.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_OPUS
+    // NOTE! Opus is not included in the open-source package. Modify this file
+    // or your codec API to match the function calls and the name of the Opus
+    // API file that is used.
+ // #include "opus_interface.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_OPUS
+
+ACMOPUS::ACMOPUS(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMOPUS::~ACMOPUS()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMOPUS::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMOPUS::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMOPUS::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+void
+ACMOPUS::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMOPUS::DestructDecoderSafe()
+{
+ return;
+}
+
+
+void
+ACMOPUS::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMOPUS::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMOPUS::SetBitRateSafe(
+ const WebRtc_Word32 /*rate*/ )
+{
+ return -1;
+}
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real Opus wrapper
+extern WebRtc_Word16 WebRtcOpus_CreateEnc(OPUS_inst_t_** inst, WebRtc_Word16 samplFreq);
+extern WebRtc_Word16 WebRtcOpus_CreateDec(OPUS_inst_t_** inst, WebRtc_Word16 samplFreq);
+extern WebRtc_Word16 WebRtcOpus_FreeEnc(OPUS_inst_t_* inst);
+extern WebRtc_Word16 WebRtcOpus_FreeDec(OPUS_inst_t_* inst);
+extern WebRtc_Word16 WebRtcOpus_Encode(OPUS_inst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16* output,
+ WebRtc_Word16 len,
+ WebRtc_Word16 byteLen);
+extern WebRtc_Word16 WebRtcOpus_EncoderInit(OPUS_inst_t_* encInst,
+ WebRtc_Word16 samplFreq,
+ WebRtc_Word16 mode,
+ WebRtc_Word16 vbrFlag);
+extern WebRtc_Word16 WebRtcOpus_Decode(OPUS_inst_t_* decInst);
+extern WebRtc_Word16 WebRtcOpus_DecodeBwe(OPUS_inst_t_* decInst, WebRtc_Word16* input);
+extern WebRtc_Word16 WebRtcOpus_DecodePlc(OPUS_inst_t_* decInst);
+extern WebRtc_Word16 WebRtcOpus_DecoderInit(OPUS_inst_t_* decInst);
+
+ACMOPUS::ACMOPUS(
+ WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL),
+_opusMode(1), // default mode is the hybrid mode
+_flagVBR(0) // default VBR off
+{
+ _codecID = codecID;
+
+ // Current implementation doesn't have DTX. That might change.
+ _hasInternalDTX = false;
+
+ // Default sampling frequency
+ _mySampFreq = 48000;
+
+ // default rate
+ _myRate = 50000;
+
+ return;
+}
+
+ACMOPUS::~ACMOPUS()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcOpus_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcOpus_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 noEncodedSamples = 0;
+ WebRtc_Word16 tmpLenByte = 0;
+ *bitStreamLenByte = 0;
+
+ WebRtc_Word16 byteLengthFrame = 0;
+
+ // Derive what byte-length is requested
+ byteLengthFrame = _myRate*_frameLenSmpl/(8*_mySampFreq);
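+    // Worked example (illustrative): with _myRate = 32000 bit/s and a 20 ms
+    // frame at 48 kHz (_frameLenSmpl = 960, _mySampFreq = 48000), this gives
+    // 32000 * 960 / (8 * 48000) = 80 bytes.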
+
+ // Call Encoder
+ *bitStreamLenByte = WebRtcOpus_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
+ (WebRtc_Word16*)bitStream, _frameLenSmpl, byteLengthFrame);
+
+    // Increment the read index; this tells the caller how far
+    // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl;
+
+ // sanity check
+ if(*bitStreamLenByte < 0)
+ {
+ // error has happened
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalEncode: Encode error for Opus");
+ *bitStreamLenByte = 0;
+ return -1;
+ }
+
+ return *bitStreamLenByte;
+}
+
+
+
+WebRtc_Word16
+ACMOPUS::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ //set the bit rate and initialize
+ _myRate = codecParams->codecInstant.rate;
+ return SetBitRateSafe( (WebRtc_UWord32)_myRate);
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ if (WebRtcOpus_DecoderInit(_decoderInstPtr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalInitDecoder: init decoder failed for Opus");
+ return -1;
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMOPUS::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "CodeDef: Decoder uninitialized for Opus");
+ return -1;
+ }
+
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_OPUS_FUNCTIONS".
+    // Then call NetEQ to add the codec to its
+    // database.
+ SET_CODEC_PAR((codecDef), kDecoderOpus, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ SET_OPUS_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMOPUS::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalCreateEncoder()
+{
+ if (WebRtcOpus_CreateEnc(&_encoderInstPtr, _mySampFreq) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateEncoder: create encoder failed for Opus");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMOPUS::DestructEncoderSafe()
+{
+ _encoderExist = false;
+ _encoderInitialized = false;
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcOpus_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+}
+
+
+WebRtc_Word16
+ACMOPUS::InternalCreateDecoder()
+{
+ if (WebRtcOpus_CreateDec(&_decoderInstPtr, _mySampFreq) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "InternalCreateDecoder: create decoder failed for Opus");
+ return -1;
+ }
+ return 0;
+}
+
+
+void
+ACMOPUS::DestructDecoderSafe()
+{
+ _decoderExist = false;
+ _decoderInitialized = false;
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcOpus_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+}
+
+
+void
+ACMOPUS::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcOpus_FreeEnc((OPUS_inst_t*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMOPUS::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec: given payload-type does not match \
+the stored payload type",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+ return netEq->RemoveCodec(kDecoderOpus);
+}
+
+WebRtc_Word16
+ACMOPUS::SetBitRateSafe(
+ const WebRtc_Word32 rate)
+{
+ //allowed rates: {8000, 12000, 14000, 16000, 18000, 20000,
+ // 22000, 24000, 26000, 28000, 30000, 32000};
+    switch(rate)
+    {
+    case 8000:
+    case 12000:
+    case 14000:
+    case 16000:
+    case 18000:
+    case 20000:
+    case 22000:
+    case 24000:
+    case 26000:
+    case 28000:
+    case 30000:
+    case 32000:
+        {
+            _myRate = (WebRtc_UWord16)rate;
+            break;
+        }
+    default:
+        {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                "SetBitRateSafe: Invalid rate Opus");
+            return -1;
+        }
+    }
+
+ // Re-init with new rate
+ if (WebRtcOpus_EncoderInit(_encoderInstPtr, _mySampFreq, _opusMode, _flagVBR) >= 0)
+ {
+ _encoderParams.codecInstant.rate = _myRate;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_opus.h b/src/modules/audio_coding/main/source/acm_opus.h
new file mode 100644
index 0000000..47a8a13
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_opus.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_OPUS_H
+#define ACM_OPUS_H
+
+#include "acm_generic_codec.h"
+
+// forward declaration
+struct OPUS_inst_t_;
+
+namespace webrtc
+{
+
+class ACMOPUS: public ACMGenericCodec
+{
+public:
+ ACMOPUS(WebRtc_Word16 codecID);
+ ~ACMOPUS();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ OPUS_inst_t_* _encoderInstPtr;
+ OPUS_inst_t_* _decoderInstPtr;
+
+ WebRtc_UWord16 _mySampFreq;
+ WebRtc_UWord16 _myRate;
+ WebRtc_Word16 _opusMode;
+ WebRtc_Word16 _flagVBR;
+
+};
+
+} // namespace webrtc
+
+#endif // ACM_OPUS_H
+
diff --git a/src/modules/audio_coding/main/source/acm_pcm16b.cc b/src/modules/audio_coding/main/source/acm_pcm16b.cc
new file mode 100644
index 0000000..1887f5b
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcm16b.cc
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "acm_pcm16b.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_PCM16
+ #include "pcm16b.h"
+#endif
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_PCM16
+
+ACMPCM16B::ACMPCM16B(
+ WebRtc_Word16 /* codecID */)
+{
+ return;
+}
+
+
+ACMPCM16B::~ACMPCM16B()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+
+WebRtc_Word32
+ACMPCM16B::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+
+ACMGenericCodec*
+ACMPCM16B::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalCreateEncoder()
+{
+ return -1;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalCreateDecoder()
+{
+ return -1;
+}
+
+
+void
+ACMPCM16B::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+
+void
+ACMPCM16B::DestructEncoderSafe()
+{
+ return;
+}
+
+void
+ACMPCM16B::DestructDecoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+
+
+#else //===================== Actual Implementation =======================
+
+
+ACMPCM16B::ACMPCM16B(
+ WebRtc_Word16 codecID)
+{
+ _codecID = codecID;
+ _samplingFreqHz = ACMCodecDB::CodecFreq(_codecID);
+}
+
+
+ACMPCM16B::~ACMPCM16B()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ *bitStreamLenByte = WebRtcPcm16b_Encode(&_inAudio[_inAudioIxRead],
+ _frameLenSmpl*_noChannels,
+ bitStream);
+    // Increment the read index to tell the caller how far
+    // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl*_noChannels;
+ return *bitStreamLenByte;
+}
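+
+// For reference (descriptive only, no behavioral change implied): linear
+// PCM16 produces two bytes per sample, so the returned length equals
+// 2 * _frameLenSmpl * _noChannels. For example, a 10 ms mono frame at 8 kHz
+// (80 samples) encodes to 160 bytes.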
+
+
+WebRtc_Word16
+ACMPCM16B::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMPCM16B::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+    // Fill up the structure by calling
+    // "SET_CODEC_PAR" & "SET_PCM16B_FUNCTIONS".
+    // Then call NetEQ to add the codec to its
+    // database.
+ switch(_samplingFreqHz)
+ {
+ case 8000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderPCM16B, codecInst.pltype,
+ NULL, 8000);
+ SET_PCM16B_FUNCTIONS((codecDef));
+ break;
+ }
+ case 16000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderPCM16Bwb, codecInst.pltype,
+ NULL, 16000);
+ SET_PCM16B_WB_FUNCTIONS((codecDef));
+ break;
+ }
+ case 32000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderPCM16Bswb32kHz,
+ codecInst.pltype, NULL, 32000);
+ SET_PCM16B_SWB32_FUNCTIONS((codecDef));
+ break;
+ }
+ default:
+ {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMPCM16B::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalCreateEncoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::InternalCreateDecoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+void
+ACMPCM16B::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ // PCM has no instance
+ return;
+}
+
+
+void
+ACMPCM16B::DestructEncoderSafe()
+{
+ // PCM has no instance
+ _encoderExist = false;
+ _encoderInitialized = false;
+ return;
+}
+
+void
+ACMPCM16B::DestructDecoderSafe()
+{
+ // PCM has no instance
+ _decoderExist = false;
+ _decoderInitialized = false;
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCM16B::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+
+ switch(_samplingFreqHz)
+ {
+ case 8000:
+ {
+ return netEq->RemoveCodec(kDecoderPCM16B);
+ break;
+ }
+ case 16000:
+ {
+ return netEq->RemoveCodec(kDecoderPCM16Bwb);
+ break;
+ }
+ case 32000:
+ {
+ return netEq->RemoveCodec(kDecoderPCM16Bswb32kHz);
+ break;
+ }
+ default:
+ {
+ return -1;
+ }
+ }
+}
+
+#endif
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_pcm16b.h b/src/modules/audio_coding/main/source/acm_pcm16b.h
new file mode 100644
index 0000000..7f4b691
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcm16b.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_PCM16B_H
+#define ACM_PCM16B_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+class ACMPCM16B : public ACMGenericCodec
+{
+public:
+ ACMPCM16B(WebRtc_Word16 codecID);
+ ~ACMPCM16B();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word32 _samplingFreqHz;
+};
+
+} // namespace webrtc
+
+#endif //ACM_PCM16B_H
+
diff --git a/src/modules/audio_coding/main/source/acm_pcma.cc b/src/modules/audio_coding/main/source/acm_pcma.cc
new file mode 100644
index 0000000..c86bd1c
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcma.cc
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "acm_pcma.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+// Codec interface
+#include "g711_interface.h"
+
+namespace webrtc
+{
+
+ACMPCMA::ACMPCMA(WebRtc_Word16 codecID)
+{
+ _codecID = codecID;
+}
+
+
+ACMPCMA::~ACMPCMA()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCMA::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ *bitStreamLenByte = WebRtcG711_EncodeA(NULL, &_inAudio[_inAudioIxRead],
+ _frameLenSmpl*_noChannels, (WebRtc_Word16*)bitStream);
+    // Increment the read index to tell the caller how far
+    // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl*_noChannels;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMPCMA::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMA::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMA::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word32 ACMPCMA::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+    // Fill up the structure by calling "SET_CODEC_PAR" &
+    // "SET_PCMA_FUNCTIONS", then let NetEQ add the codec
+    // to its database.
+ SET_CODEC_PAR((codecDef), kDecoderPCMa, codecInst.pltype, NULL, 8000);
+ SET_PCMA_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMPCMA::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMPCMA::InternalCreateEncoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMA::InternalCreateDecoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+void
+ACMPCMA::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ // PCM has no instance
+ return;
+}
+
+
+void
+ACMPCMA::DestructEncoderSafe()
+{
+ // PCM has no instance
+ return;
+}
+
+
+void
+ACMPCMA::DestructDecoderSafe()
+{
+ // PCM has no instance
+ _decoderInitialized = false;
+ _decoderExist = false;
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCMA::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+
+ return netEq->RemoveCodec(kDecoderPCMa);
+}
+
+} // namespace webrtc
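Not part of the original patch: a minimal sketch of how the G.711 A-law encoder is driven, mirroring the call in ACMPCMA::InternalEncode above. The g711_interface.h prototype is assumed from that call (NULL state pointer, sample count in, payload length in bytes returned); treat it as illustrative rather than authoritative.

    #include <stddef.h>
    #include "g711_interface.h"
    #include "typedefs.h"

    void EncodePcmaFrameExample()
    {
        WebRtc_Word16 speech[80] = {0};  // 10 ms of audio at 8 kHz, mono
        WebRtc_UWord8 payload[80];       // A-law output, one byte per sample

        // The first argument (encoder state) is passed as NULL, matching the
        // call in InternalEncode; the return value is the payload size in bytes.
        WebRtc_Word16 lenBytes = WebRtcG711_EncodeA(
            NULL, speech, 80, (WebRtc_Word16*)payload);
        (void)lenBytes;  // expected to equal 80 on success (one byte per sample)
    }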
diff --git a/src/modules/audio_coding/main/source/acm_pcma.h b/src/modules/audio_coding/main/source/acm_pcma.h
new file mode 100644
index 0000000..874f471
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcma.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_PCMA_H
+#define ACM_PCMA_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+class ACMPCMA : public ACMGenericCodec
+{
+public:
+ ACMPCMA(WebRtc_Word16 codecID);
+ ~ACMPCMA();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+};
+
+} // namespace webrtc
+
+#endif //ACM_PCMA_H
+
diff --git a/src/modules/audio_coding/main/source/acm_pcmu.cc b/src/modules/audio_coding/main/source/acm_pcmu.cc
new file mode 100644
index 0000000..945eaa0
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcmu.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "acm_pcmu.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+// Codec interface
+#include "g711_interface.h"
+
+namespace webrtc
+{
+
+ACMPCMU::ACMPCMU(WebRtc_Word16 codecID)
+{
+ _codecID = codecID;
+}
+
+
+ACMPCMU::~ACMPCMU()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCMU::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ *bitStreamLenByte = WebRtcG711_EncodeU(NULL, &_inAudio[_inAudioIxRead],
+ _frameLenSmpl*_noChannels, (WebRtc_Word16*)bitStream);
+    // Increment the read index to tell the caller how far
+    // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _frameLenSmpl*_noChannels;
+ return *bitStreamLenByte;
+}
+
+
+WebRtc_Word16
+ACMPCMU::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMU::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMU::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMPCMU::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+    // Fill up the structure by calling "SET_CODEC_PAR" &
+    // "SET_PCMU_FUNCTIONS", then let NetEQ add the codec
+    // to its database.
+ SET_CODEC_PAR((codecDef), kDecoderPCMu, codecInst.pltype, NULL, 8000);
+ SET_PCMU_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMPCMU::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMPCMU::InternalCreateEncoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMPCMU::InternalCreateDecoder()
+{
+ // PCM has no instance
+ return 0;
+}
+
+
+void
+ACMPCMU::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ // PCM has no instance
+ return;
+}
+
+
+void
+ACMPCMU::DestructEncoderSafe()
+{
+ // PCM has no instance
+ _encoderExist = false;
+ _encoderInitialized = false;
+ return;
+}
+
+void ACMPCMU::DestructDecoderSafe()
+{
+ // PCM has no instance
+ _decoderInitialized = false;
+ _decoderExist = false;
+ return;
+}
+
+
+WebRtc_Word16
+ACMPCMU::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+
+ return netEq->RemoveCodec(kDecoderPCMu);
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_pcmu.h b/src/modules/audio_coding/main/source/acm_pcmu.h
new file mode 100644
index 0000000..a4fbbad
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_pcmu.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_PCMU_H
+#define ACM_PCMU_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+class ACMPCMU : public ACMGenericCodec
+{
+public:
+ ACMPCMU(WebRtc_Word16 codecID);
+ ~ACMPCMU();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+};
+
+} // namespace webrtc
+
+#endif //ACM_PCMU_H
+
diff --git a/src/modules/audio_coding/main/source/acm_red.cc b/src/modules/audio_coding/main/source/acm_red.cc
new file mode 100644
index 0000000..edc8c76
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_red.cc
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_red.h"
+#include "acm_neteq.h"
+#include "acm_common_defs.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+namespace webrtc
+{
+
+ACMRED::ACMRED(WebRtc_Word16 codecID)
+{
+ _codecID = codecID;
+}
+
+
+ACMRED::~ACMRED()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMRED::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ // RED is never used as an encoder
+ // RED has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMRED::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMRED::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // RED has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMRED::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ // This codec does not need initialization,
+ // RED has no instance
+ return 0;
+}
+
+
+WebRtc_Word32
+ACMRED::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+        // TODO: log an error here.
+ return -1;
+ }
+
+    // Fill up the structure by calling "SET_CODEC_PAR" &
+    // "SET_RED_FUNCTIONS", then let NetEQ add the codec
+    // to its database.
+ SET_CODEC_PAR((codecDef), kDecoderRED, codecInst.pltype, NULL, 8000);
+ SET_RED_FUNCTIONS((codecDef));
+ return 0;
+}
+
+
+ACMGenericCodec*
+ACMRED::CreateInstance(void)
+{
+ return NULL;
+}
+
+
+WebRtc_Word16
+ACMRED::InternalCreateEncoder()
+{
+ // RED has no instance
+ return 0;
+}
+
+
+WebRtc_Word16
+ACMRED::InternalCreateDecoder()
+{
+ // RED has no instance
+ return 0;
+}
+
+
+void
+ACMRED::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ // RED has no instance
+ return;
+}
+
+
+void
+ACMRED::DestructEncoderSafe()
+{
+ // RED has no instance
+ return;
+}
+
+void ACMRED::DestructDecoderSafe()
+{
+ // RED has no instance
+ return;
+}
+
+
+WebRtc_Word16
+ACMRED::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+
+ return netEq->RemoveCodec(kDecoderRED);
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/acm_red.h b/src/modules/audio_coding/main/source/acm_red.h
new file mode 100644
index 0000000..5900ec6
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_red.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_RED_H
+#define ACM_RED_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+class ACMRED : public ACMGenericCodec
+{
+public:
+ ACMRED(WebRtc_Word16 codecID);
+ ~ACMRED();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+};
+
+} // namespace webrtc
+
+#endif //ACM_RED_H
+
diff --git a/src/modules/audio_coding/main/source/acm_resampler.cc b/src/modules/audio_coding/main/source/acm_resampler.cc
new file mode 100644
index 0000000..7389c14
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_resampler.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <string.h>
+
+#include "acm_resampler.h"
+#include "critical_section_wrapper.h"
+#include "resampler.h"
+#include "signal_processing_library.h"
+#include "trace.h"
+
+namespace webrtc
+{
+
+ACMResampler::ACMResampler():
+_resamplerCritSect(*CriticalSectionWrapper::CreateCriticalSection())
+{
+}
+
+ACMResampler::~ACMResampler()
+{
+
+ delete &_resamplerCritSect;
+}
+
+
+WebRtc_Word16
+ACMResampler::Resample10Msec(
+ const WebRtc_Word16* inAudio,
+ WebRtc_Word32 inFreqHz,
+ WebRtc_Word16* outAudio,
+ WebRtc_Word32 outFreqHz,
+ WebRtc_UWord8 numAudioChannels)
+{
+
+ CriticalSectionScoped cs(_resamplerCritSect);
+
+ if(inFreqHz == outFreqHz)
+ {
+ memcpy(outAudio, inAudio, (inFreqHz*numAudioChannels / 100) * sizeof(WebRtc_Word16));
+ return (WebRtc_Word16)(inFreqHz / 100);
+ }
+
+ int maxLen = 480 * numAudioChannels; //max number of samples for 10ms at 48kHz
+ int lengthIn = (WebRtc_Word16)(inFreqHz / 100) * numAudioChannels;
+ int outLen;
+
+ WebRtc_Word32 ret;
+ ResamplerType type;
+ type = (numAudioChannels == 1)? kResamplerSynchronous:kResamplerSynchronousStereo;
+
+ ret = _resampler.ResetIfNeeded(inFreqHz,outFreqHz,type);
+ if (ret < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Error in reset of resampler");
+ return -1;
+ }
+
+ ret = _resampler.Push(inAudio, lengthIn, outAudio, maxLen, outLen);
+ if (ret < 0 )
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Error in resampler: resampler.Push");
+ return -1;
+ }
+
+    WebRtc_Word16 outAudioLenSmpl = (WebRtc_Word16)(outLen / numAudioChannels);
+
+ return outAudioLenSmpl;
+
+}
+
+void
+ACMResampler::SetUniqueId(
+ WebRtc_Word32 id)
+{
+ CriticalSectionScoped lock(_resamplerCritSect);
+ _id = id;
+}
+
+} // namespace webrtc
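Not part of the original patch: a minimal usage sketch for the resampler defined above, assuming a 10 ms, 8 kHz mono frame converted to 16 kHz. The output buffer follows the 480-samples-per-channel bound used inside Resample10Msec.

    #include "acm_resampler.h"
    #include "typedefs.h"

    void ResampleExample()
    {
        webrtc::ACMResampler resampler;
        resampler.SetUniqueId(0);

        WebRtc_Word16 inAudio[80] = {0};  // 10 ms at 8 kHz, mono
        WebRtc_Word16 outAudio[480];      // room for 10 ms at 48 kHz

        // On success the return value is the number of output samples per
        // channel (160 for 16 kHz); -1 signals a reset or push error.
        WebRtc_Word16 outSamples =
            resampler.Resample10Msec(inAudio, 8000, outAudio, 16000, 1);
        (void)outSamples;
    }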
diff --git a/src/modules/audio_coding/main/source/acm_resampler.h b/src/modules/audio_coding/main/source/acm_resampler.h
new file mode 100644
index 0000000..f4d5c53
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_resampler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_RESAMPLER_H
+#define ACM_RESAMPLER_H
+
+#include "resampler.h"
+#include "typedefs.h"
+
+namespace webrtc {
+
+class CriticalSectionWrapper;
+
+class ACMResampler
+{
+public:
+ ACMResampler();
+ ~ACMResampler();
+
+ WebRtc_Word16 Resample10Msec(
+ const WebRtc_Word16* inAudio,
+ const WebRtc_Word32 inFreqHz,
+ WebRtc_Word16* outAudio,
+ const WebRtc_Word32 outFreqHz,
+ WebRtc_UWord8 numAudioChannels);
+
+ void SetUniqueId(
+ WebRtc_Word32 id);
+
+private:
+
+ //Use the Resampler class
+ Resampler _resampler;
+ WebRtc_Word32 _id;
+ CriticalSectionWrapper& _resamplerCritSect;
+};
+
+} // namespace webrtc
+
+#endif //ACM_RESAMPLER_H
diff --git a/src/modules/audio_coding/main/source/acm_speex.cc b/src/modules/audio_coding/main/source/acm_speex.cc
new file mode 100644
index 0000000..f197316
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_speex.cc
@@ -0,0 +1,661 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_neteq.h"
+#include "acm_speex.h"
+#include "trace.h"
+#include "webrtc_neteq.h"
+#include "webrtc_neteq_help_macros.h"
+
+#ifdef WEBRTC_CODEC_SPEEX
+    // NOTE! Speex is not included in the open-source package. Modify this
+    // file or your codec API to match the function calls and the name of
+    // the Speex API file you use.
+ // #include "speex_interface.h"
+#endif
+
+
+namespace webrtc
+{
+
+#ifndef WEBRTC_CODEC_SPEEX
+ACMSPEEX::ACMSPEEX(WebRtc_Word16 /* codecID*/)
+{
+ return;
+}
+
+ACMSPEEX::~ACMSPEEX()
+{
+ return;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalEncode(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16* /* bitStreamLenByte */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::EnableDTX()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::DisableDTX()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalInitEncoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ return -1;
+}
+
+WebRtc_Word32
+ACMSPEEX::CodecDef(
+ WebRtcNetEQ_CodecDef& /* codecDef */,
+ const CodecInst& /* codecInst */)
+{
+ return -1;
+}
+
+ACMGenericCodec*
+ACMSPEEX::CreateInstance(void)
+{
+ return NULL;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalCreateEncoder()
+{
+ return -1;
+}
+
+void
+ACMSPEEX::DestructEncoderSafe()
+{
+ return;
+}
+
+
+WebRtc_Word16
+ACMSPEEX::InternalCreateDecoder()
+{
+ return -1;
+}
+
+void
+ACMSPEEX::DestructDecoderSafe()
+{
+ return;
+}
+
+WebRtc_Word16
+ACMSPEEX::SetBitRateSafe(
+ const WebRtc_Word32 /* rate */)
+{
+ return -1;
+}
+
+void
+ACMSPEEX::InternalDestructEncoderInst(
+ void* /* ptrInst */)
+{
+ return;
+}
+
+WebRtc_Word16
+ACMSPEEX::UnregisterFromNetEqSafe(
+ ACMNetEQ* /* netEq */,
+ WebRtc_Word16 /* payloadType */)
+{
+ return -1;
+}
+
+#ifdef UNUSEDSPEEX
+WebRtc_Word16
+ACMSPEEX::EnableVBR()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::DisableVBR()
+{
+ return -1;
+}
+
+WebRtc_Word16
+ACMSPEEX::SetComplMode(
+ WebRtc_Word16 mode)
+{
+ return -1;
+}
+#endif
+
+#else //===================== Actual Implementation =======================
+
+// Remove when integrating a real Speex wrapper
+extern WebRtc_Word16 WebRtcSpeex_CreateEnc(SPEEX_encinst_t_** inst,
+ WebRtc_Word16 samplFreq);
+extern WebRtc_Word16 WebRtcSpeex_CreateDec(SPEEX_decinst_t_** inst,
+ WebRtc_Word16 samplFreq,
+ WebRtc_Word16 mode);
+extern WebRtc_Word16 WebRtcSpeex_FreeEnc(SPEEX_encinst_t_* inst);
+extern WebRtc_Word16 WebRtcSpeex_FreeDec(SPEEX_decinst_t_* inst);
+extern WebRtc_Word16 WebRtcSpeex_Encode(SPEEX_encinst_t_* encInst,
+ WebRtc_Word16* input,
+ WebRtc_Word16 rate);
+extern WebRtc_Word16 WebRtcSpeex_EncoderInit(SPEEX_encinst_t_* encInst,
+ WebRtc_Word16 samplFreq,
+ WebRtc_Word16 mode,
+ WebRtc_Word16 vbrFlag);
+extern WebRtc_Word16 WebRtcSpeex_GetBitstream(SPEEX_encinst_t_* encInst,
+ WebRtc_Word16* output);
+extern WebRtc_Word16 WebRtcSpeex_Decode(SPEEX_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcSpeex_DecodePlc(SPEEX_decinst_t_* decInst);
+extern WebRtc_Word16 WebRtcSpeex_DecoderInit(SPEEX_decinst_t_* decInst);
+
+ACMSPEEX::ACMSPEEX(WebRtc_Word16 codecID):
+_encoderInstPtr(NULL),
+_decoderInstPtr(NULL)
+{
+ _codecID = codecID;
+
+    // Set sampling frequency, frame size and rate for Speex
+ if(_codecID == ACMCodecDB::speex8)
+ {
+ _samplingFrequency = 8000;
+ _samplesIn20MsAudio = 160;
+ _encodingRate = 11000;
+ }
+ else if(_codecID == ACMCodecDB::speex16)
+ {
+ _samplingFrequency = 16000;
+ _samplesIn20MsAudio = 320;
+ _encodingRate = 22000;
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Wrong codec id for Speex.");
+
+ _samplingFrequency = -1;
+ _samplesIn20MsAudio = -1;
+ _encodingRate = -1;
+ }
+
+ _hasInternalDTX = true;
+ _dtxEnabled = false;
+ _vbrEnabled = false;
+ _complMode = 3; // default complexity value
+
+ return;
+}
+
+ACMSPEEX::~ACMSPEEX()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcSpeex_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcSpeex_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+ return;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalEncode(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16* bitStreamLenByte)
+{
+ WebRtc_Word16 status;
+ WebRtc_Word16 numEncodedSamples = 0;
+ WebRtc_Word16 n = 0;
+
+ while( numEncodedSamples < _frameLenSmpl)
+ {
+ status = WebRtcSpeex_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
+ _encodingRate);
+
+        // Increment the read index to tell the caller how far
+        // we have advanced in reading the audio buffer.
+ _inAudioIxRead += _samplesIn20MsAudio;
+ numEncodedSamples += _samplesIn20MsAudio;
+
+ if(status < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in Speex encoder");
+ return status;
+ }
+
+ // Update VAD, if internal DTX is used
+ if(_hasInternalDTX && _dtxEnabled)
+ {
+ _vadLabel[n++] = status;
+ _vadLabel[n++] = status;
+ }
+
+ if(status == 0)
+ {
+            // This frame is detected as inactive. We need to send whatever
+            // has been encoded so far.
+ *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
+ (WebRtc_Word16*)bitStream);
+
+ return *bitStreamLenByte;
+ }
+ }
+
+ *bitStreamLenByte = WebRtcSpeex_GetBitstream(_encoderInstPtr,
+ (WebRtc_Word16*)bitStream);
+ return *bitStreamLenByte;
+}
+
+WebRtc_Word16
+ACMSPEEX::DecodeSafe(
+ WebRtc_UWord8* /* bitStream */,
+ WebRtc_Word16 /* bitStreamLenByte */,
+ WebRtc_Word16* /* audio */,
+ WebRtc_Word16* /* audioSamples */,
+ WebRtc_Word8* /* speechType */)
+{
+ return 0;
+}
+
+WebRtc_Word16
+ACMSPEEX::EnableDTX()
+{
+ if(_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // enable DTX
+ if(WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1:0), _complMode, 1) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot enable DTX for Speex");
+ return -1;
+ }
+ _dtxEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+WebRtc_Word16
+ACMSPEEX::DisableDTX()
+{
+ if(!_dtxEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // disable DTX
+ if(WebRtcSpeex_EncoderInit(_encoderInstPtr, (_vbrEnabled ? 1:0), _complMode, 0) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot disable DTX for Speex");
+ return -1;
+ }
+ _dtxEnabled = false;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
+
+ return 0;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalInitEncoder(
+ WebRtcACMCodecParams* codecParams)
+{
+ // sanity check
+ if (_encoderInstPtr == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot initialize Speex encoder, instance does not exist");
+ return -1;
+ }
+
+ WebRtc_Word16 status = SetBitRateSafe((codecParams->codecInstant).rate);
+ status += (WebRtcSpeex_EncoderInit(_encoderInstPtr, _vbrEnabled, _complMode, ((codecParams->enableDTX)? 1:0)) < 0)? -1:0;
+
+ if (status >= 0) {
+ return 0;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in initialization of Speex encoder");
+ return -1;
+ }
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalInitDecoder(
+ WebRtcACMCodecParams* /* codecParams */)
+{
+ WebRtc_Word16 status;
+
+ // sanity check
+ if (_decoderInstPtr == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot initialize Speex decoder, instance does not exist");
+ return -1;
+ }
+ status = ((WebRtcSpeex_DecoderInit(_decoderInstPtr) < 0)? -1:0);
+
+ if (status >= 0) {
+ return 0;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in initialization of Speex decoder");
+ return -1;
+ }
+}
+
+WebRtc_Word32
+ACMSPEEX::CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst)
+{
+ if (!_decoderInitialized)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error, Speex decoder is not initialized");
+ return -1;
+ }
+
+    // Fill up the structure by calling "SET_CODEC_PAR" &
+    // "SET_SPEEX_FUNCTIONS", then let NetEQ add the codec
+    // to its database.
+
+ switch(_samplingFrequency)
+ {
+ case 8000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderSPEEX_8, codecInst.pltype,
+ _decoderInstPtr, 8000);
+ break;
+ }
+ case 16000:
+ {
+ SET_CODEC_PAR((codecDef), kDecoderSPEEX_16, codecInst.pltype,
+ _decoderInstPtr, 16000);
+ break;
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Unsupported sampling frequency for Speex");
+
+ return -1;
+ break;
+ }
+ }
+
+ SET_SPEEX_FUNCTIONS((codecDef));
+ return 0;
+}
+
+ACMGenericCodec*
+ACMSPEEX::CreateInstance(void)
+{
+ return NULL;
+}
+
+WebRtc_Word16
+ACMSPEEX::InternalCreateEncoder()
+{
+ return WebRtcSpeex_CreateEnc(&_encoderInstPtr, _samplingFrequency);
+}
+
+void
+ACMSPEEX::DestructEncoderSafe()
+{
+ if(_encoderInstPtr != NULL)
+ {
+ WebRtcSpeex_FreeEnc(_encoderInstPtr);
+ _encoderInstPtr = NULL;
+ }
+    // There is no encoder instance; reset the following flags
+ _encoderExist = false;
+ _encoderInitialized = false;
+ _encodingRate = 0;
+}
+
+
+WebRtc_Word16
+ACMSPEEX::InternalCreateDecoder()
+{
+ return WebRtcSpeex_CreateDec(&_decoderInstPtr, _samplingFrequency, 1);
+}
+
+void
+ACMSPEEX::DestructDecoderSafe()
+{
+ if(_decoderInstPtr != NULL)
+ {
+ WebRtcSpeex_FreeDec(_decoderInstPtr);
+ _decoderInstPtr = NULL;
+ }
+    // There is no decoder instance; reset the following flags
+ _decoderExist = false;
+ _decoderInitialized = false;
+}
+
+WebRtc_Word16
+ACMSPEEX::SetBitRateSafe(
+ const WebRtc_Word32 rate)
+{
+ // Check if changed rate
+ if (rate == _encodingRate) {
+ return 0;
+ } else if (rate > 2000) {
+ _encodingRate = rate;
+ _encoderParams.codecInstant.rate = rate;
+ } else {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Unsupported encoding rate for Speex");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+void
+ACMSPEEX::InternalDestructEncoderInst(
+ void* ptrInst)
+{
+ if(ptrInst != NULL)
+ {
+ WebRtcSpeex_FreeEnc((SPEEX_encinst_t_*)ptrInst);
+ }
+ return;
+}
+
+
+WebRtc_Word16
+ACMSPEEX::UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType)
+{
+ if(payloadType != _decoderParams.codecInstant.pltype)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot unregister codec %s given payload-type %d does not match \
+the stored payload type %d",
+ _decoderParams.codecInstant.plname,
+ payloadType,
+ _decoderParams.codecInstant.pltype);
+ return -1;
+ }
+
+
+ switch(_samplingFrequency)
+ {
+ case 8000:
+ {
+ return netEq->RemoveCodec(kDecoderSPEEX_8);
+ }
+ case 16000:
+ {
+ return netEq->RemoveCodec(kDecoderSPEEX_16);
+ }
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Could not unregister Speex from NetEQ. Sampling frequency doesn't match");
+ return -1;
+ }
+ }
+}
+
+
+#ifdef UNUSEDSPEEX
+
+// This API is currently not in use. If enabling/disabling VBR is requested,
+// an ACM API needs to be added.
+WebRtc_Word16
+ACMSPEEX::EnableVBR()
+{
+ if(_vbrEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // enable Variable Bit Rate (VBR)
+ if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 1, _complMode, (_dtxEnabled? 1:0)) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Cannot enable VBR mode for Speex");
+
+ return -1;
+ }
+ _vbrEnabled = true;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+}
+
+
+// This API is currently not in use. If enabling/disabling VBR is requested,
+// an ACM API needs to be added.
+WebRtc_Word16
+ACMSPEEX::DisableVBR()
+{
+ if(!_vbrEnabled)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+    {
+        // disable VBR
+        if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, _complMode, (_dtxEnabled? 1:0)) < 0)
+        {
+            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                "Cannot disable VBR for Speex");
+
+ return -1;
+ }
+ _vbrEnabled = false;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, therefore disabling is harmless
+ return 0;
+ }
+}
+
+// This API is currently not in use. If setting the complexity is requested,
+// an ACM API needs to be added.
+WebRtc_Word16
+ACMSPEEX::SetComplMode(
+ WebRtc_Word16 mode)
+{
+ // Check if new mode
+ if(mode == _complMode)
+ {
+ return 0;
+ }
+    else if(_encoderExist) // check if encoder exists
+ {
+ // Set new mode
+ if(WebRtcSpeex_EncoderInit(_encoderInstPtr, 0, mode, (_dtxEnabled? 1:0)) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+ "Error in complexity mode for Speex");
+ return -1;
+ }
+ _complMode = mode;
+ return 0;
+ }
+ else
+ {
+        // encoder doesn't exist, therefore there is nothing to set
+ return 0;
+ }
+}
+
+#endif
+
+#endif
+
+} // namespace webrtc
+
diff --git a/src/modules/audio_coding/main/source/acm_speex.h b/src/modules/audio_coding/main/source/acm_speex.h
new file mode 100644
index 0000000..e03c2cd
--- /dev/null
+++ b/src/modules/audio_coding/main/source/acm_speex.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_SPEEX_H
+#define ACM_SPEEX_H
+
+#include "acm_generic_codec.h"
+
+namespace webrtc
+{
+
+// forward declaration
+struct SPEEX_encinst_t_;
+struct SPEEX_decinst_t_;
+
+class ACMSPEEX : public ACMGenericCodec
+{
+public:
+ ACMSPEEX(WebRtc_Word16 codecID);
+ ~ACMSPEEX();
+ // for FEC
+ ACMGenericCodec* CreateInstance(void);
+
+ WebRtc_Word16 InternalEncode(
+ WebRtc_UWord8* bitstream,
+ WebRtc_Word16* bitStreamLenByte);
+
+ WebRtc_Word16 InternalInitEncoder(
+ WebRtcACMCodecParams *codecParams);
+
+ WebRtc_Word16 InternalInitDecoder(
+ WebRtcACMCodecParams *codecParams);
+
+protected:
+ WebRtc_Word16 DecodeSafe(
+ WebRtc_UWord8* bitStream,
+ WebRtc_Word16 bitStreamLenByte,
+ WebRtc_Word16* audio,
+ WebRtc_Word16* audioSamples,
+ WebRtc_Word8* speechType);
+
+ WebRtc_Word32 CodecDef(
+ WebRtcNetEQ_CodecDef& codecDef,
+ const CodecInst& codecInst);
+
+ void DestructEncoderSafe();
+
+ void DestructDecoderSafe();
+
+ WebRtc_Word16 InternalCreateEncoder();
+
+ WebRtc_Word16 InternalCreateDecoder();
+
+ void InternalDestructEncoderInst(
+ void* ptrInst);
+
+ WebRtc_Word16 SetBitRateSafe(
+ const WebRtc_Word32 rate);
+
+ WebRtc_Word16 EnableDTX();
+
+ WebRtc_Word16 DisableDTX();
+
+#ifdef UNUSEDSPEEX
+ WebRtc_Word16 EnableVBR();
+
+ WebRtc_Word16 DisableVBR();
+
+ WebRtc_Word16 SetComplMode(
+ WebRtc_Word16 mode);
+#endif
+
+ WebRtc_Word16 UnregisterFromNetEqSafe(
+ ACMNetEQ* netEq,
+ WebRtc_Word16 payloadType);
+
+ SPEEX_encinst_t_* _encoderInstPtr;
+ SPEEX_decinst_t_* _decoderInstPtr;
+ WebRtc_Word16 _complMode;
+ bool _vbrEnabled;
+ WebRtc_Word32 _encodingRate;
+ WebRtc_Word16 _samplingFrequency;
+ WebRtc_UWord16 _samplesIn20MsAudio;
+};
+
+} // namespace webrtc
+
+#endif // ACM_SPEEX_H
diff --git a/src/modules/audio_coding/main/source/audio_coding_module.cc b/src/modules/audio_coding/main/source/audio_coding_module.cc
new file mode 100644
index 0000000..0ecad8d
--- /dev/null
+++ b/src/modules/audio_coding/main/source/audio_coding_module.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Disable MSVC warning 4267: 'conversion' from 'type1' to 'type2', possible loss of data.
+#pragma warning(disable: 4267)
+
+#include "acm_dtmf_detection.h"
+#include "audio_coding_module.h"
+#include "audio_coding_module_impl.h"
+#include "trace.h"
+
+namespace webrtc
+{
+
+// Create module
+AudioCodingModule*
+AudioCodingModule::Create(
+ const WebRtc_Word32 id)
+{
+ return new AudioCodingModuleImpl(id);
+}
+
+// Destroy module
+void
+AudioCodingModule::Destroy(
+ AudioCodingModule* module)
+{
+ delete static_cast<AudioCodingModuleImpl*> (module);
+}
+
+// Returns version of the module and its components.
+WebRtc_Word32
+AudioCodingModule::GetVersion(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position)
+{
+ WebRtc_Word32 len = position;
+ strncpy(&version[position], "AudioCodingModule 1.3.0\n", remainingBufferInBytes);
+ position = (WebRtc_UWord32)strlen(version);
+ remainingBufferInBytes -= (position - len);
+
+ if(ACMNetEQ::GetVersion(version,
+ remainingBufferInBytes, position) < 0)
+ {
+ return -1;
+ }
+
+ ACMCodecDB::initACMCodecDB();
+ if(ACMCodecDB::CodecsVersion(version,
+ remainingBufferInBytes, position) < 0)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+// Get number of supported codecs
+WebRtc_UWord8 AudioCodingModule::NumberOfCodecs()
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, -1,
+ "NumberOfCodecs()");
+ ACMCodecDB::initACMCodecDB();
+ return (WebRtc_UWord8)ACMCodecDB::NoOfCodecs();
+}
+
+// Get supported codec param with id
+WebRtc_Word32
+AudioCodingModule::Codec(
+ const WebRtc_UWord8 listId,
+ CodecInst& codec)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, -1,
+ "Codec(const WebRtc_UWord8 listId, CodecInst& codec)");
+ ACMCodecDB::initACMCodecDB();
+
+ // Get the codec settings for the codec with the given list ID
+ return ACMCodecDB::Codec(listId, &codec);
+}
+
+// Get supported codec Param with name
+WebRtc_Word32
+AudioCodingModule::Codec(
+ const WebRtc_Word8* payloadName,
+ CodecInst& codec,
+ const WebRtc_Word32 samplingFreqHz)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, -1,
+ "Codec(const WebRtc_Word8* payloadName, CodecInst& codec)");
+ ACMCodecDB::initACMCodecDB();
+
+ // Search through codec list for a matching name
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < ACMCodecDB::NoOfCodecs();
+ codecCntr++)
+ {
+        // Store codec settings for codec number "codecCntr" in the output struct
+ ACMCodecDB::Codec(codecCntr, &codec);
+
+ if(!STR_CASE_CMP(codec.plname, payloadName))
+ {
+ // If samplingFreqHz is set (!= -1), check if frequency matches
+ if((samplingFreqHz == codec.plfreq) || (samplingFreqHz == -1))
+ {
+ // We found a match, return OK
+ return 0;
+ }
+ }
+ }
+
+    // If we get here no matching codec was found;
+    // set the parameters to invalid values.
+ codec.plname[0] = '\0';
+ codec.pltype = -1;
+ codec.pacsize = 0;
+ codec.rate = 0;
+ codec.plfreq = 0;
+ return -1;
+}
+
+// Get supported codec Index with name, and frequency if needed
+WebRtc_Word32
+AudioCodingModule::Codec(
+ const WebRtc_Word8* payloadName,
+ const WebRtc_Word32 samplingFreqHz)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, -1,
+ "Codec(const WebRtc_Word8* payloadName)");
+ ACMCodecDB::initACMCodecDB();
+ CodecInst codec;
+
+ // Search through codec list for a matching name
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < ACMCodecDB::NoOfCodecs();
+ codecCntr++)
+ {
+        // Temporarily store codec settings for codec number "codecCntr" in "codec"
+ ACMCodecDB::Codec(codecCntr, &codec);
+
+ if(!STR_CASE_CMP(codec.plname, payloadName))
+ {
+ // If samplingFreqHz is set (!= -1), check if frequency matches
+ if((samplingFreqHz == codec.plfreq) || (samplingFreqHz == -1))
+ {
+ // We found a match, return codec list number (index)
+ return codecCntr;
+ }
+ }
+ }
+
+ // We did not find a matching codec in the list
+ return -1;
+}
+
+// Checks the validity of the parameters of the given codec
+bool
+AudioCodingModule::IsCodecValid(
+ const CodecInst& codec)
+{
+ WebRtc_Word16 mirrorID;
+ WebRtc_Word8 errMsg[500];
+
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, -1,
+ "IsCodecValid(const CodecInst& codec)");
+ ACMCodecDB::initACMCodecDB();
+ WebRtc_Word16 codecNumber = ACMCodecDB::CodecNumber(&codec, mirrorID, errMsg, 500);
+
+ if(codecNumber < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1, errMsg);
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+}
+
+} // namespace webrtc
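Not part of the original patch: a minimal sketch of how a caller might use the static helpers defined above to enumerate codecs, look one up by name, and create/destroy a module instance. The payload name "PCMU" is assumed to be present in the codec database; everything else follows the signatures shown in this file.

    #include "audio_coding_module.h"

    void EnumerateCodecsExample()
    {
        using namespace webrtc;

        CodecInst codec;
        WebRtc_UWord8 numCodecs = AudioCodingModule::NumberOfCodecs();
        for (WebRtc_UWord8 i = 0; i < numCodecs; i++)
        {
            if (AudioCodingModule::Codec(i, codec) == 0)
            {
                // codec.plname, codec.plfreq and codec.pltype describe entry i.
            }
        }

        // Look up a codec by payload name; passing -1 as the sampling
        // frequency would accept any frequency.
        if ((AudioCodingModule::Codec("PCMU", codec, 8000) == 0) &&
            AudioCodingModule::IsCodecValid(codec))
        {
            AudioCodingModule* acm = AudioCodingModule::Create(0);
            // ... register send/receive codecs and run the module ...
            AudioCodingModule::Destroy(acm);
        }
    }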
diff --git a/src/modules/audio_coding/main/source/audio_coding_module.gyp b/src/modules/audio_coding/main/source/audio_coding_module.gyp
new file mode 100644
index 0000000..b5753bf
--- /dev/null
+++ b/src/modules/audio_coding/main/source/audio_coding_module.gyp
@@ -0,0 +1,138 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../../../../common_settings.gypi', # Common settings
+ ],
+ 'targets': [
+ {
+ 'target_name': 'audio_coding_module',
+ 'type': '<(library)',
+ 'dependencies': [
+ '../../codecs/CNG/main/source/cng.gyp:CNG',
+ '../../codecs/G711/main/source/g711.gyp:G711',
+ '../../codecs/G722/main/source/g722.gyp:G722',
+ '../../codecs/iLBC/main/source/ilbc.gyp:iLBC',
+ '../../codecs/iSAC/main/source/isac.gyp:iSAC',
+ '../../codecs/iSAC/fix/source/isacfix.gyp:iSACFix',
+ '../../codecs/PCM16B/main/source/pcm16b.gyp:PCM16B',
+ '../../NetEQ/main/source/neteq.gyp:NetEq',
+ '../../../../common_audio/resampler/main/source/resampler.gyp:resampler',
+ '../../../../common_audio/signal_processing_library/main/source/spl.gyp:spl',
+ '../../../../common_audio/vad/main/source/vad.gyp:vad',
+ '../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '../interface',
+ '../../../interface',
+ ],
+ },
+ 'sources': [
+ # TODO: Remove files from here and P4 when ACM is slimmed down.
+ '../interface/audio_coding_module.h',
+ '../interface/audio_coding_module_typedefs.h',
+ 'acm_amr.cc',
+ 'acm_amr.h',
+ 'acm_amrwb.cc',
+ 'acm_amrwb.h',
+ 'acm_cng.cc',
+ 'acm_cng.h',
+ 'acm_codec_database.cc',
+ 'acm_codec_database.h',
+ 'acm_dtmf_detection.cc',
+ 'acm_dtmf_detection.h',
+ 'acm_dtmf_playout.cc',
+ 'acm_dtmf_playout.h',
+ 'acm_g722.cc',
+ 'acm_g722.h',
+ 'acm_g7221.cc',
+ 'acm_g7221.h',
+ 'acm_g7221c.cc',
+ 'acm_g7221c.h',
+ 'acm_g729.cc',
+ 'acm_g729.h',
+ 'acm_g7291.cc',
+ 'acm_g7291.h',
+ 'acm_generic_codec.cc',
+ 'acm_generic_codec.h',
+ 'acm_gsmfr.cc',
+ 'acm_gsmfr.h',
+ 'acm_ilbc.cc',
+ 'acm_ilbc.h',
+ 'acm_isac.cc',
+ 'acm_isac.h',
+ 'acm_isac_macros.h',
+ 'acm_neteq.cc',
+ 'acm_neteq.h',
+ 'acm_opus.cc',
+ 'acm_opus.h',
+ 'acm_speex.cc',
+ 'acm_speex.h',
+ 'acm_pcm16b.cc',
+ 'acm_pcm16b.h',
+ 'acm_pcma.cc',
+ 'acm_pcma.h',
+ 'acm_pcmu.cc',
+ 'acm_pcmu.h',
+ 'acm_red.cc',
+ 'acm_red.h',
+ 'acm_resampler.cc',
+ 'acm_resampler.h',
+ 'audio_coding_module.cc',
+ 'audio_coding_module_impl.cc',
+ 'audio_coding_module_impl.h',
+ ],
+ },
+ {
+ 'target_name': 'audio_coding_module_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'audio_coding_module',
+ '../../../../system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'sources': [
+ '../test/ACMTest.cpp',
+ '../test/APITest.cpp',
+ '../test/Channel.cpp',
+ '../test/EncodeDecodeTest.cpp',
+ '../test/EncodeToFileTest.cpp',
+ '../test/iSACTest.cpp',
+ '../test/PCMFile.cpp',
+ '../test/RTPFile.cpp',
+ '../test/SpatialAudio.cpp',
+ '../test/TestAllCodecs.cpp',
+ '../test/Tester.cpp',
+ '../test/TestFEC.cpp',
+ '../test/TestStereo.cpp',
+ '../test/TestVADDTX.cpp',
+ '../test/TimedTrace.cpp',
+ '../test/TwoWayCommunication.cpp',
+ '../test/utility.cpp',
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'cflags': [
+ '-fexceptions', # enable exceptions
+ ],
+ }],
+ ],
+ },
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/src/modules/audio_coding/main/source/audio_coding_module_impl.cc b/src/modules/audio_coding/main/source/audio_coding_module_impl.cc
new file mode 100644
index 0000000..75adb89
--- /dev/null
+++ b/src/modules/audio_coding/main/source/audio_coding_module_impl.cc
@@ -0,0 +1,2713 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "acm_codec_database.h"
+#include "acm_common_defs.h"
+#include "acm_dtmf_detection.h"
+#include "acm_generic_codec.h"
+#include "acm_resampler.h"
+#include "audio_coding_module_impl.h"
+#include "critical_section_wrapper.h"
+#include "engine_configurations.h"
+#include "rw_lock_wrapper.h"
+#include "trace.h"
+
+#include <assert.h>
+#include <stdlib.h>
+
+#ifdef ACM_QA_TEST
+# include <stdio.h>
+#endif
+
+#ifdef TIMED_LOGGING
+ char message[500];
+ #include "../test/timedtrace.h"
+ #include <string.h>
+ #define LOGWITHTIME(logString) \
+ sprintf(message, logString, _id); \
+ _trace.TimedLogg(message);
+#else
+ #define LOGWITHTIME(logString)
+#endif
+
+namespace webrtc
+{
+
+enum {
+ kACMToneEnd = 999
+};
+
+AudioCodingModuleImpl::AudioCodingModuleImpl(
+ const WebRtc_Word32 id):
+ _packetizationCallback(NULL),
+ _id(id),
+ _lastTimestamp(0),
+ _lastInTimestamp(0),
+ _vadEnabled(false),
+ _dtxEnabled(false),
+ _vadMode(VADNormal),
+ _stereoSend(false),
+ _currentSendCodecIdx(-1), // invalid value
+ _sendCodecRegistered(false),
+ _acmCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+ _vadCallback(NULL),
+ _lastRecvAudioCodecPlType(255),
+ _isFirstRED(true),
+ _fecEnabled(false),
+ _fragmentation(NULL),
+ _lastFECTimestamp(0),
+ _receiveREDPayloadType(255), // invalid value
+ _previousPayloadType(255),
+ _dummyRTPHeader(NULL),
+ _receiverInitialized(false),
+ _dtmfDetector(NULL),
+ _dtmfCallback(NULL),
+ _lastDetectedTone(kACMToneEnd),
+ _callbackCritSect(CriticalSectionWrapper::CreateCriticalSection())
+{
+ _lastTimestamp = 0xD87F3F9F;
+ _lastInTimestamp = 0xD87F3F9F;
+    // Set a placeholder codec name until a send codec is registered
+ strncpy(_sendCodecInst.plname, "noCodecRegistered", 31);
+ ACMCodecDB::initACMCodecDB();
+ for (WebRtc_Word16 i = 0; i < MAX_NR_OF_CODECS; i++)
+ {
+ _codecs[i] = NULL;
+ _registeredPlTypes[i] = -1;
+ _stereoReceive[i] = false;
+ _slaveCodecs[i] = NULL;
+ _mirrorCodecIdx[i] = -1;
+ }
+
+ _netEq.SetUniqueId(_id);
+
+ // Allocate memory for RED
+ _redBuffer = new WebRtc_UWord8[MAX_PAYLOAD_SIZE_BYTE];
+ _fragmentation = new RTPFragmentationHeader;
+ _fragmentation->fragmentationVectorSize = 2;
+ _fragmentation->fragmentationOffset = new WebRtc_UWord32[2];
+ _fragmentation->fragmentationLength = new WebRtc_UWord32[2];
+ _fragmentation->fragmentationTimeDiff = new WebRtc_UWord16[2];
+ _fragmentation->fragmentationPlType = new WebRtc_UWord8[2];
+
+ // Register the default payload type for RED and for
+ // CNG for the three frequencies 8, 16 and 32 kHz
+ for (int i = (ACMCodecDB::_noOfCodecs - 1); i>=0; i--)
+ {
+ if((STR_CASE_CMP(ACMCodecDB::_mycodecs[i].plname, "red") == 0))
+ {
+ _redPayloadType = ACMCodecDB::_mycodecs[i].pltype;
+ }
+ else if ((STR_CASE_CMP(ACMCodecDB::_mycodecs[i].plname, "CN") == 0))
+ {
+ if (ACMCodecDB::_mycodecs[i].plfreq == 8000)
+ {
+ memcpy(&_cngNB, &ACMCodecDB::_mycodecs[i], sizeof(_cngNB));
+ }
+ else if (ACMCodecDB::_mycodecs[i].plfreq == 16000)
+ {
+ memcpy(&_cngWB, &ACMCodecDB::_mycodecs[i], sizeof(_cngWB));
+ } else if (ACMCodecDB::_mycodecs[i].plfreq == 32000)
+ {
+ memcpy(&_cngSWB, &ACMCodecDB::_mycodecs[i], sizeof(_cngSWB));
+ }
+ }
+ }
+
+ if(InitializeReceiverSafe() < 0 )
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot initialize reciever");
+ }
+#ifdef TIMED_LOGGING
+ _trace.SetUp("TimedLogg.txt");
+#endif
+
+#ifdef ACM_QA_TEST
+ char fileName[500];
+ sprintf(fileName, "ACM_QA_incomingPL_%03d_%d%d%d%d%d%d.dat",
+ _id,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10);
+
+ _incomingPL = fopen(fileName, "wb");
+
+ sprintf(fileName, "ACM_QA_outgoingPL_%03d_%d%d%d%d%d%d.dat",
+ _id,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10);
+ _outgoingPL = fopen(fileName, "wb");
+#endif
+
+ WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id, "Created");
+}
+
+AudioCodingModuleImpl::~AudioCodingModuleImpl()
+{
+ {
+ CriticalSectionScoped lock(*_acmCritSect);
+ _currentSendCodecIdx = -1;
+
+ for (WebRtc_Word16 i=0; i < MAX_NR_OF_CODECS; i++)
+ {
+ if (_codecs[i] != NULL)
+ {
+ assert(_mirrorCodecIdx[i] > -1);
+ if(_codecs[_mirrorCodecIdx[i]] != NULL)
+ {
+ delete _codecs[_mirrorCodecIdx[i]];
+ _codecs[_mirrorCodecIdx[i]] = NULL;
+ }
+ _codecs[i] = NULL;
+ }
+
+ if(_slaveCodecs[i] != NULL)
+ {
+ assert(_mirrorCodecIdx[i] > -1);
+ if(_slaveCodecs[_mirrorCodecIdx[i]] != NULL)
+ {
+ delete _slaveCodecs[_mirrorCodecIdx[i]];
+ _slaveCodecs[_mirrorCodecIdx[i]] = NULL;
+ }
+ _slaveCodecs[i] = NULL;
+ }
+ }
+
+ if(_dtmfDetector != NULL)
+ {
+ delete _dtmfDetector;
+ _dtmfDetector = NULL;
+ }
+ if(_dummyRTPHeader != NULL)
+ {
+ delete _dummyRTPHeader;
+ _dummyRTPHeader = NULL;
+ }
+ if(_redBuffer != NULL)
+ {
+ delete [] _redBuffer;
+ _redBuffer = NULL;
+ }
+ if(_fragmentation != NULL)
+ {
+            // Only the fragmentation header needs to be deleted;
+            // it cleans up its own memory.
+ delete _fragmentation;
+ _fragmentation = NULL;
+ }
+ }
+
+
+#ifdef ACM_QA_TEST
+ if(_incomingPL != NULL)
+ {
+ fclose(_incomingPL);
+ }
+
+ if(_outgoingPL != NULL)
+ {
+ fclose(_outgoingPL);
+ }
+#endif
+
+ delete _callbackCritSect;
+ _callbackCritSect = NULL;
+
+ delete _acmCritSect;
+ _acmCritSect = NULL;
+ WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, _id, "Destroyed");
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::ChangeUniqueId(
+ const WebRtc_Word32 id)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ChangeUniqueId(new id:%d)", id);
+ {
+ CriticalSectionScoped lock(*_acmCritSect);
+ _id = id;
+#ifdef ACM_QA_TEST
+ if(_incomingPL != NULL)
+ {
+ fclose(_incomingPL);
+ }
+
+ if(_outgoingPL != NULL)
+ {
+ fclose(_outgoingPL);
+ }
+
+ char fileName[500];
+ sprintf(fileName, "ACM_QA_incomingPL_%03d_%d%d%d%d%d%d.dat",
+ _id,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10);
+
+ _incomingPL = fopen(fileName, "wb");
+
+ sprintf(fileName, "ACM_QA_outgoingPL_%03d_%d%d%d%d%d%d.dat",
+ _id,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10,
+ rand() % 10);
+ _outgoingPL = fopen(fileName, "wb");
+#endif
+
+ for (WebRtc_Word16 i = 0; i < MAX_NR_OF_CODECS; i++)
+ {
+ if(_codecs[i] != NULL)
+ {
+ _codecs[i]->SetUniqueID(id);
+ }
+ }
+ }
+
+ _netEq.SetUniqueId(_id);
+ return 0;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::Version(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "Version()");
+ if(version == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Input buffer is NULL");
+ return -1;
+ }
+ return GetVersion(version, remainingBufferInBytes, position);
+}
+
+// Returns the number of milliseconds until the module wants a
+// worker thread to call Process.
+WebRtc_Word32
+AudioCodingModuleImpl::TimeUntilNextProcess()
+{
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("TimeUntilNextProcess"))
+ {
+ return -1;
+ }
+ return _codecs[_currentSendCodecIdx]->SamplesLeftToEncode() /
+ (_sendCodecInst.plfreq / 1000);
+}
+
+// Process any pending tasks such as timeouts
+WebRtc_Word32
+AudioCodingModuleImpl::Process()
+{
+ WebRtc_UWord8 bitStream[2 * MAX_PAYLOAD_SIZE_BYTE]; // Make room for 1 RED payload
+ WebRtc_Word16 lengthBytes = 2 * MAX_PAYLOAD_SIZE_BYTE;
+ WebRtc_UWord32 rtpTimestamp;
+ WebRtc_Word16 status;
+ WebRtcACMEncodingType encodingType;
+ FrameType frameType = kAudioFrameSpeech;
+ WebRtc_Word16 redLengthBytes;
+ WebRtc_UWord8 currentPayloadType;
+ bool hasDataToSend = false;
+ bool fecActive = false;
+ WebRtc_UWord32 dummyFragLength;
+
+ // keep the scope of the ACM critical section limited
+ {
+ CriticalSectionScoped lock(*_acmCritSect);
+ if(!HaveValidEncoder("Process"))
+ {
+ return -1;
+ }
+
+ status = _codecs[_currentSendCodecIdx]->Encode(bitStream, &lengthBytes,
+ &rtpTimestamp, &encodingType);
+ if (status < 0) // Encode failed
+ {
+ // logging error
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Process(): Encoding Failed");
+ lengthBytes = 0;
+ return -1;
+ }
+ else if(status == 0)
+ {
+ // Not enough data
+ return 0;
+ }
+ else
+ {
+ switch(encodingType)
+ {
+ case kNoEncoding:
+ {
+ currentPayloadType = _previousPayloadType;
+ frameType = kFrameEmpty;
+ lengthBytes = 0;
+ break;
+ }
+ case kActiveNormalEncoded:
+ case kPassiveNormalEncoded:
+ {
+ currentPayloadType = (WebRtc_UWord8)_sendCodecInst.pltype;
+ frameType = kAudioFrameSpeech;
+ break;
+ }
+ case kPassiveDTXNB:
+ {
+ currentPayloadType = (WebRtc_UWord8)_cngNB.pltype;
+ frameType = kAudioFrameCN;
+ _isFirstRED = true;
+ break;
+ }
+ case kPassiveDTXWB:
+ {
+ currentPayloadType = (WebRtc_UWord8)_cngWB.pltype;
+ frameType = kAudioFrameCN;
+ _isFirstRED = true;
+ break;
+ }
+ case kPassiveDTXSWB:
+ {
+ currentPayloadType = (WebRtc_UWord8)_cngSWB.pltype;
+ frameType = kAudioFrameCN;
+ _isFirstRED = true;
+ break;
+ }
+
+ default:
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Process(): Wrong Encoding-Type");
+ return -1;
+ }
+ }
+ hasDataToSend = true;
+ _previousPayloadType = currentPayloadType;
+
+ // Redundancy encoding is done here: the two bitstreams are
+ // packetized into one RTP packet and the fragmentation points
+ // are set.
+ // RED is only applied to speech data.
+
+ if((_fecEnabled) &&
+ ((encodingType == kActiveNormalEncoded) ||
+ (encodingType == kPassiveNormalEncoded)))
+ {
+ // FEC is enabled within this scope.
+ //
+ // Note that a special solution exists for iSAC, since it is the only codec for
+ // which getRedPayload has a non-empty implementation.
+ //
+ // Summary of the FEC scheme below (use iSAC as example):
+ //
+ // 1st (_isFirstRED is true) encoded iSAC frame (primary #1) =>
+ // - call getRedPayload() and store redundancy for packet #1 in second
+ // fragment of RED buffer (old data)
+ // - drop the primary iSAC frame
+ // - don't call SendData
+ // 2nd (_isFirstRED is false) encoded iSAC frame (primary #2) =>
+ // - store primary #2 in 1st fragment of RED buffer and send the combined
+ // packet
+ // - the transmitted packet contains primary #2 (new) and redundancy for
+ // packet #1 (old)
+ // - call getRedPayload() and store redundancy for packet #2 in second
+ // fragment of RED buffer
+ //
+ // ...
+ //
+ // Nth encoded iSAC frame (primary #N) =>
+ // - store primary #N in 1st fragment of RED buffer and send the combined
+ // packet
+ // - the transmitted packet contains primary #N (new) and redundancy for
+ // packet #(N-1) (old)
+ // - call getRedPayload() and store redundancy for packet #N in second
+ // fragment of RED buffer
+ //
+ // For all other codecs, getRedPayload does nothing and returns -1 =>
+ // redundant data is only a copy.
+ //
+ // First combined packet contains : #2 (new) and #1 (old)
+ // Second combined packet contains: #3 (new) and #2 (old)
+ // Third combined packet contains : #4 (new) and #3 (old)
+ //
+ // Hence, even if every second packet is dropped, perfect reconstruction is
+ // possible.
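+ //
+ // Rough illustration (sizes are examples only) of what is handed to
+ // SendData() once RED is active, using the offsets set up in
+ // InitializeSender()/SetFECStatus():
+ //
+ //   bitStream[fragmentationOffset[0] = 0]                     : primary #N, fragmentationLength[0] bytes
+ //   bitStream[fragmentationOffset[1] = MAX_PAYLOAD_SIZE_BYTE] : redundancy #(N-1), fragmentationLength[1] bytes
+ //
+ // The combined packet is sent with payload type _redPayloadType, while the
+ // per-block payload types and time offsets travel in *_fragmentation.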
+ fecActive = true;
+
+ hasDataToSend = false;
+ if(!_isFirstRED) // skip this part for the first packet in a RED session
+ {
+ // Rearrange bitStream such that FEC packets are included.
+ // Replace bitStream now that we have stored current bitStream.
+ memcpy(bitStream + _fragmentation->fragmentationOffset[1], _redBuffer,
+ _fragmentation->fragmentationLength[1]);
+ // Update the fragmentation time difference vector
+ WebRtc_UWord16 timeSinceLastTimestamp =
+ WebRtc_UWord16(rtpTimestamp - _lastFECTimestamp);
+
+ // Update fragmentation vectors
+ _fragmentation->fragmentationPlType[1] =
+ _fragmentation->fragmentationPlType[0];
+ _fragmentation->fragmentationTimeDiff[1] = timeSinceLastTimestamp;
+ hasDataToSend = true;
+ }
+
+ // Insert new packet length.
+ _fragmentation->fragmentationLength[0] = lengthBytes;
+
+ // Insert new packet payload type.
+ _fragmentation->fragmentationPlType[0] = currentPayloadType;
+ _lastFECTimestamp = rtpTimestamp;
+
+ // can be modified by the getRedPayload() call if iSAC is utilized
+ redLengthBytes = lengthBytes;
+ // A fragmentation header is provided => packetization according to RFC 2198
+ // (RTP Payload for Redundant Audio Data) will be used.
+ // First fragment is the current data (new).
+ // Second fragment is the previous data (old).
+ lengthBytes =
+ static_cast<WebRtc_Word16> (_fragmentation->fragmentationLength[0] +
+ _fragmentation->fragmentationLength[1]);
+
+ // Get, and store, redundant data from the encoder based on the recently
+ // encoded frame.
+ // NOTE - only iSAC contains an implementation; all other codecs do nothing
+ // and return -1.
+ if (_codecs[_currentSendCodecIdx]->GetRedPayload(_redBuffer,
+ &redLengthBytes) == -1)
+ {
+ // The codec was not iSAC => use current encoder output as redundant data
+ // instead (trivial FEC scheme)
+ memcpy(_redBuffer, bitStream, redLengthBytes);
+ }
+
+ // Temporarily store the RED length.
+ dummyFragLength = redLengthBytes;
+
+ _isFirstRED = false;
+ // Update payload type with RED payload type
+ currentPayloadType = _redPayloadType;
+ }
+ }
+ }
+
+ if(hasDataToSend)
+ {
+ CriticalSectionScoped lock(*_callbackCritSect);
+#ifdef ACM_QA_TEST
+ if(_outgoingPL != NULL)
+ {
+ fwrite(&rtpTimestamp, sizeof(WebRtc_UWord32), 1, _outgoingPL);
+ fwrite(&currentPayloadType, sizeof(WebRtc_UWord8), 1, _outgoingPL);
+ fwrite(&lengthBytes, sizeof(WebRtc_Word16), 1, _outgoingPL);
+ }
+#endif
+
+ if(_packetizationCallback != NULL)
+ {
+ if (fecActive) {
+ _packetizationCallback->SendData(frameType, currentPayloadType,
+ rtpTimestamp, bitStream, lengthBytes, _fragmentation);
+ } else {
+ _packetizationCallback->SendData(frameType, currentPayloadType,
+ rtpTimestamp, bitStream, lengthBytes, NULL);
+ }
+ }
+
+ // This is for test
+ if(_vadCallback != NULL)
+ {
+ _vadCallback->InFrameType(((WebRtc_Word16)encodingType));
+ }
+ }
+ if (fecActive) {
+ _fragmentation->fragmentationLength[1] = dummyFragLength;
+ }
+ return lengthBytes;
+}
+
+
+
+
+/////////////////////////////////////////
+// Sender
+//
+
+// Initialize send codec
+WebRtc_Word32
+AudioCodingModuleImpl::InitializeSender()
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "InitializeSender()");
+
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ _sendCodecRegistered = false;
+ _currentSendCodecIdx = -1; // invalid value
+
+ _sendCodecInst.plname[0] = '\0';
+
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < MAX_NR_OF_CODECS; codecCntr++)
+ {
+ if(_codecs[codecCntr] != NULL)
+ {
+ _codecs[codecCntr]->DestructEncoder();
+ }
+ }
+ // Initialize FEC/RED
+ _isFirstRED = true;
+ if(_fecEnabled)
+ {
+ if(_redBuffer != NULL)
+ {
+ memset(_redBuffer, 0, MAX_PAYLOAD_SIZE_BYTE);
+ }
+ if(_fragmentation != NULL)
+ {
+ _fragmentation->fragmentationVectorSize = 2;
+ _fragmentation->fragmentationOffset[0] = 0;
+ _fragmentation->fragmentationOffset[1] = MAX_PAYLOAD_SIZE_BYTE;
+ memset(_fragmentation->fragmentationLength, 0, sizeof(WebRtc_UWord32) * 2);
+ memset(_fragmentation->fragmentationTimeDiff, 0, sizeof(WebRtc_UWord16) * 2);
+ memset(_fragmentation->fragmentationPlType, 0, sizeof(WebRtc_UWord8) * 2);
+ }
+ }
+
+ return 0;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::ResetEncoder()
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ResetEncoder()");
+
+ CriticalSectionScoped lock(*_acmCritSect);
+ if(!HaveValidEncoder("ResetEncoder"))
+ {
+ return -1;
+ }
+ return _codecs[_currentSendCodecIdx]->ResetEncoder();
+}
+
+void
+AudioCodingModuleImpl::UnregisterSendCodec()
+{
+ CriticalSectionScoped lock(*_acmCritSect);
+ _sendCodecRegistered = false;
+ _currentSendCodecIdx = -1; // invalid value
+
+ return;
+}
+
+ACMGenericCodec*
+AudioCodingModuleImpl::CreateCodec(
+ const CodecInst& codec)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "CreateCodec()");
+
+ ACMGenericCodec* myCodec = NULL;
+
+ myCodec = ACMCodecDB::CreateCodecInstance(&codec);
+ if(myCodec == NULL)
+ {
+ // Error, could not create the codec
+
+ // logging error
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ACMCodecDB::CreateCodecInstance() failed in \
+CreateCodec()");
+ return myCodec;
+ }
+ myCodec->SetUniqueID(_id);
+ myCodec->SetNetEqDecodeLock(_netEq.DecodeLock());
+
+ return myCodec;
+}
+
+// can be called multiple times for Codec, CNG, RED
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterSendCodec(
+ const CodecInst& sendCodec)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "Registering Send Codec");
+
+ if((sendCodec.channels != 1) && (sendCodec.channels != 2))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Registering Send codec failed due to wrong number of channels, %d. Only\
+mono codecs are supported, i.e. channels=1.", sendCodec.channels);
+ return -1;
+ }
+
+ WebRtc_Word8 errMsg[500];
+ WebRtc_Word16 mirrorId;
+ WebRtc_Word16 codecID = ACMCodecDB::CodecNumber(&sendCodec, mirrorId, errMsg, 500);
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ // Check for reported errors from function CodecNumber()
+ if(codecID < 0)
+ {
+ if(!_sendCodecRegistered)
+ {
+ // This value has to be -1 (invalid) if there is no codec registered
+ _currentSendCodecIdx = -1; // invalid value
+ }
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id, errMsg);
+ // Failed to register Send Codec
+ return -1;
+ }
+
+ // telephone-event cannot be a send codec
+ if(!STR_CASE_CMP(sendCodec.plname, "telephone-event"))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "telephone-event cannot be registered as send codec");
+ return -1;
+ }
+
+ // RED can be registered with another payload type. If not registered, a
+ // default payload type is used.
+ if(!STR_CASE_CMP(sendCodec.plname, "red"))
+ {
+ // Check if the payload-type is valid
+ if(ACMCodecDB::ValidPayloadType(sendCodec.pltype) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Invalid payload-type %d for %s.",
+ sendCodec.pltype, sendCodec.plname);
+ return -1;
+ }
+ // Set RED payload type
+ _redPayloadType = (WebRtc_UWord8)sendCodec.pltype;
+ return 0;
+ }
+
+ // CNG can be registered with another payload type. If not registered, the
+ // default payload types will be used: CNNB=13 (fixed), CNWB=97, CNSWB=98
+ if(!STR_CASE_CMP(sendCodec.plname, "CN"))
+ {
+ // CNG is registered
+
+ switch(sendCodec.plfreq)
+ {
+ case 8000:
+ {
+ memcpy(&_cngNB, &sendCodec, sizeof(_cngNB));
+ break;
+ }
+ case 16000:
+ {
+ memcpy(&_cngWB, &sendCodec, sizeof(_cngWB));
+ break;
+ }
+ case 32000:
+ {
+ memcpy(&_cngSWB, &sendCodec, sizeof(_cngSWB));
+ break;
+ }
+ default :
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RegisterSendCodec() failed, invalid frequency for CNG registeration");
+ return -1;
+ }
+ }
+
+ return 0;
+ }
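+ // At this point the codec is a regular speech codec; RED and CN are handled
+ // above by just recording their payload types (no encoder instance is
+ // created for them here).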
+
+ // Check if the payload-type is valid
+ if(ACMCodecDB::ValidPayloadType(sendCodec.pltype) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Invalid payload-type %d for %s.",
+ sendCodec.pltype, sendCodec.plname);
+ return -1;
+ }
+
+ // Check if codec supports the number of channels
+ if(ACMCodecDB::_channelSupport[codecID] < sendCodec.channels)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "%d number of channels not supportedn for %s.",
+ sendCodec.channels, sendCodec.plname);
+ return -1;
+ }
+
+ // Set Stereo
+ if (sendCodec.channels == 2)
+ {
+ _stereoSend = true;
+ }
+
+ // check if the codec is already registered as send codec
+ bool oldCodecFamily;
+ if(_sendCodecRegistered)
+ {
+ WebRtc_Word16 sendCodecMirrorID;
+ WebRtc_Word16 sendCodecID =
+ ACMCodecDB::CodecNumber(&_sendCodecInst, sendCodecMirrorID);
+ assert(sendCodecID >= 0);
+ oldCodecFamily = (sendCodecID == codecID) || (mirrorId == sendCodecMirrorID);
+ }
+ else
+ {
+ oldCodecFamily = false;
+ }
+
+ // If new codec, register
+ if (!oldCodecFamily)
+ {
+ if(_codecs[mirrorId] == NULL)
+ {
+
+ _codecs[mirrorId] = CreateCodec(sendCodec);
+ if(_codecs[mirrorId] == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Create the codec");
+ return -1;
+ }
+ _mirrorCodecIdx[mirrorId] = mirrorId;
+ }
+
+ if(mirrorId != codecID)
+ {
+ _codecs[codecID] = _codecs[mirrorId];
+ _mirrorCodecIdx[codecID] = mirrorId;
+ }
+
+ ACMGenericCodec* tmpCodecPtr = _codecs[codecID];
+ WebRtc_Word16 status;
+ WebRtcACMCodecParams codecParams;
+
+ memcpy(&(codecParams.codecInstant), &sendCodec,
+ sizeof(CodecInst));
+ codecParams.enableVAD = _vadEnabled;
+ codecParams.enableDTX = _dtxEnabled;
+ codecParams.vadMode = _vadMode;
+ // force initialization
+ status = tmpCodecPtr->InitEncoder(&codecParams, true);
+
+ // Check if VAD was turned on, or if error is reported
+ if (status == 1) {
+ _vadEnabled = true;
+ } else if (status < 0)
+ {
+ // could not initialize the encoder
+
+ // Check if we already have a registered codec.
+ // Depending on that, different messages are logged.
+ if(!_sendCodecRegistered)
+ {
+ _currentSendCodecIdx = -1; // invalid value
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Initialize the encoder No Encoder is registered");
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Initialize the encoder, continue encoding \
+with the previously registered codec");
+ }
+ return -1;
+ }
+
+ // Everything is fine so we can replace the previous codec
+ // with this one
+ if(_sendCodecRegistered)
+ {
+ // If we change codec we start fresh with FEC.
+ // This is not strictly required by the standard.
+ _isFirstRED = true;
+
+ if(tmpCodecPtr->SetVAD(_dtxEnabled, _vadEnabled, _vadMode) < 0){
+ // SetVAD failed
+ _vadEnabled = false;
+ _dtxEnabled = false;
+ }
+
+ }
+
+ _currentSendCodecIdx = codecID;
+ _sendCodecRegistered = true;
+ memcpy(&_sendCodecInst, &sendCodec, sizeof(CodecInst));
+ _previousPayloadType = _sendCodecInst.pltype;
+ return 0;
+ }
+ else
+ {
+ // If the codec is the same as the one already registered, check whether
+ // any parameters have changed compared to the current values.
+ // If a changed parameter is valid, apply it and record the new value.
+ bool forceInit = false;
+
+ if(mirrorId != codecID)
+ {
+ _codecs[codecID] = _codecs[mirrorId];
+ _mirrorCodecIdx[codecID] = mirrorId;
+ }
+
+ // check the payload-type
+ if(sendCodec.pltype != _sendCodecInst.pltype)
+ {
+ // At this point check if the given payload type is valid.
+ // Record it later when the sampling frequency is changed
+ // successfully.
+ if(ACMCodecDB::ValidPayloadType(sendCodec.pltype) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Out of range payload type");
+ return -1;
+ }
+
+ }
+
+ // If ONE instance of a codec supports multiple sampling frequencies,
+ // we need to take care of it here. One such codec is iSAC: both WB and
+ // SWB are encoded and decoded with a single iSAC instance. Therefore,
+ // we may need to update the encoder frequency.
+ if(_sendCodecInst.plfreq != sendCodec.plfreq)
+ {
+ forceInit = true;
+
+ // if sampling frequency is changed we have to start fresh with RED.
+ _isFirstRED = true;
+ }
+
+ if(_sendCodecInst.pacsize != sendCodec.pacsize)
+ {
+ forceInit = true;
+ }
+
+ if(forceInit)
+ {
+ WebRtcACMCodecParams codecParams;
+
+ memcpy(&(codecParams.codecInstant), &sendCodec,
+ sizeof(CodecInst));
+ codecParams.enableVAD = _vadEnabled;
+ codecParams.enableDTX = _dtxEnabled;
+ codecParams.vadMode = _vadMode;
+
+ // force initialization
+ if(_codecs[_currentSendCodecIdx]->InitEncoder(&codecParams, true) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Could not change the codec packet-size.");
+ return -1;
+ }
+
+ _sendCodecInst.plfreq = sendCodec.plfreq;
+ _sendCodecInst.pacsize = sendCodec.pacsize;
+ }
+
+ // If the change of sampling frequency has been successful then
+ // we store the payload-type.
+ _sendCodecInst.pltype = sendCodec.pltype;
+
+ // check if a change in Rate is required
+ if(sendCodec.rate != _sendCodecInst.rate)
+ {
+ if(_codecs[codecID]->SetBitRate(sendCodec.rate) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Could not change the codec rate.");
+ return -1;
+ }
+ _sendCodecInst.rate = sendCodec.rate;
+ }
+ _previousPayloadType = _sendCodecInst.pltype;
+
+ return 0;
+ }
+}
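+// A minimal usage sketch (illustrative values only; any codec known to
+// ACMCodecDB can be used, and acm is a pointer to the AudioCodingModule):
+//
+//   CodecInst codec;
+//   strcpy(codec.plname, "L16");    // codec name
+//   codec.plfreq   = 16000;         // sampling frequency in Hz
+//   codec.pltype   = 96;            // RTP payload type
+//   codec.pacsize  = 160;           // samples per packet (10 ms at 16 kHz)
+//   codec.channels = 1;             // mono
+//   codec.rate     = 256000;        // bit rate in bits/s
+//   acm->RegisterSendCodec(codec);  // returns 0 on success, -1 on failure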
+
+// get current send codec
+WebRtc_Word32
+AudioCodingModuleImpl::SendCodec(
+ CodecInst& currentSendCodec) const
+{
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "SendCodec()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!_sendCodecRegistered)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "SendCodec Failed, no codec is registered");
+
+ return -1;
+ }
+ WebRtcACMCodecParams encoderParam;
+ _codecs[_currentSendCodecIdx]->EncoderParams(&encoderParam);
+ encoderParam.codecInstant.pltype = _sendCodecInst.pltype;
+ memcpy(&currentSendCodec, &(encoderParam.codecInstant),
+ sizeof(CodecInst));
+
+ return 0;
+}
+
+// get current send freq
+WebRtc_Word32
+AudioCodingModuleImpl::SendFrequency() const
+{
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "SendFrequency()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!_sendCodecRegistered)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "SendFrequency Failed, no codec is registered");
+
+ return -1;
+ }
+
+ return _sendCodecInst.plfreq;
+}
+
+// Get encode bitrate
+// Adaptive rate codecs return their current encode target rate, while other
+// codecs return their long-term average or their fixed rate.
+WebRtc_Word32
+AudioCodingModuleImpl::SendBitrate() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SendBitrate()");
+
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!_sendCodecRegistered)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "SendBitrate Failed, no codec is registered");
+
+ return -1;
+ }
+
+ WebRtcACMCodecParams encoderParam;
+ _codecs[_currentSendCodecIdx]->EncoderParams(&encoderParam);
+
+ return encoderParam.codecInstant.rate;
+}
+
+// set available bandwidth, inform the encoder about the estimated bandwidth
+// received from the remote party
+WebRtc_Word32
+AudioCodingModuleImpl::SetReceivedEstimatedBandwidth(
+ const WebRtc_Word32 bw )
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetReceivedEstimatedBandwidth()");
+ return _codecs[_currentSendCodecIdx]->SetEstimatedBandwidth(bw);
+}
+
+// register a transport callback which will be called to deliver
+// the encoded buffers
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterTransportCallback(
+ AudioPacketizationCallback* transport)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "RegisterTransportCallback()");
+ CriticalSectionScoped lock(*_callbackCritSect);
+ _packetizationCallback = transport;
+ return 0;
+}
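+// Usage note (illustrative, not an API requirement): the registered
+// AudioPacketizationCallback receives every encoded frame from Process();
+// a typical transport implementation builds an RTP packet from
+// (payloadType, timeStamp, payload) and, when a fragmentation header is
+// supplied, packs it as RED according to RFC 2198.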
+
+// Used by the module to deliver messages to the codec module/application
+// AVT(DTMF)
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterIncomingMessagesCallback(
+#ifndef WEBRTC_DTMF_DETECTION
+ AudioCodingFeedback* /* incomingMessagesCallback */,
+ const ACMCountries /* cpt */)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "RegisterIncomingMessagesCallback()");
+ return -1;
+#else
+ AudioCodingFeedback* incomingMessagesCallback,
+ const ACMCountries cpt)
+{
+ WebRtc_Word16 status = 0;
+
+ // Enter the critical section for callback
+ {
+ CriticalSectionScoped lock(*_callbackCritSect);
+ _dtmfCallback = incomingMessagesCallback;
+ }
+ // enter the ACM critical section to set up the DTMF class.
+ {
+ CriticalSectionScoped lock(*_acmCritSect);
+ // Check if the call is to disable or enable the callback
+ if(incomingMessagesCallback == NULL)
+ {
+ // callback is disabled, delete DTMF-detector class
+ if(_dtmfDetector != NULL)
+ {
+ delete _dtmfDetector;
+ _dtmfDetector = NULL;
+ }
+ status = 0;
+ }
+ else
+ {
+ status = 0;
+ if(_dtmfDetector == NULL)
+ {
+ _dtmfDetector = new(ACMDTMFDetection);
+ if(_dtmfDetector == NULL)
+ {
+ status = -1;
+ }
+ }
+ if(status >= 0)
+ {
+ status = _dtmfDetector->Enable(cpt);
+ if(status < 0)
+ {
+ // Failed to enable DTMF detection. Delete the detector, set the
+ // callback to NULL (below) and return -1.
+ delete _dtmfDetector;
+ _dtmfDetector = NULL;
+ }
+ }
+ }
+ }
+ // check if we failed in setting up the DTMF-detector class
+ if((status < 0))
+ {
+ // we failed, we cannot have the callback
+ CriticalSectionScoped lock(*_callbackCritSect);
+ _dtmfCallback = NULL;
+ }
+
+ return status;
+#endif
+}
+
+
+// Add 10MS of raw (PCM) audio data to the encoder
+WebRtc_Word32
+AudioCodingModuleImpl::Add10MsData(
+ const AudioFrame& audioFrame)
+{
+ // Do we have a codec registered?
+ CriticalSectionScoped lock(*_acmCritSect);
+ if(!HaveValidEncoder("Add10MsData"))
+ {
+ return -1;
+ }
+
+ if(audioFrame._payloadDataLengthInSamples == 0)
+ {
+ assert(false);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Add 10 ms audio, payload length is zero");
+ return -1;
+ }
+ // Allow for 8, 16, 32 and 48kHz input audio
+ if((audioFrame._frequencyInHz != 8000) &&
+ (audioFrame._frequencyInHz != 16000) &&
+ (audioFrame._frequencyInHz != 32000) &&
+ (audioFrame._frequencyInHz != 48000))
+ {
+ assert(false);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Add 10 ms audio, input frequency not valid");
+ return -1;
+ }
+
+
+ // Check that length and frequency match; we currently only support raw PCM.
+ if((audioFrame._frequencyInHz/ 100) !=
+ audioFrame._payloadDataLengthInSamples)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Add 10 ms audio, input frequency and length doesn't \
+match");
+ return -1;
+ }
+
+ // Calculate the timestamp that should be pushed to codec.
+ // This might be different from the timestamp of the frame
+ // due to re-sampling
+ bool resamplingRequired =
+ ((WebRtc_Word32)audioFrame._frequencyInHz != _sendCodecInst.plfreq);
+
+ WebRtc_UWord32 currentTimestamp;
+ WebRtc_Word32 status;
+ // if it is required, we have to do a resampling
+ if(resamplingRequired)
+ {
+ WebRtc_Word16 resampledAudio[WEBRTC_10MS_PCM_AUDIO];
+ WebRtc_Word32 sendPlFreq = _sendCodecInst.plfreq;
+ WebRtc_UWord32 diffInputTimestamp;
+ WebRtc_Word16 newLengthSmpl;
+
+ // calculate the timestamp of this frame
+ if(_lastInTimestamp > audioFrame._timeStamp)
+ {
+ // a wrap around has happened
+ diffInputTimestamp = ((WebRtc_UWord32)0xFFFFFFFF - _lastInTimestamp)
+ + audioFrame._timeStamp;
+ }
+ else
+ {
+ diffInputTimestamp = audioFrame._timeStamp - _lastInTimestamp;
+ }
+ currentTimestamp = _lastTimestamp + (WebRtc_UWord32)(diffInputTimestamp *
+ ((double)_sendCodecInst.plfreq / (double)audioFrame._frequencyInHz));
+
+ newLengthSmpl = _inputResampler.Resample10Msec(
+ audioFrame._payloadData, audioFrame._frequencyInHz,
+ resampledAudio, sendPlFreq, _sendCodecInst.channels);
+
+ if(newLengthSmpl < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot add 10 ms audio, resmapling failed");
+ return -1;
+ }
+ status = _codecs[_currentSendCodecIdx]->Add10MsData(currentTimestamp,
+ resampledAudio, newLengthSmpl, audioFrame._audioChannel);
+ }
+ else
+ {
+ currentTimestamp = audioFrame._timeStamp;
+
+ status = _codecs[_currentSendCodecIdx]->Add10MsData(currentTimestamp,
+ audioFrame._payloadData, audioFrame._payloadDataLengthInSamples,
+ audioFrame._audioChannel);
+ }
+ _lastInTimestamp = audioFrame._timeStamp;
+ _lastTimestamp = currentTimestamp;
+ return status;
+}
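+// Timestamp scaling example (illustrative numbers): with 48000 Hz capture and
+// a 16000 Hz send codec, a 10 ms frame advances the input timestamp by 480 but
+// the codec timestamp by 480 * (16000 / 48000) = 160, which is what the
+// diffInputTimestamp * (plfreq / frequencyInHz) scaling above computes
+// (with 32-bit wrap-around of the input timestamp handled explicitly).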
+
+/////////////////////////////////////////
+// (FEC) Forward Error Correction
+//
+
+bool
+AudioCodingModuleImpl::FECStatus() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "FECStatus()");
+ CriticalSectionScoped lock(*_acmCritSect);
+ return _fecEnabled;
+}
+
+// configure FEC status i.e on/off
+WebRtc_Word32
+AudioCodingModuleImpl::SetFECStatus(
+#ifdef WEBRTC_CODEC_RED
+ const bool enableFEC)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetFECStatus()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if (_fecEnabled != enableFEC)
+ {
+ // Reset the RED buffer
+ memset(_redBuffer, 0, MAX_PAYLOAD_SIZE_BYTE);
+
+ // Reset fragmentation buffers
+ _fragmentation->fragmentationVectorSize = 2;
+ _fragmentation->fragmentationOffset[0] = 0;
+ _fragmentation->fragmentationOffset[1] = MAX_PAYLOAD_SIZE_BYTE;
+ memset(_fragmentation->fragmentationLength, 0, sizeof(WebRtc_UWord32) * 2);
+ memset(_fragmentation->fragmentationTimeDiff, 0, sizeof(WebRtc_UWord16) * 2);
+ memset(_fragmentation->fragmentationPlType, 0, sizeof(WebRtc_UWord8) * 2);
+
+ // set _fecEnabled
+ _fecEnabled = enableFEC;
+ }
+ _isFirstRED = true; // Make sure we restart FEC
+ return 0;
+#else
+ const bool /* enableFEC */)
+{
+ _fecEnabled = false;
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+ " WEBRTC_CODEC_RED is undefined => _fecEnabled = %d", _fecEnabled);
+ return -1;
+#endif
+}
+
+
+/////////////////////////////////////////
+// (VAD) Voice Activity Detection
+//
+
+WebRtc_Word32
+AudioCodingModuleImpl::SetVAD(
+ const bool enableDTX,
+ const bool enableVAD,
+ const ACMVADMode vadMode)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetVAD()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ // sanity check of the mode
+ if((vadMode != VADNormal) &&
+ (vadMode != VADLowBitrate) &&
+ (vadMode != VADAggr) &&
+ (vadMode != VADVeryAggr))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Invalid VAD Mode %d, no change is made to VAD/DTX status",
+ (int)vadMode);
+ return -1;
+ }
+
+ // If a send codec is registered, set VAD/DTX for the codec
+ if(HaveValidEncoder("SetVAD")) {
+ WebRtc_Word16 status =
+ _codecs[_currentSendCodecIdx]->SetVAD(enableDTX, enableVAD, vadMode);
+ if(status == 1) {
+ // Vad was enabled;
+ _vadEnabled = true;
+ _dtxEnabled = enableDTX;
+ _vadMode = vadMode;
+
+ return 0;
+ } else if (status < 0) {
+ // SetVAD failed
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "SetVAD failed");
+
+ _vadEnabled = false;
+ _dtxEnabled = false;
+
+ return -1;
+ }
+ }
+
+ _vadEnabled = enableVAD;
+ _dtxEnabled = enableDTX;
+ _vadMode = vadMode;
+
+ return 0;
+}
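+// Note: SetVAD() on the codec returning 1 means that the codec turned VAD on
+// itself (typically because DTX requires VAD), so _vadEnabled can end up true
+// even if enableVAD was false.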
+
+WebRtc_Word32
+AudioCodingModuleImpl::VAD(
+ bool& dtxEnabled,
+ bool& vadEnabled,
+ ACMVADMode& vadMode) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "VAD()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ dtxEnabled = _dtxEnabled;
+ vadEnabled = _vadEnabled;
+ vadMode = _vadMode;
+
+ return 0;
+}
+
+/////////////////////////////////////////
+// Receiver
+//
+
+WebRtc_Word32
+AudioCodingModuleImpl::InitializeReceiver()
+{
+ CriticalSectionScoped lock(*_acmCritSect);
+ return InitializeReceiverSafe();
+}
+
+// Initialize receiver, resets codec database etc
+WebRtc_Word32
+AudioCodingModuleImpl::InitializeReceiverSafe()
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "InitializeReceiver()");
+ WebRtc_Word16 noCodecs = ACMCodecDB::NoOfCodecs();
+ int i = 0;
+
+ // If the receiver is already initialized then we also want to destruct
+ // any existing decoders. After a call to this function, we should have
+ // a clean start-up.
+ if(_receiverInitialized)
+ {
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < noCodecs; codecCntr++)
+ {
+ if(UnregisterReceiveCodecSafe(codecCntr) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitializeReceiver() failed, Could not unregister codec");
+ return -1;
+ }
+ }
+ }
+ if (_netEq.Init() != 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "InitializeReceiver() failed, Could not initialize NetEq");
+ return -1;
+ }
+ _netEq.SetUniqueId(_id);
+ if (_netEq.AllocatePacketBuffer(ACMCodecDB::NetEqDecoders(),
+ ACMCodecDB::NoNetEqDecoders()) != 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "NetEq cannot allocatePacket Buffer");
+ return -1;
+ }
+
+ // Register RED and CN
+ int regInNeteq = 0;
+ for (i = (ACMCodecDB::_noOfCodecs - 1); i>-1; i--) {
+ if((STR_CASE_CMP(ACMCodecDB::_mycodecs[i].plname, "red") == 0)) {
+ regInNeteq = 1;
+ } else if ((STR_CASE_CMP(ACMCodecDB::_mycodecs[i].plname, "CN") == 0)) {
+ regInNeteq = 1;
+ }
+
+ if (regInNeteq == 1) {
+ if(RegisterRecCodecMSSafe(ACMCodecDB::_mycodecs[i], i, i,
+ ACMNetEQ::masterJB) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot register master codec.");
+ return -1;
+ }
+ _registeredPlTypes[i] = ACMCodecDB::_mycodecs[i].pltype;
+ regInNeteq = 0;
+ }
+ }
+
+ _receiverInitialized = true;
+ return 0;
+}
+
+// Reset the decoder state
+WebRtc_Word32
+AudioCodingModuleImpl::ResetDecoder()
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ResetDecoder()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < MAX_NR_OF_CODECS; codecCntr++)
+ {
+ if((_codecs[codecCntr] != NULL) && (_registeredPlTypes[codecCntr] != -1))
+ {
+ if(_codecs[codecCntr]->ResetDecoder(_registeredPlTypes[codecCntr]) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "ResetDecoder failed:");
+ return -1;
+ }
+ }
+ }
+ return _netEq.FlushBuffers();
+}
+
+// get current receive freq
+WebRtc_Word32
+AudioCodingModuleImpl::ReceiveFrequency() const
+{
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "ReceiveFrequency()");
+ WebRtcACMCodecParams codecParams;
+
+ CriticalSectionScoped lock(*_acmCritSect);
+ if(DecoderParamByPlType(_lastRecvAudioCodecPlType, codecParams) < 0)
+ {
+ return _netEq.CurrentSampFreqHz();
+ }
+ else
+ {
+ return codecParams.codecInstant.plfreq;
+ }
+}
+
+// get current playout freq
+WebRtc_Word32
+AudioCodingModuleImpl::PlayoutFrequency() const
+{
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "PlayoutFrequency()");
+
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ return _netEq.CurrentSampFreqHz();
+}
+
+
+// register possible receive codecs, can be called multiple times,
+// for codecs, CNG (NB, WB and SWB), DTMF, RED
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterReceiveCodec(
+ const CodecInst& receiveCodec)
+{
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "RegisterReceiveCodec()");
+
+ if(receiveCodec.channels > 2)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "More than 2 audio channel is not supported.");
+ return -1;
+ }
+
+ WebRtc_Word16 mirrorId;
+ WebRtc_Word16 codecId = ACMCodecDB::ReceiverCodecNumber(receiveCodec, mirrorId);
+
+ if(codecId < 0 || codecId >= ACMCodecDB::NoOfCodecs())
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Wrong codec params to be registered as receive codec");
+ return -1;
+ }
+ // Check if the payload-type is valid.
+ if(ACMCodecDB::ValidPayloadType(receiveCodec.pltype) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Invalid payload-type %d for %s.",
+ receiveCodec.pltype, receiveCodec.plname);
+ return -1;
+ }
+
+ if(!_receiverInitialized)
+ {
+ if(InitializeReceiverSafe() < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot initialize reciver, so failed registering a codec.");
+ return -1;
+ }
+ }
+
+ // If codec already registered, start with unregistering
+ if(_registeredPlTypes[codecId] != -1)
+ {
+ if(UnregisterReceiveCodecSafe(codecId) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot register master codec.");
+ return -1;
+ }
+ }
+
+ if(RegisterRecCodecMSSafe(receiveCodec, codecId, mirrorId,
+ ACMNetEQ::masterJB) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot register master codec.");
+ return -1;
+ }
+
+
+ // If receiving stereo, make sure we have two instances of NetEQ, one for each channel.
+ if(receiveCodec.channels == 2)
+ {
+ if(_netEq.NumSlaves() < 1)
+ {
+ if(_netEq.AddSlave(ACMCodecDB::NetEqDecoders(),
+ ACMCodecDB::NoNetEqDecoders()) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot Add Slave jitter buffer to NetEq.");
+ return -1;
+ }
+ }
+
+ if(RegisterRecCodecMSSafe(receiveCodec, codecId, mirrorId,
+ ACMNetEQ::slaveJB) < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot register slave codec.");
+ return -1;
+ }
+
+ if((_stereoReceive[codecId] == false) &&
+ (_lastRecvAudioCodecPlType == receiveCodec.pltype))
+ {
+ _lastRecvAudioCodecPlType = -1;
+ }
+ _stereoReceive[codecId] = true;
+ }
+ else
+ {
+ _stereoReceive[codecId] = false;
+ }
+
+ _registeredPlTypes[codecId] = receiveCodec.pltype;
+
+ if(!STR_CASE_CMP(receiveCodec.plname, "RED"))
+ {
+ _receiveREDPayloadType = receiveCodec.pltype;
+ }
+ return 0;
+}
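+// Note on stereo: registering a receive codec with channels == 2 adds a slave
+// NetEQ jitter buffer (if not already present) and registers the decoder in
+// both the master and the slave instance; mono codecs only use the master.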
+
+
+
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterRecCodecMSSafe(
+ const CodecInst& receiveCodec,
+ WebRtc_Word16 codecId,
+ WebRtc_Word16 mirrorId,
+ ACMNetEQ::JB jitterBuffer)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "RegisterReceiveCodecMSSafe()");
+
+ ACMGenericCodec** codecArray;
+ if(jitterBuffer == ACMNetEQ::masterJB)
+ {
+ codecArray = &_codecs[0];
+ }
+ else if(jitterBuffer == ACMNetEQ::slaveJB)
+ {
+ codecArray = &_slaveCodecs[0];
+ }
+ else
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "RegisterReceiveCodecMSSafe failed, jitterBuffer is neither master or slave ");
+ return -1;
+ }
+
+ if (codecArray[mirrorId] == NULL)
+ {
+ codecArray[mirrorId] = CreateCodec(receiveCodec);
+ if(codecArray[mirrorId] == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot create codec to register as receive codec");
+ return -1;
+ }
+ _mirrorCodecIdx[mirrorId] = mirrorId;
+ }
+ if(mirrorId != codecId)
+ {
+ codecArray[codecId] = codecArray[mirrorId];
+ _mirrorCodecIdx[codecId] = mirrorId;
+ }
+
+ codecArray[codecId]->SetIsMaster(jitterBuffer == ACMNetEQ::masterJB);
+
+ WebRtc_Word16 status = 0;
+ bool registerInNetEq = true;
+ WebRtcACMCodecParams codecParams;
+ memcpy(&(codecParams.codecInstant), &receiveCodec,
+ sizeof(CodecInst));
+ codecParams.enableVAD = false;
+ codecParams.enableDTX = false;
+ codecParams.vadMode = VADNormal;
+ if (!codecArray[codecId]->DecoderInitialized())
+ {
+ // force initialization
+ status = codecArray[codecId]->InitDecoder(&codecParams, true);
+ if(status < 0)
+ {
+ // Could not initialize the decoder; we don't want to continue if
+ // initialization failed.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "could not initialize the receive codec, codec not registered");
+
+ return -1;
+ }
+ }
+ else if(mirrorId != codecId)
+ {
+ // Currently this only happens for iSAC;
+ // we have to store the decoder parameters.
+
+ codecArray[codecId]->SaveDecoderParam(&codecParams);
+ }
+ if (registerInNetEq)
+ {
+ if(codecArray[codecId]->RegisterInNetEq(&_netEq, receiveCodec)
+ != 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Receive codec could not be registered in NetEq");
+
+ return -1;
+ }
+ // Guarantee that the same payload-type that is
+ // registered in NetEq is stored in the codec.
+ codecArray[codecId]->SaveDecoderParam(&codecParams);
+ }
+
+ return status;
+}
+
+
+
+// Get current received codec
+WebRtc_Word32
+AudioCodingModuleImpl::ReceiveCodec(
+ CodecInst& currentReceiveCodec) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ReceiveCodec()");
+ WebRtc_Word16 decCntr;
+ WebRtcACMCodecParams decoderParam;
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ for(decCntr = 0; decCntr < MAX_NR_OF_CODECS; decCntr++)
+ {
+ if(_codecs[decCntr] != NULL)
+ {
+ if(_codecs[decCntr]->DecoderInitialized())
+ {
+ if(_codecs[decCntr]->DecoderParams(&decoderParam,
+ _lastRecvAudioCodecPlType))
+ {
+ memcpy(&currentReceiveCodec, &decoderParam.codecInstant,
+ sizeof(CodecInst));
+ return 0;
+ }
+ }
+ }
+ }
+
+ // If we get here we haven't found any initialized decoder. Set the codec
+ // pltype to -1 to indicate that the structure is invalid and return -1.
+ currentReceiveCodec.pltype = -1;
+ return -1;
+}
+
+// Incoming packet from network parsed and ready for decode
+WebRtc_Word32
+AudioCodingModuleImpl::IncomingPacket(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo)
+{
+
+ if (payloadLength < 0)
+ {
+ // Log error
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "IncomingPacket() Error, payload-length cannot be negative");
+ return -1;
+ }
+ {
+ // Store the payload type. This will be used to retrieve "received codec"
+ // and "received frequency."
+ CriticalSectionScoped lock(*_acmCritSect);
+#ifdef ACM_QA_TEST
+ if(_incomingPL != NULL)
+ {
+ fwrite(&rtpInfo.header.timestamp, sizeof(WebRtc_UWord32), 1, _incomingPL);
+ fwrite(&rtpInfo.header.payloadType, sizeof(WebRtc_UWord8), 1, _incomingPL);
+ fwrite(&payloadLength, sizeof(WebRtc_Word16), 1, _incomingPL);
+ }
+#endif
+
+ WebRtc_UWord8 myPayloadType;
+
+ // Check if this is an RED payload
+ if(rtpInfo.header.payloadType == _receiveREDPayloadType)
+ {
+ // get the primary payload-type.
+ myPayloadType = (WebRtc_UWord8)(incomingPayload[0] & 0x7F);
+ }
+ else
+ {
+ myPayloadType = rtpInfo.header.payloadType;
+ }
+
+ // If payload is audio, check if received payload is different from previous
+ if((!rtpInfo.type.Audio.isCNG) &&
+ (myPayloadType != _cngNB.pltype) &&
+ (myPayloadType != _cngWB.pltype) &&
+ (myPayloadType != _cngSWB.pltype))
+ {
+ // This is Audio not CNG
+
+ if(myPayloadType != _lastRecvAudioCodecPlType)
+ {
+ // We detected a change in payload type. This matters for iSAC, since
+ // ONE iSAC instance is used for decoding both WB and SWB payloads; if
+ // the payload type changes we may need to reset the sampling rate of
+ // the decoder, depending on what we have received "now".
+ // TODO (tlegrand): Change or remove the following comment
+ // I cannot use the function that BV has written, i.e.
+ // "DecoderParamByPlType()" as for iSAC there is one instance and
+ // multiple payloads.
+ int i;
+ for(i = 0; i < MAX_NR_OF_CODECS; i++)
+ {
+ if(_registeredPlTypes[i] == myPayloadType)
+ {
+ if(_codecs[i] == NULL)
+ {
+ // We found the payload type but the corresponding
+ // codec is NULL; this should not happen.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "IncomingPacket() Error, payload type found but corresponding "
+ "codec is NULL");
+ return -1;
+ }
+ _codecs[i]->UpdateDecoderSampFreq(i);
+ _netEq.SetReceivedStereo(_stereoReceive[i]);
+ break;
+ }
+ }
+ }
+ _lastRecvAudioCodecPlType = myPayloadType;
+ }
+ }
+
+ return _netEq.RecIn(incomingPayload, payloadLength, rtpInfo);
+}
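+// RED header note (RFC 2198): the first octet of each block header is the
+// F bit (1 bit) followed by the block payload type (7 bits), which is why the
+// primary payload type is recovered above with incomingPayload[0] & 0x7F.
+// Example: a first octet of 0xE0 (F=1, PT=0x60) yields 0xE0 & 0x7F = 0x60.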
+
+// Minimum playout delay (Used for lip-sync)
+WebRtc_Word32
+AudioCodingModuleImpl::SetMinimumPlayoutDelay(
+ const WebRtc_Word32 timeMs)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetMinimumPlayoutDelay()");
+ if((timeMs < 0) || (timeMs > 1000))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Delay must be in the range of 0-1000 milliseconds.");
+ return -1;
+ }
+ return _netEq.SetExtraDelay(timeMs);
+}
+
+// current play out delay
+WebRtc_Word32
+AudioCodingModuleImpl::Delay(
+ WebRtc_UWord16& delayMs) const
+{
+ // NetEQ will get an API for this later.
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "Delay()");
+ return _netEq.Delay(delayMs);
+}
+
+// Get Dtmf playout status
+bool
+AudioCodingModuleImpl::DtmfPlayoutStatus() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "DtmfPlayoutStatus()");
+#ifndef WEBRTC_CODEC_AVT
+ return false;
+#else
+ return _netEq.AVTPlayout();
+#endif
+}
+
+// configure Dtmf playout status i.e on/off
+// playout the incoming outband Dtmf tone
+WebRtc_Word32
+AudioCodingModuleImpl::SetDtmfPlayoutStatus(
+#ifndef WEBRTC_CODEC_AVT
+ const bool /* enable */)
+{
+ WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, _id,
+ "SetDtmfPlayoutStatus() failed: AVT is not supported.");
+ return -1;
+#else
+ const bool enable)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetDtmfPlayoutStatus()");
+ return _netEq.SetAVTPlayout(enable);
+#endif
+}
+
+// Estimate the Bandwidth based on the incoming stream
+// This is also done in the RTP module
+// need this for one way audio where the RTCP send the BW estimate
+WebRtc_Word32
+AudioCodingModuleImpl::DecoderEstimatedBandwidth() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "DecoderEstimatedBandwidth()");
+
+ CodecInst codecInst;
+ WebRtc_Word16 codecID = -1;
+ int plTypWB;
+ int plTypSWB;
+
+ // Get iSAC settings
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < ACMCodecDB::NoOfCodecs();
+ codecCntr++)
+ {
+ // Store codec settings for codec number "codecCntr" in the output struct
+ ACMCodecDB::Codec(codecCntr, &codecInst);
+
+ if(!STR_CASE_CMP(codecInst.plname, "isac"))
+ {
+ codecID = 1;
+ plTypWB = codecInst.pltype;
+
+ ACMCodecDB::Codec(codecCntr+1, &codecInst);
+ plTypSWB = codecInst.pltype;
+
+ break;
+ }
+ }
+
+ if(codecID < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "DecoderEstimatedBandwidth failed");
+ return -1;
+ }
+
+ if ((_lastRecvAudioCodecPlType == plTypWB) || (_lastRecvAudioCodecPlType == plTypSWB))
+ {
+ return _codecs[codecID]->GetEstimatedBandwidth();
+ } else {
+ return -1;
+ }
+}
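+// Note: the estimate is only available while the last received audio payload
+// type was iSAC (WB or SWB); for any other codec this function returns -1.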
+
+// Set playout mode for: voice, fax, or streaming
+WebRtc_Word32
+AudioCodingModuleImpl::SetPlayoutMode(
+ const AudioPlayoutMode mode)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetPlayoutMode()");
+ if((mode != voice) &&
+ (mode != fax) &&
+ (mode != streaming))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Invalid playout mode.");
+ return -1;
+ }
+ return _netEq.SetPlayoutMode(mode);
+}
+
+// Get playout mode voice, fax
+AudioPlayoutMode
+AudioCodingModuleImpl::PlayoutMode() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "PlayoutMode()");
+ return _netEq.PlayoutMode();
+}
+
+
+// Get 10 milliseconds of raw audio data to play out
+// automatic resample to the requested frequency
+WebRtc_Word32
+AudioCodingModuleImpl::PlayoutData10Ms(
+ const WebRtc_Word32 desiredFreqHz,
+ AudioFrame& audioFrame)
+{
+ bool stereoMode;
+ AudioFrame audioFrameTmp;
+
+ // recOut always returns 10 ms
+ if (_netEq.RecOut(audioFrameTmp) != 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "PlayoutData failed, RecOut Failed");
+ return -1;
+ }
+
+ audioFrame._audioChannel = audioFrameTmp._audioChannel;
+ audioFrame._vadActivity = audioFrameTmp._vadActivity;
+ audioFrame._speechType = audioFrameTmp._speechType;
+
+ stereoMode = (audioFrameTmp._audioChannel > 1);
+ //For stereo playout:
+ // Master and Slave samples are interleaved starting with Master
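+ // Example: a 10 ms stereo frame at 8 kHz holds 160 values ordered
+ // M0 S0 M1 S1 ... M79 S79, so the master channel sits at the even
+ // indices (index n<<1 further down).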
+
+ const WebRtc_UWord16 recvFreq = static_cast<WebRtc_UWord16>(audioFrameTmp._frequencyInHz);
+ bool toneDetected = false;
+ WebRtc_Word16 lastDetectedTone;
+ WebRtc_Word16 tone;
+
+ // Limit the scope of the ACM critical section. The output resampler may
+ // not need to be inside the critical section, since it is only supposed
+ // to be called from this function, but keeping it here does not add
+ // noticeable complexity.
+ {
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if ((recvFreq != desiredFreqHz) && (desiredFreqHz != -1))
+ {
+ // resample payloadData
+ WebRtc_Word16 tmpLen = _outputResampler.Resample10Msec(
+ audioFrameTmp._payloadData, recvFreq, audioFrame._payloadData, desiredFreqHz,
+ audioFrameTmp._audioChannel);
+
+ if(tmpLen < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "PlayoutData failed, resampler failed");
+ return -1;
+ }
+
+ //Set the payload data length from the resampler
+ audioFrame._payloadDataLengthInSamples = (WebRtc_UWord16)tmpLen;
+ // set the sampling frequency
+ audioFrame._frequencyInHz = desiredFreqHz;
+ }
+ else
+ {
+ memcpy(audioFrame._payloadData, audioFrameTmp._payloadData,
+ audioFrameTmp._payloadDataLengthInSamples * audioFrame._audioChannel
+ * sizeof(WebRtc_Word16));
+ // set the payload length
+ audioFrame._payloadDataLengthInSamples = audioFrameTmp._payloadDataLengthInSamples;
+ // set the sampling frequency
+ audioFrame._frequencyInHz = recvFreq;
+ }
+
+ //Tone detection done for master channel
+ if(_dtmfDetector != NULL)
+ {
+ // Dtmf Detection
+ if(audioFrame._frequencyInHz == 8000)
+ {
+ // Use audioFrame._payloadData so the DTMF detector doesn't
+ // need resampling.
+ if(!stereoMode)
+ {
+ _dtmfDetector->Detect(audioFrame._payloadData,
+ audioFrame._payloadDataLengthInSamples,
+ audioFrame._frequencyInHz, toneDetected, tone);
+ }
+ else
+ {
+ // we are in 8 kHz so the master channel needs only 80 samples
+ WebRtc_Word16 masterChannel[80];
+ for(WebRtc_Word16 n = 0; n < 80; n++)
+ {
+ masterChannel[n] = audioFrame._payloadData[n<<1];
+ }
+ _dtmfDetector->Detect(masterChannel,
+ audioFrame._payloadDataLengthInSamples,
+ audioFrame._frequencyInHz, toneDetected, tone);
+ }
+ }
+ else
+ {
+ // Do the detection on the audio that we got from NetEQ (audioFrameTmp).
+ if(!stereoMode)
+ {
+ _dtmfDetector->Detect(audioFrameTmp._payloadData,
+ audioFrameTmp._payloadDataLengthInSamples, recvFreq,
+ toneDetected, tone);
+ }
+ else
+ {
+ WebRtc_Word16 masterChannel[WEBRTC_10MS_PCM_AUDIO];
+ WebRtc_Word16 n;
+ for(n = 0; n < audioFrameTmp._payloadDataLengthInSamples; n++)
+ {
+ masterChannel[n] = audioFrameTmp._payloadData[n<<1];
+ }
+ _dtmfDetector->Detect(masterChannel,
+ audioFrameTmp._payloadDataLengthInSamples, recvFreq,
+ toneDetected, tone);
+ }
+ }
+ }
+
+ // We want to do this while we are in _acmCritSect.
+ // The following variable doesn't really need to be initialized, but the
+ // Linux build complains if we don't.
+ lastDetectedTone = kACMToneEnd;
+ if(toneDetected)
+ {
+ lastDetectedTone = _lastDetectedTone;
+ _lastDetectedTone = tone;
+ }
+ }
+
+ if(toneDetected)
+ {
+ // we will deal with callback here, so enter callback critical
+ // section
+ CriticalSectionScoped lock(*_callbackCritSect);
+
+ if(_dtmfCallback != NULL)
+ {
+ if(tone != kACMToneEnd)
+ {
+ // just a tone
+ _dtmfCallback->IncomingDtmf((WebRtc_UWord8)tone, false);
+ }
+ else if((tone == kACMToneEnd) &&
+ (lastDetectedTone != kACMToneEnd))
+ {
+ // The tone is "END" and the previously detected tone is
+ // not "END," so call fir an end.
+ _dtmfCallback->IncomingDtmf((WebRtc_UWord8)lastDetectedTone,
+ true);
+ }
+ }
+ }
+
+ audioFrame._id = _id;
+ audioFrame._volume = -1;
+ audioFrame._energy = -1;
+ audioFrame._timeStamp = 0;
+
+ return 0;
+}
+
+
+
+/////////////////////////////////////////
+// (CNG) Comfort Noise Generation
+// Generate comfort noise when receiving DTX packets
+//
+
+// Get VAD status on the incoming stream
+bool
+AudioCodingModuleImpl::ReceiveVADStatus() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ReceiveVADStatus()");
+ return _netEq.VADStatus();
+}
+
+
+// configure VAD status i.e on/off on the incoming stream
+WebRtc_Word16
+AudioCodingModuleImpl::SetReceiveVADStatus(
+ const bool enable)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetReceiveVADStatus()");
+ return _netEq.SetVADStatus(enable);
+}
+
+// Get VAD aggressiveness on the incoming stream
+ACMVADMode
+AudioCodingModuleImpl::ReceiveVADMode() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ReceiveVADMode()");
+ return _netEq.VADMode();
+}
+
+// Configure VAD aggressiveness on the incoming stream
+WebRtc_Word16
+AudioCodingModuleImpl::SetReceiveVADMode(
+ const ACMVADMode mode)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetReceiveVADMode()");
+ return _netEq.SetVADMode(mode);
+}
+
+/////////////////////////////////////////
+// statistics
+//
+
+WebRtc_Word32
+AudioCodingModuleImpl::NetworkStatistics(
+ ACMNetworkStatistics& statistics) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "NetworkStatistics()");
+ WebRtc_Word32 status;
+ status = _netEq.NetworkStatistics(&statistics);
+ return status;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::JitterStatistics(
+ ACMJitterStatistics& jitterStatistics) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "JitterStatistics()");
+ return _netEq.JitterStatistics(&jitterStatistics);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::PreferredBufferSize(
+ WebRtc_UWord16& prefbufsize) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "PreferedBufferSize()");
+ return _netEq.PreferredBufferSize(&prefbufsize);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::ResetJitterStatistics() const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ResetJitterStatistics()");
+ return _netEq.ResetJitterStatistics();
+}
+
+void
+AudioCodingModuleImpl::DestructEncoderInst(
+ void* ptrInst)
+{
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _id,
+ "DestructEncoderInst()");
+ if(!HaveValidEncoder("DestructEncoderInst"))
+ {
+ return;
+ }
+
+ _codecs[_currentSendCodecIdx]->DestructEncoderInst(ptrInst);
+}
+
+WebRtc_Word16
+AudioCodingModuleImpl::AudioBuffer(
+ WebRtcACMAudioBuff& audioBuff)
+{
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _id,
+ "AudioBuffer()");
+ if(!HaveValidEncoder("AudioBuffer"))
+ {
+ return -1;
+ }
+
+ audioBuff.lastInTimestamp = _lastInTimestamp;
+ return _codecs[_currentSendCodecIdx]->AudioBuffer(audioBuff);
+}
+
+WebRtc_Word16
+AudioCodingModuleImpl::SetAudioBuffer(
+ WebRtcACMAudioBuff& audioBuff)
+{
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _id,
+ "SetAudioBuffer()");
+ if(!HaveValidEncoder("SetAudioBuffer"))
+ {
+ return -1;
+ }
+
+ return _codecs[_currentSendCodecIdx]->SetAudioBuffer(audioBuff);
+}
+
+
+WebRtc_UWord32
+AudioCodingModuleImpl::EarliestTimestamp() const
+{
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _id,
+ "EarliestTimestamp()");
+ if(!HaveValidEncoder("EarliestTimestamp"))
+ {
+ return -1;
+ }
+
+ return _codecs[_currentSendCodecIdx]->EarliestTimestamp();
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::RegisterVADCallback(
+ ACMVADCallback* vadCallback)
+{
+ WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, _id,
+ "RegisterVADCallback()");
+ CriticalSectionScoped lock(*_callbackCritSect);
+ _vadCallback = vadCallback;
+ return 0;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::IncomingPayload(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timestamp)
+{
+ if (payloadLength < 0)
+ {
+ // Log error in trace file.
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "IncomingPacket() Error, payload-length cannot be negative");
+ return -1;
+ }
+
+ if(_dummyRTPHeader == NULL)
+ {
+ // This is the first time that we are using _dummyRTPHeader
+ // so we have to create it.
+ WebRtcACMCodecParams codecParams;
+ _dummyRTPHeader = new WebRtcRTPHeader;
+ if (_dummyRTPHeader == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "IncomingPacket() Error, out of memory");
+ return -1;
+ }
+ _dummyRTPHeader->header.payloadType = payloadType;
+ // Doesn't matter in this case
+ _dummyRTPHeader->header.ssrc = 0;
+ _dummyRTPHeader->header.markerBit = false;
+ // start with random numbers
+ _dummyRTPHeader->header.sequenceNumber = rand();
+ _dummyRTPHeader->header.timestamp = (((WebRtc_UWord32)rand()) << 16) +
+ (WebRtc_UWord32)rand();
+ _dummyRTPHeader->type.Audio.channel = 1;
+
+ if(DecoderParamByPlType(payloadType, codecParams) < 0)
+ {
+ // We didn't find a codec with the given payload type. Something is
+ // wrong, so we exit, but first delete _dummyRTPHeader and set it to
+ // NULL to start clean next time.
+ delete _dummyRTPHeader;
+ _dummyRTPHeader = NULL;
+ return -1;
+ }
+ _recvPlFrameSizeSmpls = codecParams.codecInstant.pacsize;
+ }
+
+ if(payloadType != _dummyRTPHeader->header.payloadType)
+ {
+ // The payload type has changed since last time; we might need to
+ // update the frame size.
+ WebRtcACMCodecParams codecParams;
+ if(DecoderParamByPlType(payloadType, codecParams) < 0)
+ {
+ // We didn't find a codec with the given payload type;
+ // something is wrong, so we exit.
+ return -1;
+ }
+ _recvPlFrameSizeSmpls = codecParams.codecInstant.pacsize;
+ _dummyRTPHeader->header.payloadType = payloadType;
+ }
+
+ if(timestamp > 0)
+ {
+ _dummyRTPHeader->header.timestamp = timestamp;
+ }
+
+ // Store the payload type. This will be used to retrieve "received codec"
+ // and "received frequency."
+ _lastRecvAudioCodecPlType = payloadType;
+
+ // Insert in NetEQ
+ if(_netEq.RecIn(incomingPayload, payloadLength, (*_dummyRTPHeader)) < 0)
+ {
+ return -1;
+ }
+
+ // get ready for the next payload
+ _dummyRTPHeader->header.sequenceNumber++;
+ _dummyRTPHeader->header.timestamp += _recvPlFrameSizeSmpls;
+ return 0;
+}
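+// Dummy RTP header bookkeeping example: if the registered decoder has
+// pacsize == 160 samples, every call advances _dummyRTPHeader->header.timestamp
+// by 160 and the sequence number by 1, so successive payloads look like a
+// regular RTP stream to NetEQ.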
+
+WebRtc_Word16
+AudioCodingModuleImpl::DecoderParamByPlType(
+ const WebRtc_UWord8 payloadType,
+ WebRtcACMCodecParams& codecParams) const
+{
+ CriticalSectionScoped lock(*_acmCritSect);
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < MAX_NR_OF_CODECS; codecCntr++)
+ {
+ if(_codecs[codecCntr] != NULL)
+ {
+ if(_codecs[codecCntr]->DecoderInitialized())
+ {
+ if(_codecs[codecCntr]->DecoderParams(&codecParams,
+ payloadType))
+ {
+ return 0;
+ }
+ }
+ }
+ }
+ // If we get here it means that we could not find a codec with that
+ // payload type. Reset the values to invalid values and return -1.
+ codecParams.codecInstant.plname[0] = '\0';
+ codecParams.codecInstant.pacsize = 0;
+ codecParams.codecInstant.rate = 0;
+ codecParams.codecInstant.pltype = -1;
+ return -1;
+}
+
+
+
+WebRtc_Word16
+AudioCodingModuleImpl::DecoderListIDByPlName(
+ const WebRtc_Word8* payloadName,
+ const WebRtc_UWord16 sampFreqHz) const
+{
+ WebRtcACMCodecParams codecParams;
+ CriticalSectionScoped lock(*_acmCritSect);
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < MAX_NR_OF_CODECS; codecCntr++)
+ {
+ if((_codecs[codecCntr] != NULL))
+ {
+ if(_codecs[codecCntr]->DecoderInitialized())
+ {
+ assert(_registeredPlTypes[codecCntr] >= 0);
+ assert(_registeredPlTypes[codecCntr] <= 255);
+ _codecs[codecCntr]->DecoderParams(&codecParams,
+ (WebRtc_UWord8)_registeredPlTypes[codecCntr]);
+ if(!STR_CASE_CMP(codecParams.codecInstant.plname, payloadName))
+ {
+ // Check if the given sampling frequency matches.
+ // A zero sampling frequency means that matching the names
+ // is sufficient and we don't need to check the
+ // frequencies.
+ // Currently only iSAC has one name but two
+ // sampling frequencies.
+ if((sampFreqHz == 0) ||
+ (codecParams.codecInstant.plfreq == sampFreqHz))
+ {
+ return codecCntr;
+ }
+ }
+ }
+ }
+ }
+ // if we are here it means that we could not find a
+ // codec with that payload type. return -1;
+ return -1;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::LastEncodedTimestamp(WebRtc_UWord32& timestamp) const
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "LastEncodedTimestamp()");
+ CriticalSectionScoped lock(*_acmCritSect);
+ if(!HaveValidEncoder("LastEncodedTimestamp"))
+ {
+ return -1;
+ }
+ timestamp = _codecs[_currentSendCodecIdx]->LastEncodedTimestamp();
+ return 0;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::ReplaceInternalDTXWithWebRtc(bool useWebRtcDTX)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ReplaceInternalDTXWithWebRtc()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("ReplaceInternalDTXWithWebRtc"))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Cannot replace codec internal DTX when no send codec is registered.");
+ return -1;
+ }
+
+ WebRtc_Word32 res = _codecs[_currentSendCodecIdx]->ReplaceInternalDTX(useWebRtcDTX);
+ // Check if VAD is turned on, or if there is any error
+ if(res == 1)
+ {
+ _vadEnabled = true;
+ } else if(res < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Failed to set ReplaceInternalDTXWithWebRtc(%d)", useWebRtcDTX);
+ return res;
+ }
+
+ return 0;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::IsInternalDTXReplacedWithWebRtc(bool& usesWebRtcDTX)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "IsInternalDTXReplacedWithWebRtc()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("IsInternalDTXReplacedWithWebRtc"))
+ {
+ return -1;
+ }
+ if(_codecs[_currentSendCodecIdx]->IsInternalDTXReplaced(&usesWebRtcDTX) < 0)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+
+WebRtc_Word32
+AudioCodingModuleImpl::SetISACMaxRate(
+ const WebRtc_UWord32 maxRateBitPerSec)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetISACMaxRate()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("SetISACMaxRate"))
+ {
+ return -1;
+ }
+
+ return _codecs[_currentSendCodecIdx]->SetISACMaxRate(maxRateBitPerSec);
+}
+
+
+WebRtc_Word32
+AudioCodingModuleImpl::SetISACMaxPayloadSize(
+ const WebRtc_UWord16 maxPayloadLenBytes)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetISACPayloadSize()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("SetISACMaxPayloadSize"))
+ {
+ return -1;
+ }
+
+ return _codecs[_currentSendCodecIdx]->SetISACMaxPayloadSize(maxPayloadLenBytes);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "ConfigISACBandwidthEstimator()");
+ CriticalSectionScoped lock(*_acmCritSect);
+
+ if(!HaveValidEncoder("ConfigISACBandwidthEstimator"))
+ {
+ return -1;
+ }
+
+ return _codecs[_currentSendCodecIdx]->ConfigISACBandwidthEstimator(
+ initFrameSizeMsec, initRateBitPerSec, enforceFrameSize);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "SetBackgroundNoiseMode()");
+ if((mode < On) ||
+ (mode > Off))
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "The specified background noise is out of range.\n");
+ return -1;
+ }
+ return _netEq.SetBackgroundNoiseMode(mode);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::BackgroundNoiseMode(
+ ACMBackgroundNoiseMode& mode)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "BackgroundNoiseMode()");
+ return _netEq.BackgroundNoiseMode(mode);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::PlayoutTimestamp(
+ WebRtc_UWord32& timestamp)
+{
+ WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, _id,
+ "PlayoutTimestamp()");
+ return _netEq.PlayoutTimestamp(timestamp);
+}
+
+
+
+
+
+bool
+AudioCodingModuleImpl::HaveValidEncoder(
+ const WebRtc_Word8* callerName) const
+{
+ WebRtc_Word16 numCodecs = ACMCodecDB::NoOfCodecs();
+    if(!_sendCodecRegistered)
+    {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+            "%s failed: No send codec is registered.", callerName);
+        return false;
+    }
+    if((_currentSendCodecIdx < 0) ||
+        (_currentSendCodecIdx >= numCodecs))
+    {
+        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+            "%s failed: Send codec index out of range.", callerName);
+        return false;
+    }
+ if(_codecs[_currentSendCodecIdx] == NULL)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "%s failed: Send codec is NULL pointer.", callerName);
+ return false;
+ }
+ return true;
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::UnregisterReceiveCodec(
+ const WebRtc_Word16 payloadType)
+{
+ WEBRTC_TRACE(webrtc::kTraceModuleCall, webrtc::kTraceAudioCoding, _id,
+ "UnregisterReceiveCodec()");
+ CriticalSectionScoped lock(*_acmCritSect);
+ WebRtc_Word16 codecID;
+
+ // Search through the list of registered payload types
+ for (codecID = 0; codecID < MAX_NR_OF_CODECS; codecID++)
+ {
+ if (_registeredPlTypes[codecID] == payloadType)
+ {
+ // we have found the codecID registered with the payload type
+ break;
+ }
+ }
+
+ if(codecID >= ACMCodecDB::NoOfCodecs())
+ {
+ // payload type was not registered. No need to unregister
+ return 0;
+ }
+ else if(codecID < 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "UnregisterReceiveCodec() failed: the given codec, %s, is not supported",
+ payloadType);
+ return -1;
+ }
+
+ // Unregister the codec with the given payload type
+ return UnregisterReceiveCodecSafe(codecID);
+}
+
+WebRtc_Word32
+AudioCodingModuleImpl::UnregisterReceiveCodecSafe(
+ const WebRtc_Word16 codecID)
+{
+ WebRtcNetEQDecoder *neteqDecoder = ACMCodecDB::NetEqDecoders();
+ WebRtc_Word16 mirrorID = ACMCodecDB::MirrorID(codecID);
+ if(_codecs[codecID] != NULL)
+ {
+ if(_registeredPlTypes[codecID] != -1)
+ {
+            // Before deleting the decoder instance, unregister it
+            // from NetEQ.
+ if(_netEq.RemoveCodec(neteqDecoder[codecID], _stereoReceive[codecID]) < 0)
+ {
+ CodecInst codecInst;
+ ACMCodecDB::Codec(codecID, &codecInst);
+ WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
+ "Unregistering %s-%d from NetEq failed.",
+ codecInst.plname, codecInst.plfreq);
+ return -1;
+ }
+
+            // CN is a special case for NetEQ; all three sampling frequencies
+            // are deleted if one is deleted.
+ if(STR_CASE_CMP(ACMCodecDB::_mycodecs[codecID].plname, "CN") == 0)
+ {
+ // Search codecs nearby in the database to unregister all CN.
+ for (int i=-2; i<3; i++)
+ {
+ if (STR_CASE_CMP(ACMCodecDB::_mycodecs[codecID+i].plname, "CN") == 0)
+ {
+ _codecs[codecID+i]->DestructDecoder();
+ if(_stereoReceive[codecID+i])
+ {
+ _slaveCodecs[codecID+i]->DestructDecoder();
+ }
+ _registeredPlTypes[codecID+i] = -1;
+ }
+ }
+ } else
+ {
+ if(codecID == mirrorID)
+ {
+ _codecs[codecID]->DestructDecoder();
+ if(_stereoReceive[codecID])
+ {
+ _slaveCodecs[codecID]->DestructDecoder();
+ }
+ }
+ }
+ }
+ }
+
+ if(_registeredPlTypes[codecID] == _receiveREDPayloadType)
+ {
+ // RED is going to be unregistered.
+ // set the following to an invalid value.
+ _receiveREDPayloadType = 255;
+ }
+ _registeredPlTypes[codecID] = -1;
+
+ return 0;
+}
+
+
+WebRtc_Word32
+AudioCodingModuleImpl::REDPayloadISAC(
+ const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenByte)
+{
+
+ if(!HaveValidEncoder("EncodeData"))
+ {
+ return -1;
+ }
+ WebRtc_Word16 status;
+
+ status = _codecs[_currentSendCodecIdx]->REDPayloadISAC(isacRate, isacBwEstimate,
+ payload, payloadLenByte);
+
+ return status;
+}
+
+} // namespace webrtc
diff --git a/src/modules/audio_coding/main/source/audio_coding_module_impl.h b/src/modules/audio_coding/main/source/audio_coding_module_impl.h
new file mode 100644
index 0000000..e39c115
--- /dev/null
+++ b/src/modules/audio_coding/main/source/audio_coding_module_impl.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef AUDIO_CODING_MODULE_IMPL_H
+#define AUDIO_CODING_MODULE_IMPL_H
+
+#include "acm_codec_database.h"
+#include "acm_neteq.h"
+#include "acm_resampler.h"
+#include "common_types.h"
+#include "engine_configurations.h"
+
+namespace webrtc {
+
+class ACMDTMFDetection;
+class ACMGenericCodec;
+class CriticalSectionWrapper;
+class RWLockWrapper;
+
+//#define TIMED_LOGGING
+
+#ifdef TIMED_LOGGING
+ #include "../test/timedtrace.h"
+#endif
+
+#ifdef ACM_QA_TEST
+# include <stdio.h>
+#endif
+
+class AudioCodingModuleImpl : public AudioCodingModule
+{
+public:
+ // constructor
+ AudioCodingModuleImpl(
+ const WebRtc_Word32 id);
+
+ // destructor
+ ~AudioCodingModuleImpl();
+
+ // get version information for ACM and all components
+ WebRtc_Word32 Version(
+ WebRtc_Word8* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position) const;
+
+ // change the unique identifier of this object
+ virtual WebRtc_Word32 ChangeUniqueId(
+ const WebRtc_Word32 id);
+
+    // returns the number of milliseconds until the module wants
+ // a worker thread to call Process
+ WebRtc_Word32 TimeUntilNextProcess();
+
+ // Process any pending tasks such as timeouts
+ WebRtc_Word32 Process();
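+
+    // Illustrative only: a worker thread (an assumption, not part of this
+    // class) would typically drive the two calls above roughly as
+    //
+    //   while(keepRunning)
+    //   {
+    //       WebRtc_Word32 waitMs = acm->TimeUntilNextProcess();
+    //       if(waitMs > 0)
+    //       {
+    //           // sleep for waitMs milliseconds
+    //       }
+    //       acm->Process();
+    //   }
+    //
+    // where "acm" is a hypothetical pointer to this module.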
+
+ // used in conference to go to and from active encoding, hence
+ // in and out of mix
+ WebRtc_Word32 SetMode(
+ const bool passive);
+
+
+
+ /////////////////////////////////////////
+ // Sender
+ //
+
+ // initialize send codec
+ WebRtc_Word32 InitializeSender();
+
+ // reset send codec
+ WebRtc_Word32 ResetEncoder();
+
+ // can be called multiple times for Codec, CNG, RED
+ WebRtc_Word32 RegisterSendCodec(
+ const CodecInst& sendCodec);
+
+ // get current send codec
+ WebRtc_Word32 SendCodec(
+ CodecInst& currentSendCodec) const;
+
+ // get current send freq
+ WebRtc_Word32 SendFrequency() const;
+
+ // Get encode bitrate
+ // Adaptive rate codecs return their current encode target rate, while other codecs
+    // return their long-term average or their fixed rate.
+ WebRtc_Word32 SendBitrate() const;
+
+ // set available bandwidth, inform the encoder about the
+ // estimated bandwidth received from the remote party
+ virtual WebRtc_Word32 SetReceivedEstimatedBandwidth(
+ const WebRtc_Word32 bw);
+
+ // register a transport callback which will be
+ // called to deliver the encoded buffers
+ WebRtc_Word32 RegisterTransportCallback(
+ AudioPacketizationCallback* transport);
+
+ // Used by the module to deliver messages to the codec module/application
+ // AVT(DTMF)
+ WebRtc_Word32 RegisterIncomingMessagesCallback(
+ AudioCodingFeedback* incomingMessagesCallback,
+ const ACMCountries cpt);
+
+ // Add 10MS of raw (PCM) audio data to the encoder
+ WebRtc_Word32 Add10MsData(
+ const AudioFrame& audioFrame);
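+
+    // Minimal send-path sketch (illustrative only; assumes a send codec and
+    // a transport callback have already been registered, and "acm" is a
+    // hypothetical pointer to this module):
+    //
+    //   AudioFrame frame;
+    //   // ... fill "frame" with 10 ms of PCM from the capture device ...
+    //   if(acm->Add10MsData(frame) < 0)
+    //   {
+    //       // handle the error
+    //   }
+    //   acm->Process();  // may deliver an encoded packet to the registered
+    //                    // transport callback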
+
+ // set background noise mode for NetEQ, on, off or fade
+ WebRtc_Word32 SetBackgroundNoiseMode(
+ const ACMBackgroundNoiseMode mode);
+
+ // get current background noise mode
+ WebRtc_Word32 BackgroundNoiseMode(
+ ACMBackgroundNoiseMode& mode);
+
+ /////////////////////////////////////////
+ // (FEC) Forward Error Correction
+ //
+
+ // configure FEC status i.e on/off
+ WebRtc_Word32 SetFECStatus(
+ const bool enable);
+
+ // Get FEC status
+ bool FECStatus() const;
+
+ /////////////////////////////////////////
+ // (VAD) Voice Activity Detection
+ // and
+ // (CNG) Comfort Noise Generation
+ //
+
+ WebRtc_Word32 SetVAD(
+ const bool enableDTX = true,
+ const bool enableVAD = false,
+ const ACMVADMode vadMode = VADNormal);
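+
+    // Illustrative only: enabling both DTX and VAD with normal
+    // aggressiveness (assumes a send codec is registered and "acm" is a
+    // hypothetical pointer to this module):
+    //
+    //   acm->SetVAD(true, true, VADNormal);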
+
+ WebRtc_Word32 VAD(
+ bool& dtxEnabled,
+ bool& vadEnabled,
+ ACMVADMode& vadMode) const;
+
+ WebRtc_Word32 RegisterVADCallback(
+ ACMVADCallback* vadCallback);
+
+ // Get VAD status on the incoming stream
+ bool ReceiveVADStatus() const;
+
+ // configure VAD status i.e on/off on the incoming stream
+ WebRtc_Word16 SetReceiveVADStatus(
+ const bool enable);
+
+ // Get VAD aggressiveness on the incoming stream
+ ACMVADMode ReceiveVADMode() const;
+
+ // Configure VAD aggressiveness on the incoming stream
+ WebRtc_Word16 SetReceiveVADMode(
+ const ACMVADMode mode);
+
+
+ /////////////////////////////////////////
+ // Receiver
+ //
+
+ // initialize receiver, resets codec database etc
+ WebRtc_Word32 InitializeReceiver();
+
+ // reset the decoder state
+ WebRtc_Word32 ResetDecoder();
+
+ // get current receive freq
+ WebRtc_Word32 ReceiveFrequency() const;
+
+ // get current playout freq
+ WebRtc_Word32 PlayoutFrequency() const;
+
+    // register possible receive codecs; can be called multiple times,
+ // for codecs, CNG, DTMF, RED
+ WebRtc_Word32 RegisterReceiveCodec(
+ const CodecInst& receiveCodec);
+
+ // get current received codec
+ WebRtc_Word32 ReceiveCodec(
+ CodecInst& currentReceiveCodec) const;
+
+ // incoming packet from network parsed and ready for decode
+ WebRtc_Word32 IncomingPacket(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtcRTPHeader& rtpInfo);
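+
+    // Illustrative only: feeding one parsed RTP packet, where the rtpInfo
+    // fields are filled by the caller's RTP parser and "payloadBuffer",
+    // "payloadBytes" and "acm" are hypothetical variables owned by the
+    // caller:
+    //
+    //   WebRtcRTPHeader rtpInfo;
+    //   rtpInfo.header.payloadType = ...;
+    //   rtpInfo.header.sequenceNumber = ...;
+    //   rtpInfo.header.timestamp = ...;
+    //   acm->IncomingPacket(payloadBuffer, payloadBytes, rtpInfo);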
+
+    // Incoming payloads without RTP info; the RTP info will be created in ACM.
+    // One usage of this API is when pre-encoded files are pushed into ACM.
+ WebRtc_Word32 IncomingPayload(
+ const WebRtc_Word8* incomingPayload,
+ const WebRtc_Word32 payloadLength,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timestamp = 0);
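+
+    // Illustrative only: pushing one pre-encoded payload read from a file,
+    // where "payloadBuffer", "payloadBytes" and "plType" are hypothetical
+    // variables owned by the caller and "acm" is a pointer to this module:
+    //
+    //   if(acm->IncomingPayload(payloadBuffer, payloadBytes, plType) < 0)
+    //   {
+    //       // handle the error
+    //   }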
+
+    // Minimum playout delay (used for lip-sync)
+ WebRtc_Word32 SetMinimumPlayoutDelay(
+ const WebRtc_Word32 timeMs);
+
+ // current play out delay
+ WebRtc_Word32 Delay(WebRtc_UWord16& delayMs) const;
+
+    // configure DTMF playout status, i.e. whether to play out an incoming out-of-band DTMF tone
+ WebRtc_Word32 SetDtmfPlayoutStatus(
+ const bool enable);
+
+ // Get Dtmf playout status
+ bool DtmfPlayoutStatus() const;
+
+    // Estimate the bandwidth based on the incoming stream.
+    // This is also done in the RTP module.
+    // Needed for one-way audio where RTCP sends the BW estimate.
+ WebRtc_Word32 DecoderEstimatedBandwidth() const;
+
+ // Set playout mode voice, fax
+ WebRtc_Word32 SetPlayoutMode(
+ const AudioPlayoutMode mode);
+
+ // Get playout mode voice, fax
+ AudioPlayoutMode PlayoutMode() const;
+
+ // Get playout timestamp
+ WebRtc_Word32 PlayoutTimestamp(
+ WebRtc_UWord32& timestamp);
+
+    // Get 10 milliseconds of raw audio data to play out;
+    // automatically resampled to the requested frequency if > 0
+ WebRtc_Word32 PlayoutData10Ms(
+ const WebRtc_Word32 desiredFreqHz,
+ AudioFrame &audioFrame);
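+
+    // Illustrative only: a playout thread (an assumption, not part of this
+    // class) would typically pull decoded audio as
+    //
+    //   AudioFrame frame;
+    //   if(acm->PlayoutData10Ms(32000, frame) < 0)  // 32 kHz is just an example
+    //   {
+    //       // handle the error
+    //   }
+    //   // hand "frame" to the audio device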
+
+
+ /////////////////////////////////////////
+ // Statistics
+ //
+
+ WebRtc_Word32 NetworkStatistics(
+ ACMNetworkStatistics& statistics) const;
+
+ WebRtc_Word32 JitterStatistics(
+ ACMJitterStatistics& jitterStatistics) const;
+
+ WebRtc_Word32 PreferredBufferSize(
+ WebRtc_UWord16& prefbufsize) const;
+
+ WebRtc_Word32 ResetJitterStatistics() const;
+
+ void DestructEncoderInst(void* ptrInst);
+
+ WebRtc_Word16 AudioBuffer(WebRtcACMAudioBuff& audioBuff);
+
+    // Get RED payload for iSAC. This method is called
+    // when 'this' ACM is the default ACM.
+ WebRtc_Word32 REDPayloadISAC(
+ const WebRtc_Word32 isacRate,
+ const WebRtc_Word16 isacBwEstimate,
+ WebRtc_UWord8* payload,
+ WebRtc_Word16* payloadLenByte);
+
+ WebRtc_Word16 SetAudioBuffer(WebRtcACMAudioBuff& audioBuff);
+
+ WebRtc_UWord32 EarliestTimestamp() const;
+
+ WebRtc_Word32 LastEncodedTimestamp(WebRtc_UWord32& timestamp) const;
+
+ WebRtc_Word32 ReplaceInternalDTXWithWebRtc(
+ const bool useWebRtcDTX);
+
+ WebRtc_Word32 IsInternalDTXReplacedWithWebRtc(
+ bool& usesWebRtcDTX);
+
+ WebRtc_Word32 SetISACMaxRate(
+ const WebRtc_UWord32 rateBitPerSec);
+
+ WebRtc_Word32 SetISACMaxPayloadSize(
+ const WebRtc_UWord16 payloadLenBytes);
+
+ WebRtc_Word32 ConfigISACBandwidthEstimator(
+ const WebRtc_UWord8 initFrameSizeMsec,
+ const WebRtc_UWord16 initRateBitPerSec,
+ const bool enforceFrameSize = false);
+
+ WebRtc_Word32 UnregisterReceiveCodec(
+ const WebRtc_Word16 payloadType);
+
+protected:
+ void UnregisterSendCodec();
+
+ WebRtc_Word32 UnregisterReceiveCodecSafe(
+ const WebRtc_Word16 codecID);
+
+ ACMGenericCodec* CreateCodec(
+ const CodecInst& codec);
+
+ WebRtc_Word16 DecoderParamByPlType(
+ const WebRtc_UWord8 payloadType,
+ WebRtcACMCodecParams& codecParams) const;
+
+ WebRtc_Word16 DecoderListIDByPlName(
+ const WebRtc_Word8* payloadName,
+ const WebRtc_UWord16 sampFreqHz = 0) const;
+
+ WebRtc_Word32 InitializeReceiverSafe();
+
+ bool HaveValidEncoder(const WebRtc_Word8* callerName) const;
+
+ WebRtc_Word32 RegisterRecCodecMSSafe(
+ const CodecInst& receiveCodec,
+ WebRtc_Word16 codecId,
+ WebRtc_Word16 mirrorId,
+ ACMNetEQ::JB jitterBuffer);
+
+private:
+ AudioPacketizationCallback* _packetizationCallback;
+ WebRtc_Word32 _id;
+ WebRtc_UWord32 _lastTimestamp;
+ WebRtc_UWord32 _lastInTimestamp;
+ CodecInst _sendCodecInst;
+ CodecInst _cngNB;
+ CodecInst _cngWB;
+ CodecInst _cngSWB;
+ CodecInst _RED;
+ bool _REDRegistered;
+ CodecInst _DTMF;
+ bool _DTMFRegistered;
+ bool _vadEnabled;
+ bool _dtxEnabled;
+ ACMVADMode _vadMode;
+ ACMGenericCodec* _codecs[MAX_NR_OF_CODECS];
+ ACMGenericCodec* _slaveCodecs[MAX_NR_OF_CODECS];
+ WebRtc_Word16 _mirrorCodecIdx[MAX_NR_OF_CODECS];
+ bool _stereoReceive[MAX_NR_OF_CODECS];
+ bool _stereoSend;
+ WebRtc_Word32 _currentSendCodecIdx;
+ bool _sendCodecRegistered;
+ ACMResampler _inputResampler;
+ ACMResampler _outputResampler;
+ ACMNetEQ _netEq;
+ CriticalSectionWrapper* _acmCritSect;
+ ACMVADCallback* _vadCallback;
+ WebRtc_UWord8 _lastRecvAudioCodecPlType;
+
+ // RED/FEC
+ bool _isFirstRED;
+ bool _fecEnabled;
+ WebRtc_UWord8* _redBuffer;
+ RTPFragmentationHeader* _fragmentation;
+ WebRtc_UWord32 _lastFECTimestamp;
+ WebRtc_UWord8 _redPayloadType;
+ // if no RED is registered as receive codec this
+ // will have an invalid value.
+ WebRtc_UWord8 _receiveREDPayloadType;
+
+ // This is to keep track of CN instances where we can send DTMFs
+ WebRtc_UWord8 _previousPayloadType;
+
+    // This keeps track of payload types associated with _codecs[].
+    // We define it as a signed variable and initialize it with -1 to
+    // indicate unused elements.
+ WebRtc_Word16 _registeredPlTypes[MAX_NR_OF_CODECS];
+
+    // Used when payloads are pushed into ACM without any RTP info.
+    // One example is when a pre-encoded bit-stream is pushed from
+    // a file.
+ WebRtcRTPHeader* _dummyRTPHeader;
+ WebRtc_UWord16 _recvPlFrameSizeSmpls;
+
+ bool _receiverInitialized;
+ ACMDTMFDetection* _dtmfDetector;
+
+ AudioCodingFeedback* _dtmfCallback;
+ WebRtc_Word16 _lastDetectedTone;
+ CriticalSectionWrapper* _callbackCritSect;
+#ifdef TIMED_LOGGING
+ TimedTrace _trace;
+#endif
+
+#ifdef ACM_QA_TEST
+ FILE* _outgoingPL;
+ FILE* _incomingPL;
+#endif
+
+};
+
+} // namespace webrtc
+
+#endif
diff --git a/src/modules/audio_coding/main/test/ACMTest.cpp b/src/modules/audio_coding/main/test/ACMTest.cpp
new file mode 100644
index 0000000..1bbac0e
--- /dev/null
+++ b/src/modules/audio_coding/main/test/ACMTest.cpp
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "ACMTest.h"
+
+ACMTest::~ACMTest()
+{
+}
+
diff --git a/src/modules/audio_coding/main/test/ACMTest.h b/src/modules/audio_coding/main/test/ACMTest.h
new file mode 100644
index 0000000..e965671
--- /dev/null
+++ b/src/modules/audio_coding/main/test/ACMTest.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACMTEST_H
+#define ACMTEST_H
+
+class ACMTest
+{
+public:
+ virtual ~ACMTest() =0;
+ virtual void Perform() =0;
+};
+
+#endif
diff --git a/src/modules/audio_coding/main/test/APITest.cpp b/src/modules/audio_coding/main/test/APITest.cpp
new file mode 100644
index 0000000..98c3905
--- /dev/null
+++ b/src/modules/audio_coding/main/test/APITest.cpp
@@ -0,0 +1,1602 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cctype>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <iostream>
+#include <ostream>
+
+#include "APITest.h"
+#include "thread_wrapper.h"
+#include "event_wrapper.h"
+#include "tick_util.h"
+#include "trace.h"
+#include "utility.h"
+#include "common_types.h"
+#include "engine_configurations.h"
+
+#define TEST_DURATION_SEC 600
+
+#define NUMBER_OF_SENDER_TESTS 6
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
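+// Starts the thread if it was created successfully; otherwise throws the
+// supplied message (the exit(1) after the throw is never reached).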
+#define CHECK_THREAD_NULLITY(myThread, S) if(myThread != NULL){unsigned int i; (myThread)->Start(i);}else{throw S; exit(1);}
+
+using namespace webrtc;
+
+void
+APITest::Wait(WebRtc_UWord32 waitLengthMs)
+{
+ if(_randomTest)
+ {
+ return;
+ }
+ else
+ {
+ EventWrapper* myEvent = EventWrapper::Create();
+ myEvent->Wait(waitLengthMs);
+ delete myEvent;
+ return;
+ }
+}
+
+
+
+APITest::APITest():
+_acmA(NULL),
+_acmB(NULL),
+_channel_A2B(NULL),
+_channel_B2A(NULL),
+_writeToFile(true),
+_pullEventA(NULL),
+_pushEventA(NULL),
+_processEventA(NULL),
+_apiEventA(NULL),
+_pullEventB(NULL),
+_pushEventB(NULL),
+_processEventB(NULL),
+_apiEventB(NULL),
+_codecCntrA(0),
+_codecCntrB(0),
+_testCntrA(1),
+_testCntrB(1),
+_thereIsEncoderA(false),
+_thereIsEncoderB(false),
+_thereIsDecoderA(false),
+_thereIsDecoderB(false),
+_sendVADA(false),
+_sendDTXA(false),
+_sendVADModeA(VADNormal),
+_sendVADB(false),
+_sendDTXB(false),
+_sendVADModeB(VADNormal),
+_minDelayA(0),
+_minDelayB(0),
+_dotPositionA(0),
+_dotMoveDirectionA(1),
+_dotPositionB(39),
+_dotMoveDirectionB(-1),
+_dtmfCallback(NULL),
+_vadCallbackA(NULL),
+_vadCallbackB(NULL),
+_apiTestRWLock(*RWLockWrapper::CreateRWLock()),
+_randomTest(false),
+_testNumA(0),
+_testNumB(1)
+{
+ int n;
+ for( n = 0; n < 32; n++)
+ {
+ _payloadUsed[n] = false;
+ }
+
+ for(n = 0; n < 3; n++)
+ {
+ _receiveVADActivityA[n] = 0;
+ _receiveVADActivityB[n] = 0;
+ }
+
+ _movingDot[40] = '\0';
+
+ for(int n = 0; n <40; n++)
+ {
+ _movingDot[n] = ' ';
+ }
+}
+
+APITest::~APITest()
+{
+ DESTROY_ACM(_acmA);
+ DESTROY_ACM(_acmB);
+
+ DELETE_POINTER(_channel_A2B);
+ DELETE_POINTER(_channel_B2A);
+
+ DELETE_POINTER(_pushEventA);
+ DELETE_POINTER(_pullEventA);
+ DELETE_POINTER(_processEventA);
+ DELETE_POINTER(_apiEventA);
+
+ DELETE_POINTER(_pushEventB);
+ DELETE_POINTER(_pullEventB);
+ DELETE_POINTER(_processEventB);
+ DELETE_POINTER(_apiEventB);
+
+ _inFileA.Close();
+ _outFileA.Close();
+
+ _inFileB.Close();
+ _outFileB.Close();
+
+ DELETE_POINTER(_dtmfCallback);
+ DELETE_POINTER(_vadCallbackA);
+ DELETE_POINTER(_vadCallbackB);
+
+ delete &_apiTestRWLock;
+}
+
+
+
+//WebRtc_Word16
+//APITest::SetInFile(char* fileName, WebRtc_UWord16 frequencyHz)
+//{
+// return _inFile.Open(fileName, frequencyHz, "rb");
+//}
+//
+//WebRtc_Word16
+//APITest::SetOutFile(char* fileName, WebRtc_UWord16 frequencyHz)
+//{
+// return _outFile.Open(fileName, frequencyHz, "wb");
+//}
+
+WebRtc_Word16
+APITest::SetUp()
+{
+ _acmA = AudioCodingModule::Create(1);
+ _acmB = AudioCodingModule::Create(2);
+
+ CodecInst dummyCodec;
+ int lastPayloadType = 0;
+
+ WebRtc_Word16 numCodecs = _acmA->NumberOfCodecs();
+ for(WebRtc_UWord8 n = 0; n < numCodecs; n++)
+ {
+ AudioCodingModule::Codec(n, dummyCodec);
+ if((STR_CASE_CMP(dummyCodec.plname, "CN") == 0) &&
+ (dummyCodec.plfreq == 32000))
+ {
+ continue;
+ }
+
+ printf("Register Receive Codec %s ", dummyCodec.plname);
+
+ if((n != 0) && !FixedPayloadTypeCodec(dummyCodec.plname))
+ {
+ // Check registration with an already occupied payload type
+ int currentPayloadType = dummyCodec.pltype;
+ dummyCodec.pltype = 97; //lastPayloadType;
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(dummyCodec));
+ dummyCodec.pltype = currentPayloadType;
+ }
+
+ if((n < numCodecs - 1) && !FixedPayloadTypeCodec(dummyCodec.plname))
+ {
+ // test if re-registration works;
+ CodecInst nextCodec;
+ int currentPayloadType = dummyCodec.pltype;
+ AudioCodingModule::Codec(n + 1, nextCodec);
+ dummyCodec.pltype = nextCodec.pltype;
+ if(!FixedPayloadTypeCodec(nextCodec.plname))
+ {
+ _acmB->RegisterReceiveCodec(dummyCodec);
+ }
+ dummyCodec.pltype = currentPayloadType;
+ }
+
+ if((n < numCodecs - 1) && !FixedPayloadTypeCodec(dummyCodec.plname))
+ {
+ // test if un-registration works;
+ CodecInst nextCodec;
+ int currentPayloadType = dummyCodec.pltype;
+ AudioCodingModule::Codec(n + 1, nextCodec);
+ nextCodec.pltype = dummyCodec.pltype;
+ if(!FixedPayloadTypeCodec(nextCodec.plname))
+ {
+ CHECK_ERROR_MT(_acmA->RegisterReceiveCodec(nextCodec));
+ CHECK_ERROR_MT(_acmA->UnregisterReceiveCodec(nextCodec.pltype));
+ }
+ }
+
+
+ CHECK_ERROR_MT(_acmA->RegisterReceiveCodec(dummyCodec));
+ printf(" side A done!");
+ CHECK_ERROR_MT(_acmB->RegisterReceiveCodec(dummyCodec));
+ printf(" side B done!\n");
+
+ if(!strcmp(dummyCodec.plname, "CN"))
+ {
+ CHECK_ERROR_MT(_acmA->RegisterSendCodec(dummyCodec));
+ CHECK_ERROR_MT(_acmB->RegisterSendCodec(dummyCodec));
+ }
+ lastPayloadType = dummyCodec.pltype;
+ if((lastPayloadType >= 96) && (lastPayloadType <= 127))
+ {
+ _payloadUsed[lastPayloadType - 96] = true;
+ }
+ }
+ _thereIsDecoderA = true;
+ _thereIsDecoderB = true;
+
+ // Register Send Codec
+ AudioCodingModule::Codec((WebRtc_UWord8)_codecCntrA, dummyCodec);
+ CHECK_ERROR_MT(_acmA->RegisterSendCodec(dummyCodec));
+ _thereIsEncoderA = true;
+ //
+ AudioCodingModule::Codec((WebRtc_UWord8)_codecCntrB, dummyCodec);
+ CHECK_ERROR_MT(_acmB->RegisterSendCodec(dummyCodec));
+ _thereIsEncoderB = true;
+
+ char fileName[500];
+ WebRtc_UWord16 frequencyHz;
+
+ printf("\n\nAPI Test\n");
+ printf("========\n");
+ printf("Hit enter to accept the default values indicated in []\n\n");
+
+ //--- Input A
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ frequencyHz = 32000;
+ printf("Enter input file at side A [%s]: ", fileName);
+ PCMFile::ChooseFile(fileName, 499, &frequencyHz);
+ _inFileA.Open(fileName, frequencyHz, "rb", true);
+
+ //--- Output A
+ strcpy(fileName, "./modules/audio_coding/main/test/outA.pcm");
+ printf("Enter output file at side A [%s]: ", fileName);
+ PCMFile::ChooseFile(fileName, 499, &frequencyHz);
+ _outFileA.Open(fileName, frequencyHz, "wb");
+
+ //--- Input B
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ printf("\n\nEnter input file at side B [%s]: ", fileName);
+ PCMFile::ChooseFile(fileName, 499, &frequencyHz);
+ _inFileB.Open(fileName, frequencyHz, "rb", true);
+
+ //--- Output B
+ strcpy(fileName, "./modules/audio_coding/main/test/outB.pcm");
+ printf("Enter output file at side B [%s]: ", fileName);
+ PCMFile::ChooseFile(fileName, 499, &frequencyHz);
+ _outFileB.Open(fileName, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel(2);
+ CHECK_ERROR_MT(_acmA->RegisterTransportCallback(_channel_A2B));
+ _channel_A2B->RegisterReceiverACM(_acmB);
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel(1);
+ CHECK_ERROR_MT(_acmB->RegisterTransportCallback(_channel_B2A));
+ _channel_B2A->RegisterReceiverACM(_acmA);
+
+ //--- EVENT TIMERS
+ // A
+ _pullEventA = EventWrapper::Create();
+ _pushEventA = EventWrapper::Create();
+ _processEventA = EventWrapper::Create();
+ _apiEventA = EventWrapper::Create();
+ // B
+ _pullEventB = EventWrapper::Create();
+ _pushEventB = EventWrapper::Create();
+ _processEventB = EventWrapper::Create();
+ _apiEventB = EventWrapper::Create();
+
+ //--- I/O params
+ // A
+ _outFreqHzA = _outFileA.SamplingFrequency();
+ // B
+ _outFreqHzB = _outFileB.SamplingFrequency();
+
+
+ //Trace::SetEncryptedTraceFile("ACMAPITestEncrypted.txt");
+
+ char print[11];
+
+ printf("\nRandom Test (y/n)?");
+ fgets(print, 10, stdin);
+ print[10] = '\0';
+ if(strstr(print, "y") != NULL)
+ {
+ _randomTest = true;
+ _verbose = false;
+ _writeToFile = false;
+ Trace::CreateTrace();
+ Trace::SetTraceFile("ACMAPITest.txt");
+ //freopen("APITest_log.txt", "w", stdout);
+ }
+ else
+ {
+ Trace::CreateTrace();
+ Trace::SetTraceFile("ACMAPITest.txt", true);
+ _randomTest = false;
+ printf("\nPrint Tests (y/n)? ");
+ fgets(print, 10, stdin);
+ print[10] = '\0';
+ if(strstr(print, "y") == NULL)
+ {
+ freopen("APITest_log.txt", "w", stdout);
+ _verbose = false;
+ }
+ }
+
+#ifdef WEBRTC_DTMF_DETECTION
+ _dtmfCallback = new DTMFDetector;
+#endif
+ _vadCallbackA = new VADCallback;
+ _vadCallbackB = new VADCallback;
+
+ return 0;
+}
+
+bool
+APITest::PushAudioThreadA(void* obj)
+{
+ return static_cast<APITest*>(obj)->PushAudioRunA();
+}
+
+bool
+APITest::PushAudioThreadB(void* obj)
+{
+ return static_cast<APITest*>(obj)->PushAudioRunB();
+}
+
+bool
+APITest::PullAudioThreadA(void* obj)
+{
+ return static_cast<APITest*>(obj)->PullAudioRunA();
+}
+
+bool
+APITest::PullAudioThreadB(void* obj)
+{
+ return static_cast<APITest*>(obj)->PullAudioRunB();
+}
+
+bool
+APITest::ProcessThreadA(void* obj)
+{
+ return static_cast<APITest*>(obj)->ProcessRunA();
+}
+
+bool
+APITest::ProcessThreadB(void* obj)
+{
+ return static_cast<APITest*>(obj)->ProcessRunB();
+}
+
+bool
+APITest::APIThreadA(void* obj)
+{
+ return static_cast<APITest*>(obj)->APIRunA();
+}
+
+bool
+APITest::APIThreadB(void* obj)
+{
+ return static_cast<APITest*>(obj)->APIRunB();
+}
+
+bool
+APITest::PullAudioRunA()
+{
+ _pullEventA->Wait(100);
+ AudioFrame audioFrame;
+ if(_acmA->PlayoutData10Ms(_outFreqHzA, audioFrame) < 0)
+ {
+ bool thereIsDecoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsDecoder = _thereIsDecoderA;
+ }
+ if(thereIsDecoder)
+ {
+ fprintf(stderr, "\n>>>>>> cannot pull audio A <<<<<<<< \n");
+ }
+ }
+ else
+ {
+ if(_writeToFile)
+ {
+ _outFileA.Write10MsData(audioFrame);
+ }
+ _receiveVADActivityA[(int)audioFrame._vadActivity]++;
+ }
+ return true;
+}
+
+bool
+APITest::PullAudioRunB()
+{
+ _pullEventB->Wait(100);
+ AudioFrame audioFrame;
+ if(_acmB->PlayoutData10Ms(_outFreqHzB, audioFrame) < 0)
+ {
+ bool thereIsDecoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsDecoder = _thereIsDecoderB;
+ }
+ if(thereIsDecoder)
+ {
+ fprintf(stderr, "\n>>>>>> cannot pull audio B <<<<<<<< \n");
+ fprintf(stderr, "%d %d\n", _testNumA, _testNumB);
+ }
+ }
+ else
+ {
+ if(_writeToFile)
+ {
+ _outFileB.Write10MsData(audioFrame);
+ }
+ _receiveVADActivityB[(int)audioFrame._vadActivity]++;
+ }
+ return true;
+}
+
+bool
+APITest::PushAudioRunA()
+{
+ _pushEventA->Wait(100);
+ AudioFrame audioFrame;
+ _inFileA.Read10MsData(audioFrame);
+ if(_acmA->Add10MsData(audioFrame) < 0)
+ {
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderA;
+ }
+ if(thereIsEncoder)
+ {
+ fprintf(stderr, "\n>>>> add10MsData at A failed <<<<\n");
+ }
+ }
+ return true;
+}
+
+bool
+APITest::PushAudioRunB()
+{
+ _pushEventB->Wait(100);
+ AudioFrame audioFrame;
+ _inFileB.Read10MsData(audioFrame);
+ if(_acmB->Add10MsData(audioFrame) < 0)
+ {
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderB;
+ }
+
+ if(thereIsEncoder)
+ {
+ fprintf(stderr, "\n>>>> cannot add audio to B <<<<");
+ }
+ }
+
+ return true;
+}
+
+bool
+APITest::ProcessRunA()
+{
+ _processEventA->Wait(100);
+ if(_acmA->Process() < 0)
+ {
+ // do not print error message if there is no encoder
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderA;
+ }
+
+ if(thereIsEncoder)
+ {
+ fprintf(stderr, "\n>>>>> Process Failed at A <<<<<\n");
+ }
+ }
+ return true;
+}
+
+bool
+APITest::ProcessRunB()
+{
+ _processEventB->Wait(100);
+ if(_acmB->Process() < 0)
+ {
+ bool thereIsEncoder;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ thereIsEncoder = _thereIsEncoderB;
+ }
+ if(thereIsEncoder)
+ {
+ fprintf(stderr, "\n>>>>> Process Failed at B <<<<<\n");
+ }
+ }
+ return true;
+}
+
+/*
+ * On side A we test the APIs related to the sender side.
+ */
+
+
+void
+APITest::RunTest(char thread)
+{
+ int testNum;
+ {
+ WriteLockScoped cs(_apiTestRWLock);
+ if(thread == 'A')
+ {
+ _testNumA = (_testNumB + 1 + (rand() % 6)) % 7;
+ testNum = _testNumA;
+
+ _movingDot[_dotPositionA] = ' ';
+ if(_dotPositionA == 0)
+ {
+ _dotMoveDirectionA = 1;
+ }
+ if(_dotPositionA == 19)
+ {
+ _dotMoveDirectionA = -1;
+ }
+ _dotPositionA += _dotMoveDirectionA;
+ _movingDot[_dotPositionA] = (_dotMoveDirectionA > 0)? '>':'<';
+ }
+ else
+ {
+ _testNumB = (_testNumA + 1 + (rand() % 6)) % 7;
+ testNum = _testNumB;
+
+ _movingDot[_dotPositionB] = ' ';
+ if(_dotPositionB == 20)
+ {
+ _dotMoveDirectionB = 1;
+ }
+ if(_dotPositionB == 39)
+ {
+ _dotMoveDirectionB = -1;
+ }
+ _dotPositionB += _dotMoveDirectionB;
+ _movingDot[_dotPositionB] = (_dotMoveDirectionB > 0)? '>':'<';
+ }
+ //fprintf(stderr, "%c: %d \n", thread, testNum);
+ //fflush(stderr);
+ }
+ switch(testNum)
+ {
+ case 0:
+ CurrentCodec('A');
+ ChangeCodec('A');
+ break;
+ case 1:
+ TestPlayout('B');
+ break;
+ case 2:
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\nTesting Delay ...\n");
+ }
+ TestDelay('A');
+ break;
+ case 3:
+ TestSendVAD('A');
+ break;
+ case 4:
+ TestRegisteration('A');
+ break;
+ case 5:
+ TestReceiverVAD('A');
+ break;
+ case 6:
+#ifdef WEBRTC_DTMF_DETECTION
+ LookForDTMF('A');
+#endif
+ break;
+ default:
+ fprintf(stderr, "Wrong Test Number\n");
+ getchar();
+ exit(1);
+ }
+}
+
+
+
+bool
+APITest::APIRunA()
+{
+ _apiEventA->Wait(50);
+
+ bool randomTest;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ randomTest = _randomTest;
+ }
+ if(randomTest)
+ {
+ RunTest('A');
+ }
+ else
+ {
+ CurrentCodec('A');
+ ChangeCodec('A');
+ TestPlayout('B');
+ if(_codecCntrA == 0)
+ {
+ fprintf(stdout, "\nTesting Delay ...\n");
+ TestDelay('A');
+ }
+ // VAD TEST
+ TestSendVAD('A');
+ TestRegisteration('A');
+ TestReceiverVAD('A');
+#ifdef WEBRTC_DTMF_DETECTION
+ LookForDTMF('A');
+#endif
+ }
+ return true;
+}
+
+bool
+APITest::APIRunB()
+{
+ _apiEventB->Wait(50);
+ bool randomTest;
+ {
+ ReadLockScoped rl(_apiTestRWLock);
+ randomTest = _randomTest;
+ }
+ //_apiEventB->Wait(2000);
+ if(randomTest)
+ {
+ RunTest('B');
+ }
+
+ return true;
+}
+
+void
+APITest::Perform()
+{
+ SetUp();
+
+ //--- THREADS
+ // A
+ // PUSH
+ ThreadWrapper* myPushAudioThreadA = ThreadWrapper::CreateThread(PushAudioThreadA,
+ this, kNormalPriority, "PushAudioThreadA");
+ CHECK_THREAD_NULLITY(myPushAudioThreadA, "Unable to start A::PUSH thread");
+ // PULL
+ ThreadWrapper* myPullAudioThreadA = ThreadWrapper::CreateThread(PullAudioThreadA,
+ this, kNormalPriority, "PullAudioThreadA");
+ CHECK_THREAD_NULLITY(myPullAudioThreadA, "Unable to start A::PULL thread");
+ // Process
+ ThreadWrapper* myProcessThreadA = ThreadWrapper::CreateThread(ProcessThreadA,
+ this, kNormalPriority, "ProcessThreadA");
+ CHECK_THREAD_NULLITY(myProcessThreadA, "Unable to start A::Process thread");
+ // API
+ ThreadWrapper* myAPIThreadA = ThreadWrapper::CreateThread(APIThreadA,
+ this, kNormalPriority, "APIThreadA");
+ CHECK_THREAD_NULLITY(myAPIThreadA, "Unable to start A::API thread");
+ // B
+ // PUSH
+ ThreadWrapper* myPushAudioThreadB = ThreadWrapper::CreateThread(PushAudioThreadB,
+ this, kNormalPriority, "PushAudioThreadB");
+ CHECK_THREAD_NULLITY(myPushAudioThreadB, "Unable to start B::PUSH thread");
+ // PULL
+ ThreadWrapper* myPullAudioThreadB = ThreadWrapper::CreateThread(PullAudioThreadB,
+ this, kNormalPriority, "PullAudioThreadB");
+ CHECK_THREAD_NULLITY(myPullAudioThreadB, "Unable to start B::PULL thread");
+ // Process
+ ThreadWrapper* myProcessThreadB = ThreadWrapper::CreateThread(ProcessThreadB,
+ this, kNormalPriority, "ProcessThreadB");
+ CHECK_THREAD_NULLITY(myProcessThreadB, "Unable to start B::Process thread");
+ // API
+ ThreadWrapper* myAPIThreadB = ThreadWrapper::CreateThread(APIThreadB,
+ this, kNormalPriority, "APIThreadB");
+ CHECK_THREAD_NULLITY(myAPIThreadB, "Unable to start B::API thread");
+
+
+ //_apiEventA->StartTimer(true, 5000);
+ //_apiEventB->StartTimer(true, 5000);
+
+ _processEventA->StartTimer(true, 10);
+ _processEventB->StartTimer(true, 10);
+
+ _pullEventA->StartTimer(true, 10);
+ _pullEventB->StartTimer(true, 10);
+
+ _pushEventA->StartTimer(true, 10);
+ _pushEventB->StartTimer(true, 10);
+
+ // Keep main thread waiting for sender/receiver
+ // threads to complete
+ EventWrapper* completeEvent = EventWrapper::Create();
+ WebRtc_UWord64 startTime = TickTime::MillisecondTimestamp();
+ WebRtc_UWord64 currentTime;
+ do
+ {
+ {
+ //ReadLockScoped rl(_apiTestRWLock);
+ //fprintf(stderr, "\r%s", _movingDot);
+ }
+ //fflush(stderr);
+ completeEvent->Wait(50);
+ currentTime = TickTime::MillisecondTimestamp();
+ } while((currentTime - startTime) < 120000); // Run test in 2 minutes (120000 ms)
+
+ //completeEvent->Wait(0xFFFFFFFF);//(unsigned long)((unsigned long)TEST_DURATION_SEC * (unsigned long)1000));
+ delete completeEvent;
+
+ myPushAudioThreadA->Stop();
+ myPullAudioThreadA->Stop();
+ myProcessThreadA->Stop();
+ myAPIThreadA->Stop();
+
+ delete myPushAudioThreadA;
+ delete myPullAudioThreadA;
+ delete myProcessThreadA;
+ delete myAPIThreadA;
+
+
+ myPushAudioThreadB->Stop();
+ myPullAudioThreadB->Stop();
+ myProcessThreadB->Stop();
+ myAPIThreadB->Stop();
+
+ delete myPushAudioThreadB;
+ delete myPullAudioThreadB;
+ delete myProcessThreadB;
+ delete myAPIThreadB;
+}
+
+
+void
+APITest::CheckVADStatus(char side)
+{
+
+ bool dtxEnabled;
+ bool vadEnabled;
+ ACMVADMode vadMode;
+ EventWrapper* myEvent = EventWrapper::Create();
+ if(side == 'A')
+ {
+ _acmA->VAD(dtxEnabled, vadEnabled, vadMode);
+ _acmA->RegisterVADCallback(NULL);
+ _vadCallbackA->Reset();
+ _acmA->RegisterVADCallback(_vadCallbackA);
+
+ if(!_randomTest)
+ {
+ if(_verbose)
+ {
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d",
+ dtxEnabled? "ON":"OFF",
+ vadEnabled? "ON":"OFF",
+ (int)vadMode);
+ Wait(5000);
+ fprintf(stdout, " => bit-rate %3.0f kbps\n",
+ _channel_A2B->BitRate());
+ }
+ else
+ {
+ Wait(5000);
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d => bit-rate %3.0f kbps\n",
+ dtxEnabled? "ON":"OFF",
+ vadEnabled? "ON":"OFF",
+ (int)vadMode,
+ _channel_A2B->BitRate());
+ }
+ _vadCallbackA->PrintFrameTypes();
+ }
+
+ if(dtxEnabled != _sendDTXA)
+ {
+ fprintf(stderr, ">>> Error Enabling DTX <<<\n");
+ }
+ if((vadEnabled != _sendVADA) && (!dtxEnabled))
+ {
+ fprintf(stderr, ">>> Error Enabling VAD <<<\n");
+ }
+ if((vadMode != _sendVADModeA) && vadEnabled)
+ {
+ fprintf(stderr, ">>> Error setting VAD-mode <<<\n");
+ }
+ }
+ else
+ {
+ _acmB->VAD(dtxEnabled, vadEnabled, vadMode);
+
+ _acmB->RegisterVADCallback(NULL);
+ _vadCallbackB->Reset();
+ _acmB->RegisterVADCallback(_vadCallbackB);
+
+ if(!_randomTest)
+ {
+ if(_verbose)
+ {
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d",
+ dtxEnabled? "ON":"OFF",
+ vadEnabled? "ON":"OFF",
+ (int)vadMode);
+ Wait(5000);
+ fprintf(stdout, " => bit-rate %3.0f kbps\n",
+ _channel_B2A->BitRate());
+ }
+ else
+ {
+ Wait(5000);
+ fprintf(stdout, "DTX %3s, VAD %3s, Mode %d => bit-rate %3.0f kbps\n",
+ dtxEnabled? "ON":"OFF",
+ vadEnabled? "ON":"OFF",
+ (int)vadMode,
+ _channel_B2A->BitRate());
+ }
+ _vadCallbackB->PrintFrameTypes();
+ }
+
+ if(dtxEnabled != _sendDTXB)
+ {
+ fprintf(stderr, ">>> Error Enabling DTX <<<\n");
+ }
+ if((vadEnabled != _sendVADB) && (!dtxEnabled))
+ {
+ fprintf(stderr, ">>> Error Enabling VAD <<<\n");
+ }
+ if((vadMode != _sendVADModeB) && vadEnabled)
+ {
+ fprintf(stderr, ">>> Error setting VAD-mode <<<\n");
+ }
+ }
+}
+
+// Set Min delay, get delay, playout timestamp
+void
+APITest::TestDelay(char side)
+{
+ AudioCodingModule* myACM;
+ Channel* myChannel;
+ WebRtc_Word32* myMinDelay;
+ EventWrapper* myEvent = EventWrapper::Create();
+
+ WebRtc_UWord32 inTimestamp = 0;
+ WebRtc_UWord32 outTimestamp = 0;
+ double estimDelay = 0;
+ WebRtc_UWord16 delay = 0;
+
+ double averageEstimDelay = 0;
+ double averageDelay = 0;
+
+ CircularBuffer estimDelayCB(100);
+ CircularBuffer delayCB(100);
+ estimDelayCB.SetArithMean(true);
+ delayCB.SetArithMean(true);
+
+
+ if(side == 'A')
+ {
+ myACM = _acmA;
+ myChannel = _channel_B2A;
+ myMinDelay = &_minDelayA;
+ }
+ else
+ {
+ myACM = _acmB;
+ myChannel = _channel_A2B;
+ myMinDelay = &_minDelayB;
+ }
+
+
+ CHECK_ERROR_MT(myACM->SetMinimumPlayoutDelay(*myMinDelay));
+
+
+ inTimestamp = myChannel->LastInTimestamp();
+ CHECK_ERROR_MT(myACM->PlayoutTimestamp(outTimestamp));
+ CHECK_ERROR_MT(myACM->Delay(delay));
+
+ if(!_randomTest)
+ {
+ myEvent->StartTimer(true, 30);
+ int n = 0;
+ int settlePoint = 5000;
+ while(n < settlePoint + 400)
+ {
+ myEvent->Wait(1000);
+
+ inTimestamp = myChannel->LastInTimestamp();
+ CHECK_ERROR_MT(myACM->PlayoutTimestamp(outTimestamp));
+
+ //std::cout << outTimestamp << std::endl << std::flush;
+ estimDelay = (double)((WebRtc_UWord32)(inTimestamp - outTimestamp)) /
+ ((double)myACM->ReceiveFrequency() / 1000.0);
+
+ estimDelayCB.Update(estimDelay);
+
+ estimDelayCB.ArithMean(averageEstimDelay);
+ //printf("\n %6.1f \n", estimDelay);
+ //std::cout << " " << std::flush;
+
+ CHECK_ERROR_MT(myACM->Delay(delay));
+ delayCB.Update(delay);
+ delayCB.ArithMean(averageDelay);
+
+ if(_verbose)
+ {
+ fprintf(stdout, "\rExpected: %4d, retreived: %6.1f, measured: %6.1f",
+ *myMinDelay, averageDelay, averageEstimDelay);
+ std::cout << " " << std::flush;
+ }
+ if((averageDelay > *myMinDelay) && (n < settlePoint))
+ {
+ settlePoint = n;
+ }
+ n++;
+ }
+ myEvent->StopTimer();
+ }
+
+ if((!_verbose) && (!_randomTest))
+ {
+ fprintf(stdout, "\nExpected: %4d, retreived: %6.1f, measured: %6.1f",
+ *myMinDelay, averageDelay, averageEstimDelay);
+ }
+
+ *myMinDelay = (rand() % 1000) + 1;
+
+ ACMJitterStatistics jitterStat;
+ ACMNetworkStatistics networkStat;
+ CHECK_ERROR_MT(myACM->JitterStatistics(jitterStat));
+ CHECK_ERROR_MT(myACM->NetworkStatistics(networkStat));
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\nJitter Statistics at Side %c\n", side);
+ fprintf(stdout, "--------------------------------------\n");
+ fprintf(stdout, "buffer-size............. %d\n", networkStat.currentBufferSize);
+ fprintf(stdout, "Preferred buffer-size... %d\n", networkStat.preferredBufferSize);
+ fprintf(stdout, "packet-size rate........ %d\n", networkStat.currentPacketLossRate);
+ fprintf(stdout, "discard rate............ %d\n", networkStat.currentDiscardRate);
+ fprintf(stdout, "expand rate............. %d\n", networkStat.currentExpandRate);
+ fprintf(stdout, "Preemptive rate......... %d\n", networkStat.currentPreemptiveRate);
+ fprintf(stdout, "Accelerate rate......... %d\n", networkStat.currentAccelerateRate);
+
+ fprintf(stdout, "\n\nJitter Statistics at side %c\n", side);
+ fprintf(stdout, "--------------------------------------\n");
+ fprintf(stdout, "Jitter buffer min size....... %d\n", jitterStat.jbMinSize);
+ fprintf(stdout, "Jitter buffer Max size....... %d\n", jitterStat.jbMaxSize);
+ fprintf(stdout, "Jitter buffer Average size... %d\n", jitterStat.jbAvgSize);
+ fprintf(stdout, "Change Count................. %d ms\n", jitterStat.jbChangeCount);
+ fprintf(stdout, "Late Loss.................... %d ms\n", jitterStat.lateLossMs);
+ fprintf(stdout, "Accelerate................... %d ms\n", jitterStat.accelerateMs);
+ fprintf(stdout, "Flushed...................... %d ms\n", jitterStat.flushedMs);
+ fprintf(stdout, "Generated Silence............ %d ms\n", jitterStat.generatedSilentMs);
+ fprintf(stdout, "Interpolated Voice........... %d ms\n", jitterStat.interpolatedVoiceMs);
+ fprintf(stdout, "Interpolated Silence......... %d ms\n", jitterStat.interpolatedSilentMs);
+ fprintf(stdout, "No tiny expand............... %d\n", jitterStat.numExpandTiny);
+ fprintf(stdout, "No small expand.............. %d\n", jitterStat.numExpandSmall);
+ fprintf(stdout, "No Medium expand............. %d\n", jitterStat.numExpandMedium);
+ fprintf(stdout, "No long expand............... %d\n", jitterStat.numExpandLong);
+ fprintf(stdout, "longest expand............... %d ms\n", jitterStat.longestExpandDurationMs);
+ fprintf(stdout, "No IAT 500................... %d ms\n", jitterStat.countIAT500ms);
+ fprintf(stdout, "No IAT 1000.................. %d ms\n", jitterStat.countIAT1000ms);
+ fprintf(stdout, "No IAT 2000.................. %d ms\n", jitterStat.countIAT2000ms);
+ fprintf(stdout, "longest IAT.................. %d ms\n", jitterStat.longestIATms);
+ fprintf(stdout, "Min packet delay............. %d ms\n", jitterStat.minPacketDelayMs);
+ fprintf(stdout, "Max packet delay............. %d ms\n", jitterStat.maxPacketDelayMs);
+ fprintf(stdout, "Average packet delay......... %d ms\n", jitterStat.avgPacketDelayMs);
+ }
+
+ CHECK_ERROR_MT(myACM->SetMinimumPlayoutDelay(*myMinDelay));
+
+ if(!_randomTest)
+ {
+ myEvent->Wait(500);
+ fprintf(stdout, "\n");
+ fprintf(stdout, "\n");
+ }
+ delete myEvent;
+}
+
+// Unregister a codec & register again.
+void
+APITest::TestRegisteration(char sendSide)
+{
+ AudioCodingModule* sendACM;
+ AudioCodingModule* receiveACM;
+ bool* thereIsDecoder;
+ EventWrapper* myEvent = EventWrapper::Create();
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout, "---------------------------------------------------------\n");
+ fprintf(stdout, " Unregister/register Receive Codec\n");
+ fprintf(stdout, "---------------------------------------------------------\n");
+ }
+
+ switch(sendSide)
+ {
+ case 'A':
+ {
+ sendACM = _acmA;
+ receiveACM = _acmB;
+ thereIsDecoder = &_thereIsDecoderB;
+ break;
+ }
+ case 'B':
+ {
+ sendACM = _acmB;
+ receiveACM = _acmA;
+ thereIsDecoder = &_thereIsDecoderA;
+ break;
+ }
+ default:
+ fprintf(stderr, "Invalid sender-side in TestRegistration(%c)\n", sendSide);
+ exit(-1);
+ }
+
+ CodecInst myCodec;
+ if(sendACM->SendCodec(myCodec) < 0)
+ {
+ AudioCodingModule::Codec(_codecCntrA, myCodec);
+ }
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Unregistering reveive codec, NO AUDIO.\n");
+ fflush(stdout);
+ }
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = false;
+ }
+ //myEvent->Wait(20);
+ CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
+ Wait(1000);
+
+ int currentPayload = myCodec.pltype;
+
+ if(!FixedPayloadTypeCodec(myCodec.plname))
+ {
+ WebRtc_Word32 i;
+ for(i = 0; i < 32; i++)
+ {
+ if(!_payloadUsed[i])
+ {
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Register receive codec with new Payload, AUDIO BACK.\n");
+ }
+ //myCodec.pltype = i + 96;
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(myCodec));
+ //myEvent->Wait(20);
+ //{
+ // WriteLockScoped wl(_apiTestRWLock);
+ // *thereIsDecoder = true;
+ //}
+ Wait(1000);
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Unregistering reveive codec, NO AUDIO.\n");
+ }
+ //{
+ // WriteLockScoped wl(_apiTestRWLock);
+ // *thereIsDecoder = false;
+ //}
+ //myEvent->Wait(20);
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
+ Wait(1000);
+
+ myCodec.pltype = currentPayload;
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Register receive codec with default Payload, AUDIO BACK.\n");
+ fflush(stdout);
+ }
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ //CHECK_ERROR_MT(sendACM->RegisterSendCodec(myCodec));
+ myEvent->Wait(20);
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ Wait(1000);
+
+ break;
+ }
+ }
+ if(i == 32)
+ {
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ }
+ }
+ else
+ {
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Register receive codec with fixed Payload, AUDIO BACK.\n");
+ fflush(stdout);
+ }
+ CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ //CHECK_ERROR_MT(receiveACM->UnregisterReceiveCodec(myCodec.pltype));
+ //CHECK_ERROR_MT(receiveACM->RegisterReceiveCodec(myCodec));
+ myEvent->Wait(20);
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsDecoder = true;
+ }
+ }
+ delete myEvent;
+ if(!_randomTest)
+ {
+ fprintf(stdout, "---------------------------------------------------------\n");
+ }
+}
+
+// Playout Mode, background noise mode.
+// Receiver Frequency, playout frequency.
+void
+APITest::TestPlayout(char receiveSide)
+{
+ AudioCodingModule* receiveACM;
+ AudioPlayoutMode* playoutMode;
+ ACMBackgroundNoiseMode* bgnMode;
+ switch(receiveSide)
+ {
+ case 'A':
+ {
+ receiveACM = _acmA;
+ playoutMode = &_playoutModeA;
+ bgnMode = &_bgnModeA;
+ break;
+ }
+ case 'B':
+ {
+ receiveACM = _acmB;
+ playoutMode = &_playoutModeB;
+ bgnMode = &_bgnModeB;
+ break;
+ }
+ default:
+ receiveACM = _acmA;
+ }
+
+ WebRtc_Word32 receiveFreqHz = receiveACM->ReceiveFrequency();
+ WebRtc_Word32 playoutFreqHz = receiveACM->PlayoutFrequency();
+
+ CHECK_ERROR_MT(receiveFreqHz);
+ CHECK_ERROR_MT(playoutFreqHz);
+
+ char bgnString[25];
+ switch(*bgnMode)
+ {
+ case On:
+ {
+ *bgnMode = Fade;
+ strncpy(bgnString, "Fade", 25);
+ break;
+ }
+ case Fade:
+ {
+ *bgnMode = Off;
+ strncpy(bgnString, "OFF", 25);
+ break;
+ }
+ case Off:
+ {
+ *bgnMode = On;
+ strncpy(bgnString, "ON", 25);
+ break;
+ }
+ default:
+ *bgnMode = On;
+ strncpy(bgnString, "ON", 25);
+ }
+ CHECK_ERROR_MT(receiveACM->SetBackgroundNoiseMode(*bgnMode));
+ bgnString[24] = '\0';
+
+ char playoutString[25];
+ switch(*playoutMode)
+ {
+ case voice:
+ {
+ *playoutMode = fax;
+ strncpy(playoutString, "FAX", 25);
+ break;
+ }
+ case fax:
+ {
+ *playoutMode = streaming;
+ strncpy(playoutString, "Streaming", 25);
+ break;
+ }
+ case streaming:
+ {
+ *playoutMode = voice;
+ strncpy(playoutString, "Voice", 25);
+ break;
+ }
+ default:
+ *playoutMode = voice;
+ strncpy(playoutString, "Voice", 25);
+ }
+ CHECK_ERROR_MT(receiveACM->SetPlayoutMode(*playoutMode));
+ playoutString[24] = '\0';
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n");
+ fprintf(stdout, "In Side %c\n", receiveSide);
+ fprintf(stdout, "---------------------------------\n");
+ fprintf(stdout, "Receive Frequency....... %d Hz\n", receiveFreqHz);
+ fprintf(stdout, "Playout Frequency....... %d Hz\n", playoutFreqHz);
+ fprintf(stdout, "Audio Playout Mode...... %s\n", playoutString);
+ fprintf(stdout, "Background Noise Mode... %s\n", bgnString);
+ }
+}
+
+// set/get receiver VAD status & mode.
+void
+APITest::TestReceiverVAD(char side)
+{
+ AudioCodingModule* myACM;
+ EventWrapper* myEvent = EventWrapper::Create();
+ WebRtc_UWord64* myReceiveVADActivity;
+
+ if(side == 'A')
+ {
+ myACM = _acmA;
+ myReceiveVADActivity = _receiveVADActivityA;
+ }
+ else
+ {
+ myACM = _acmB;
+ myReceiveVADActivity = _receiveVADActivityB;
+ }
+
+ bool vadStatus = myACM->ReceiveVADStatus();
+ ACMVADMode mode = myACM->ReceiveVADMode();
+
+ CHECK_ERROR_MT(mode);
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\nCurrent Receive VAD at side %c\n", side);
+ fprintf(stdout, "----------------------------------\n");
+ fprintf(stdout, "Status........ %s\n", vadStatus? "ON":"OFF");
+ fprintf(stdout, "mode.......... %d\n", (int)mode);
+ fprintf(stdout, "VAD Active.... %llu\n", myReceiveVADActivity[0]);
+ fprintf(stdout, "VAD Passive... %llu\n", myReceiveVADActivity[1]);
+ fprintf(stdout, "VAD Unknown... %llu\n", myReceiveVADActivity[2]);
+ }
+
+ if(vadStatus)
+ {
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\nChange Receive VAD at side %c\n\n", side);
+ }
+
+ switch(mode)
+ {
+ case VADNormal:
+ mode = VADAggr;
+ break;
+ case VADLowBitrate:
+ mode = VADVeryAggr;
+ break;
+ case VADAggr:
+ mode = VADLowBitrate;
+ break;
+ case VADVeryAggr:
+ vadStatus = false;
+ mode = VADNormal;
+ break;
+ default:
+ mode = VADNormal;
+ }
+
+ CHECK_ERROR_MT(myACM->SetReceiveVADMode(mode));
+ CHECK_ERROR_MT(myACM->SetReceiveVADStatus(vadStatus));
+ }
+ else
+ {
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\nTurn on Receive VAD at side %c\n\n", side);
+ }
+ CHECK_ERROR_MT(myACM->SetReceiveVADStatus(true));
+ CHECK_ERROR_MT(myACM->SetReceiveVADMode(VADNormal));
+ }
+ for(int n = 0; n < 3; n++)
+ {
+ myReceiveVADActivity[n] = 0;
+ }
+}
+
+
+void
+APITest::TestSendVAD(char side)
+{
+ if(_randomTest)
+ {
+ return;
+ }
+
+ bool* vad;
+ bool* dtx;
+ ACMVADMode* mode;
+ Channel* myChannel;
+ AudioCodingModule* myACM;
+
+ CodecInst myCodec;
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ fprintf(stdout, " Test VAD API\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ }
+
+ if(side == 'A')
+ {
+ AudioCodingModule::Codec(_codecCntrA, myCodec);
+ vad = &_sendVADA;
+ dtx = &_sendDTXA;
+ mode = &_sendVADModeA;
+ myChannel = _channel_A2B;
+ myACM = _acmA;
+ }
+ else
+ {
+ AudioCodingModule::Codec(_codecCntrB, myCodec);
+ vad = &_sendVADB;
+ dtx = &_sendDTXB;
+ mode = &_sendVADModeB;
+ myChannel = _channel_B2A;
+ myACM = _acmB;
+ }
+
+ CheckVADStatus(side);
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\n");
+ }
+
+ switch(*mode)
+ {
+ case VADNormal:
+ *vad = true;
+ *dtx = true;
+ *mode = VADAggr;
+ break;
+ case VADLowBitrate:
+ *vad = true;
+ *dtx = true;
+ *mode = VADVeryAggr;
+ break;
+ case VADAggr:
+ *vad = true;
+ *dtx = true;
+ *mode = VADLowBitrate;
+ break;
+ case VADVeryAggr:
+ *vad = false;
+ *dtx = false;
+ *mode = VADNormal;
+ break;
+ default:
+ *mode = VADNormal;
+ }
+
+ *dtx = (myCodec.plfreq == 32000)? false:*dtx;
+
+ CHECK_ERROR_MT(myACM->SetVAD(*dtx, *vad, *mode));
+ myChannel->ResetStats();
+
+ CheckVADStatus(side);
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n");
+ fprintf(stdout, "-----------------------------------------------\n");
+ }
+
+ // Fault Test
+ CHECK_PROTECTED_MT(myACM->SetVAD(false, true, (ACMVADMode)-1));
+ CHECK_PROTECTED_MT(myACM->SetVAD(false, true, (ACMVADMode)4));
+
+
+
+}
+
+
+void
+APITest::CurrentCodec(char side)
+{
+ CodecInst myCodec;
+ EventWrapper* myEvent = EventWrapper::Create();
+ if(side == 'A')
+ {
+ _acmA->SendCodec(myCodec);
+ }
+ else
+ {
+ _acmB->SendCodec(myCodec);
+ }
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\n");
+ fprintf(stdout, "Send codec in Side A\n");
+ fprintf(stdout, "----------------------------\n");
+ fprintf(stdout, "Name................. %s\n", myCodec.plname);
+ fprintf(stdout, "Sampling Frequency... %d\n", myCodec.plfreq);
+ fprintf(stdout, "Rate................. %d\n", myCodec.rate);
+ fprintf(stdout, "Payload-type......... %d\n", myCodec.pltype);
+ fprintf(stdout, "Packet-size.......... %d\n", myCodec.pacsize);
+ }
+
+ Wait(100);
+}
+
+void
+APITest::ChangeCodec(char side)
+{
+ CodecInst myCodec;
+ AudioCodingModule* myACM;
+ WebRtc_UWord8* codecCntr;
+ bool* thereIsEncoder;
+ bool* vad;
+ bool* dtx;
+ ACMVADMode* mode;
+ Channel* myChannel;
+ EventWrapper* myEvent = EventWrapper::Create();
+ // Reset and Wait
+ if(!_randomTest)
+ {
+ fprintf(stdout, "Reset Encoder Side A \n");
+ }
+ if(side == 'A')
+ {
+ myACM = _acmA;
+ codecCntr = &_codecCntrA;
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ thereIsEncoder = &_thereIsEncoderA;
+ }
+ vad = &_sendVADA;
+ dtx = &_sendDTXA;
+ mode = &_sendVADModeA;
+ myChannel = _channel_A2B;
+ }
+ else
+ {
+ myACM = _acmB;
+ codecCntr = &_codecCntrB;
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ thereIsEncoder = &_thereIsEncoderB;
+ }
+ vad = &_sendVADB;
+ dtx = &_sendDTXB;
+ mode = &_sendVADModeB;
+ myChannel = _channel_B2A;
+ }
+
+ myACM->ResetEncoder();
+ Wait(100);
+
+ // Register the next codec
+ do
+ {
+ *codecCntr = (*codecCntr < AudioCodingModule::NumberOfCodecs() - 1)?
+ (*codecCntr + 1):0;
+
+ if(*codecCntr == 0)
+ {
+ //printf("Initialize Sender Side A \n");
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsEncoder = false;
+ }
+ CHECK_ERROR_MT(myACM->InitializeSender());
+ Wait(1000);
+
+            // After initialization the CN codecs are lost; re-register them
+ if(AudioCodingModule::Codec("CN", myCodec, 8000) >= 0)
+ {
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ }
+ if(AudioCodingModule::Codec("CN", myCodec, 16000) >= 0)
+ {
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ }
+ // VAD & DTX are disabled after initialization
+ *vad = false;
+ *dtx = false;
+ _writeToFile = false;
+ }
+
+ AudioCodingModule::Codec(*codecCntr, myCodec);
+ } while(!STR_CASE_CMP(myCodec.plname, "CN") ||
+ !STR_CASE_CMP(myCodec.plname, "telephone-event") ||
+ !STR_CASE_CMP(myCodec.plname, "RED"));
+
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n====================================================================\n");
+ fprintf(stdout, " Registering New Codec %s, %d kHz, %d kbps\n",
+ myCodec.plname, myCodec.plfreq / 1000, myCodec.rate / 1000);
+ }
+ //std::cout<< std::flush;
+
+    // No DTX for super-wideband codecs at this point
+ if(myCodec.plfreq == 32000)
+ {
+ *dtx = false;
+ CHECK_ERROR_MT(myACM->SetVAD(*dtx, *vad, *mode));
+
+ }
+
+ CHECK_ERROR_MT(myACM->RegisterSendCodec(myCodec));
+ myChannel->ResetStats();
+ {
+ WriteLockScoped wl(_apiTestRWLock);
+ *thereIsEncoder = true;
+ }
+ Wait(500);
+}
+
+
+void
+APITest::LookForDTMF(char side)
+{
+ if(!_randomTest)
+ {
+ fprintf(stdout, "\n\nLooking for DTMF Signal in Side %c\n", side);
+ fprintf(stdout, "----------------------------------------\n");
+ }
+
+ if(side == 'A')
+ {
+ _acmB->RegisterIncomingMessagesCallback(NULL);
+ _acmA->RegisterIncomingMessagesCallback(_dtmfCallback);
+ Wait(1000);
+ _acmA->RegisterIncomingMessagesCallback(NULL);
+ }
+ else
+ {
+ _acmA->RegisterIncomingMessagesCallback(NULL);
+ _acmB->RegisterIncomingMessagesCallback(_dtmfCallback);
+ Wait(1000);
+ _acmB->RegisterIncomingMessagesCallback(NULL);
+ }
+}
diff --git a/src/modules/audio_coding/main/test/APITest.h b/src/modules/audio_coding/main/test/APITest.h
new file mode 100644
index 0000000..52ecb27
--- /dev/null
+++ b/src/modules/audio_coding/main/test/APITest.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_TEST_H
+#define API_TEST_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+#include "event_wrapper.h"
+#include "utility.h"
+
+enum APITESTAction {TEST_CHANGE_CODEC_ONLY = 0, DTX_TEST = 1};
+
+class APITest : public ACMTest
+{
+public:
+ APITest();
+ ~APITest();
+
+ void Perform();
+private:
+ WebRtc_Word16 SetUp();
+
+ static bool PushAudioThreadA(void* obj);
+ static bool PullAudioThreadA(void* obj);
+ static bool ProcessThreadA(void* obj);
+ static bool APIThreadA(void* obj);
+
+ static bool PushAudioThreadB(void* obj);
+ static bool PullAudioThreadB(void* obj);
+ static bool ProcessThreadB(void* obj);
+ static bool APIThreadB(void* obj);
+
+ void CheckVADStatus(char side);
+
+ // Set Min delay, get delay, playout timestamp
+ void TestDelay(char side);
+
+ // Unregister a codec & register again.
+ void TestRegisteration(char side);
+
+ // Playout Mode, background noise mode.
+ // Receiver Frequency, playout frequency.
+ void TestPlayout(char receiveSide);
+
+ // set/get receiver VAD status & mode.
+ void TestReceiverVAD(char side);
+
+ // set/get send-side VAD/DTX status & mode.
+ void TestSendVAD(char side);
+
+ void CurrentCodec(char side);
+
+ void ChangeCodec(char side);
+
+ void Wait(WebRtc_UWord32 waitLengthMs);
+
+ void LookForDTMF(char side);
+
+ void RunTest(char thread);
+
+ bool PushAudioRunA();
+ bool PullAudioRunA();
+ bool ProcessRunA();
+ bool APIRunA();
+
+ bool PullAudioRunB();
+ bool PushAudioRunB();
+ bool ProcessRunB();
+ bool APIRunB();
+
+
+
+ //--- ACMs
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ //--- Channels
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ //--- I/O files
+ // A
+ PCMFile _inFileA;
+ PCMFile _outFileA;
+ // B
+ PCMFile _outFileB;
+ PCMFile _inFileB;
+
+ //--- I/O params
+ // A
+ WebRtc_Word32 _outFreqHzA;
+ // B
+ WebRtc_Word32 _outFreqHzB;
+
+ // Should we write to file?
+ // We might skip writing to file when the
+ // test runs for a long time.
+ bool _writeToFile;
+ //--- Events
+ // A
+ EventWrapper* _pullEventA; // pulling data from ACM
+ EventWrapper* _pushEventA; // pushing data to ACM
+ EventWrapper* _processEventA; // process
+ EventWrapper* _apiEventA; // API calls
+ // B
+ EventWrapper* _pullEventB; // pulling data from ACM
+ EventWrapper* _pushEventB; // pushing data to ACM
+ EventWrapper* _processEventB; // process
+ EventWrapper* _apiEventB; // API calls
+
+ // keep track of the current codec on each side.
+ WebRtc_UWord8 _codecCntrA;
+ WebRtc_UWord8 _codecCntrB;
+
+ // keep track of tests
+ WebRtc_UWord8 _testCntrA;
+ WebRtc_UWord8 _testCntrB;
+
+ // Set to true when there is an encoder/decoder on the given side.
+ bool _thereIsEncoderA;
+ bool _thereIsEncoderB;
+ bool _thereIsDecoderA;
+ bool _thereIsDecoderB;
+
+ bool _sendVADA;
+ bool _sendDTXA;
+ ACMVADMode _sendVADModeA;
+
+ bool _sendVADB;
+ bool _sendDTXB;
+ ACMVADMode _sendVADModeB;
+
+ WebRtc_Word32 _minDelayA;
+ WebRtc_Word32 _minDelayB;
+ bool _payloadUsed[32];
+
+ AudioPlayoutMode _playoutModeA;
+ AudioPlayoutMode _playoutModeB;
+
+ ACMBackgroundNoiseMode _bgnModeA;
+ ACMBackgroundNoiseMode _bgnModeB;
+
+
+ WebRtc_UWord64 _receiveVADActivityA[3];
+ WebRtc_UWord64 _receiveVADActivityB[3];
+ bool _verbose;
+
+ int _dotPositionA;
+ int _dotMoveDirectionA;
+ int _dotPositionB;
+ int _dotMoveDirectionB;
+
+ char _movingDot[41];
+
+ DTMFDetector* _dtmfCallback;
+ VADCallback* _vadCallbackA;
+ VADCallback* _vadCallbackB;
+ RWLockWrapper& _apiTestRWLock;
+ bool _randomTest;
+ int _testNumA;
+ int _testNumB;
+};
+
+
+#endif
diff --git a/src/modules/audio_coding/main/test/Channel.cpp b/src/modules/audio_coding/main/test/Channel.cpp
new file mode 100644
index 0000000..bf440ea
--- /dev/null
+++ b/src/modules/audio_coding/main/test/Channel.cpp
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <iostream>
+
+#include "audio_coding_module.h"
+#include "Channel.h"
+#include "tick_util.h"
+#include "typedefs.h"
+#include "common_types.h"
+
+using namespace webrtc;
+
+WebRtc_Word32
+Channel::SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation)
+{
+ WebRtcRTPHeader rtpInfo;
+ WebRtc_Word32 status;
+ WebRtc_UWord16 payloadDataSize = payloadSize;
+
+ rtpInfo.header.markerBit = false;
+ rtpInfo.header.ssrc = 0;
+ rtpInfo.header.sequenceNumber = _seqNo++;
+ rtpInfo.header.payloadType = payloadType;
+ rtpInfo.header.timestamp = timeStamp;
+ if(frameType == kAudioFrameCN)
+ {
+ rtpInfo.type.Audio.isCNG = true;
+ }
+ else
+ {
+ rtpInfo.type.Audio.isCNG = false;
+ }
+ if(frameType == kFrameEmpty)
+ {
+ // Skip this frame
+ return 0;
+ }
+
+ rtpInfo.type.Audio.channel = 1;
+ // Treat fragmentation separately
+ if(fragmentation != NULL)
+ {
+ if((fragmentation->fragmentationTimeDiff[1] <= 0x3fff) && // if the silence was too long, the offset no longer fits in 14 bits; then send only the new data
+ (fragmentation->fragmentationVectorSize == 2))
+ {
+ // set the F bit (0x80) since more blocks follow
+ _payloadData[0] = 0x80 + fragmentation->fragmentationPlType[1];
+ WebRtc_UWord32 REDheader = (((WebRtc_UWord32)fragmentation->fragmentationTimeDiff[1]) << 10) + fragmentation->fragmentationLength[1];
+ _payloadData[1] = WebRtc_UWord8((REDheader >> 16) & 0x000000FF);
+ _payloadData[2] = WebRtc_UWord8((REDheader >> 8) & 0x000000FF);
+ _payloadData[3] = WebRtc_UWord8(REDheader & 0x000000FF);
+
+ _payloadData[4] = fragmentation->fragmentationPlType[0];
+ // copy the RED data
+ memcpy(_payloadData + 5,
+ payloadData + fragmentation->fragmentationOffset[1],
+ fragmentation->fragmentationLength[1]);
+ // copy the normal data
+ memcpy(_payloadData + 5 + fragmentation->fragmentationLength[1],
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+ payloadDataSize += 5;
+ } else
+ {
+ // single block (newest one)
+ memcpy(_payloadData,
+ payloadData + fragmentation->fragmentationOffset[0],
+ fragmentation->fragmentationLength[0]);
+ payloadDataSize = WebRtc_UWord16(fragmentation->fragmentationLength[0]);
+ rtpInfo.header.payloadType = fragmentation->fragmentationPlType[0];
+ }
+ }
+ else
+ {
+ memcpy(_payloadData, payloadData, payloadDataSize);
+ if(_isStereo)
+ {
+ if(_leftChannel)
+ {
+ memcpy(&_rtpInfo, &rtpInfo, sizeof(WebRtcRTPHeader));
+ _leftChannel = false;
+ rtpInfo.type.Audio.channel = 1;
+ }
+ else
+ {
+ memcpy(&rtpInfo, &_rtpInfo, sizeof(WebRtcRTPHeader));
+ _leftChannel = true;
+ rtpInfo.type.Audio.channel = 2;
+ }
+ }
+ }
+
+ _channelCritSect->Enter();
+ if(_saveBitStream)
+ {
+ //fwrite(payloadData, sizeof(WebRtc_UWord8), payloadSize, _bitStreamFile);
+ }
+
+ if(!_isStereo)
+ {
+ CalcStatistics(rtpInfo, payloadSize);
+ }
+ _lastInTimestamp = timeStamp;
+ _totalBytes += payloadDataSize;
+ _channelCritSect->Leave();
+
+ if(_useFECTestWithPacketLoss)
+ {
+ _packetLoss += 1;
+ if(_packetLoss == 3)
+ {
+ _packetLoss = 0;
+ return 0;
+ }
+ }
+
+
+ //status = _receiverACM->IncomingPayload((WebRtc_Word8*)_payloadData, payloadSize, payloadType, timeStamp);
+ status = _receiverACM->IncomingPacket((WebRtc_Word8*)_payloadData, payloadDataSize, rtpInfo);
+
+ //delete [] payloadData;
+
+ return status;
+}
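+
+// Illustrative sketch only (not called by the test): this is how the
+// fragmentation branch in SendData() above packs the 4-byte header of the
+// redundant (RED) block, RFC 2198 style. The helper name is an assumption
+// made purely for illustration.
+//   1 bit  F : set when another block follows
+//   7 bits   : payload type of the redundant block
+//   14 bits  : timestamp offset relative to the primary block
+//   10 bits  : block length in bytes
+static void PackRedBlockHeaderSketch(WebRtc_UWord8* buf,
+ WebRtc_UWord8 redPayloadType,
+ WebRtc_UWord16 timestampOffset,
+ WebRtc_UWord16 blockLengthBytes)
+{
+ // F bit plus the 7-bit payload type of the redundant block.
+ buf[0] = 0x80 | (redPayloadType & 0x7F);
+ // The remaining 24 bits hold the 14-bit offset and the 10-bit length.
+ WebRtc_UWord32 rest =
+ (((WebRtc_UWord32)(timestampOffset & 0x3FFF)) << 10) |
+ (blockLengthBytes & 0x03FF);
+ buf[1] = (WebRtc_UWord8)((rest >> 16) & 0xFF);
+ buf[2] = (WebRtc_UWord8)((rest >> 8) & 0xFF);
+ buf[3] = (WebRtc_UWord8)(rest & 0xFF);
+}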
+
+void
+Channel::CalcStatistics(
+ WebRtcRTPHeader& rtpInfo,
+ WebRtc_UWord16 payloadSize)
+{
+ int n;
+ if((rtpInfo.header.payloadType != _lastPayloadType) &&
+ (_lastPayloadType != -1))
+ {
+ // The payload-type has changed.
+ // We have to terminate the calculations for the previous payload type;
+ // we ignore the last packet of that payload type just to make things
+ // easier.
+ for(n = 0; n < MAX_NUM_PAYLOADS; n++)
+ {
+ if(_lastPayloadType == _payloadStats[n].payloadType)
+ {
+ _payloadStats[n].newPacket = true;
+ break;
+ }
+ }
+ }
+ _lastPayloadType = rtpInfo.header.payloadType;
+
+ bool newPayload = true;
+ ACMTestPayloadStats* currentPayloadStr = NULL;
+ for(n = 0; n < MAX_NUM_PAYLOADS; n++)
+ {
+ if(rtpInfo.header.payloadType == _payloadStats[n].payloadType)
+ {
+ newPayload = false;
+ currentPayloadStr = &_payloadStats[n];
+ break;
+ }
+ }
+
+ if(!newPayload)
+ {
+ if(!currentPayloadStr->newPacket)
+ {
+ WebRtc_UWord32 lastFrameSizeSample = (WebRtc_UWord32)((WebRtc_UWord32)rtpInfo.header.timestamp -
+ (WebRtc_UWord32)currentPayloadStr->lastTimestamp);
+ assert(lastFrameSizeSample > 0);
+ int k = 0;
+ while((currentPayloadStr->frameSizeStats[k].frameSizeSample !=
+ lastFrameSizeSample) &&
+ (currentPayloadStr->frameSizeStats[k].frameSizeSample != 0))
+ {
+ k++;
+ }
+ ACMTestFrameSizeStats* currentFrameSizeStats =
+ &(currentPayloadStr->frameSizeStats[k]);
+ currentFrameSizeStats->frameSizeSample = (WebRtc_Word16)lastFrameSizeSample;
+
+ // increment the number of encoded samples.
+ currentFrameSizeStats->totalEncodedSamples +=
+ lastFrameSizeSample;
+ // increment the number of received packets
+ currentFrameSizeStats->numPackets++;
+ // increment the total number of bytes (this is based on
+ // the previous payload; we don't know the frame-size of
+ // the current payload).
+ currentFrameSizeStats->totalPayloadLenByte +=
+ currentPayloadStr->lastPayloadLenByte;
+ // store the maximum payload-size (this is based on
+ // the previous payload; we don't know the frame-size of
+ // the current payload).
+ if(currentFrameSizeStats->maxPayloadLen <
+ currentPayloadStr->lastPayloadLenByte)
+ {
+ currentFrameSizeStats->maxPayloadLen =
+ currentPayloadStr->lastPayloadLenByte;
+ }
+ // store the current values for the next time
+ currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ }
+ else
+ {
+ currentPayloadStr->newPacket = false;
+ currentPayloadStr->lastPayloadLenByte = payloadSize;
+ currentPayloadStr->lastTimestamp = rtpInfo.header.timestamp;
+ currentPayloadStr->payloadType = rtpInfo.header.payloadType;
+ }
+ }
+ else
+ {
+ n = 0;
+ while(_payloadStats[n].payloadType != -1)
+ {
+ n++;
+ }
+ // first packet
+ _payloadStats[n].newPacket = false;
+ _payloadStats[n].lastPayloadLenByte = payloadSize;
+ _payloadStats[n].lastTimestamp = rtpInfo.header.timestamp;
+ _payloadStats[n].payloadType = rtpInfo.header.payloadType;
+ }
+}
+
+Channel::Channel(WebRtc_Word16 chID) :
+_receiverACM(NULL),
+_seqNo(0),
+_channelCritSect(CriticalSectionWrapper::CreateCriticalSection()),
+_bitStreamFile(NULL),
+_saveBitStream(false),
+_lastPayloadType(-1),
+_isStereo(false),
+_leftChannel(true),
+_useFECTestWithPacketLoss(false),
+_packetLoss(0),
+_lastInTimestamp(0),
+_chID(chID),
+_beginTime(TickTime::MillisecondTimestamp()),
+_totalBytes(0)
+{
+ int n;
+ int k;
+ for(n = 0; n < MAX_NUM_PAYLOADS; n++)
+ {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for(k = 0; k < MAX_NUM_FRAMESIZES; k++)
+ {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ if(chID >= 0)
+ {
+ _saveBitStream = true;
+ char bitStreamFileName[500];
+ sprintf(bitStreamFileName, "bitStream_%d.dat", chID);
+ _bitStreamFile = fopen(bitStreamFileName, "wb");
+ }
+ else
+ {
+ _saveBitStream = false;
+ }
+}
+
+Channel::~Channel()
+{
+ delete _channelCritSect;
+}
+
+void
+Channel::RegisterReceiverACM(AudioCodingModule* acm)
+{
+ _receiverACM = acm;
+ return;
+}
+
+void
+Channel::ResetStats()
+{
+ int n;
+ int k;
+ _channelCritSect->Enter();
+ _lastPayloadType = -1;
+ for(n = 0; n < MAX_NUM_PAYLOADS; n++)
+ {
+ _payloadStats[n].payloadType = -1;
+ _payloadStats[n].newPacket = true;
+ for(k = 0; k < MAX_NUM_FRAMESIZES; k++)
+ {
+ _payloadStats[n].frameSizeStats[k].frameSizeSample = 0;
+ _payloadStats[n].frameSizeStats[k].maxPayloadLen = 0;
+ _payloadStats[n].frameSizeStats[k].numPackets = 0;
+ _payloadStats[n].frameSizeStats[k].totalPayloadLenByte = 0;
+ _payloadStats[n].frameSizeStats[k].totalEncodedSamples = 0;
+ }
+ }
+ _beginTime = TickTime::MillisecondTimestamp();
+ _totalBytes = 0;
+ _channelCritSect->Leave();
+}
+
+WebRtc_Word16
+Channel::Stats(CodecInst& codecInst, ACMTestPayloadStats& payloadStats)
+{
+ _channelCritSect->Enter();
+ int n;
+ payloadStats.payloadType = -1;
+ for(n = 0; n < MAX_NUM_PAYLOADS; n++)
+ {
+ if(_payloadStats[n].payloadType == codecInst.pltype)
+ {
+ memcpy(&payloadStats, &_payloadStats[n], sizeof(ACMTestPayloadStats));
+ break;
+ }
+ }
+ if(payloadStats.payloadType == -1)
+ {
+ _channelCritSect->Leave();
+ return -1;
+ }
+ for(n = 0; n < MAX_NUM_FRAMESIZES; n++)
+ {
+ if(payloadStats.frameSizeStats[n].frameSizeSample == 0)
+ {
+ _channelCritSect->Leave();
+ return 0;
+ }
+ payloadStats.frameSizeStats[n].usageLenSec =
+ (double)payloadStats.frameSizeStats[n].totalEncodedSamples
+ / (double)codecInst.plfreq;
+
+ payloadStats.frameSizeStats[n].rateBitPerSec =
+ payloadStats.frameSizeStats[n].totalPayloadLenByte * 8 /
+ payloadStats.frameSizeStats[n].usageLenSec;
+
+ }
+ _channelCritSect->Leave();
+ return 0;
+}
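+
+// Worked example of the rate bookkeeping above (numbers are illustrative
+// only): a 16 kHz codec sending 20 ms packets has frameSizeSample = 320.
+// If 50 such packets totalling 4000 payload bytes have been collected, then
+//   usageLenSec   = 50 * 320 / 16000 = 1.0 sec
+//   rateBitPerSec = 4000 * 8 / 1.0   = 32000 bits/sec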
+
+void
+Channel::Stats(WebRtc_UWord32* numPackets)
+{
+ _channelCritSect->Enter();
+ int k;
+ int n;
+ memset(numPackets, 0, MAX_NUM_PAYLOADS * sizeof(WebRtc_UWord32));
+ for(k = 0; k < MAX_NUM_PAYLOADS; k++)
+ {
+ if(_payloadStats[k].payloadType == -1)
+ {
+ break;
+ }
+ numPackets[k] = 0;
+ for(n = 0; n < MAX_NUM_FRAMESIZES; n++)
+ {
+ if(_payloadStats[k].frameSizeStats[n].frameSizeSample == 0)
+ {
+ break;
+ }
+ numPackets[k] +=
+ _payloadStats[k].frameSizeStats[n].numPackets;
+ }
+ }
+ _channelCritSect->Leave();
+}
+
+void
+Channel::Stats(WebRtc_UWord8* payloadType, WebRtc_UWord32* payloadLenByte)
+{
+ _channelCritSect->Enter();
+
+ int k;
+ int n;
+ memset(payloadLenByte, 0, MAX_NUM_PAYLOADS * sizeof(WebRtc_UWord32));
+ for(k = 0; k < MAX_NUM_PAYLOADS; k++)
+ {
+ if(_payloadStats[k].payloadType == -1)
+ {
+ break;
+ }
+ payloadType[k] = (WebRtc_UWord8)_payloadStats[k].payloadType;
+ payloadLenByte[k] = 0;
+ for(n = 0; n < MAX_NUM_FRAMESIZES; n++)
+ {
+ if(_payloadStats[k].frameSizeStats[n].frameSizeSample == 0)
+ {
+ break;
+ }
+ payloadLenByte[k] += (WebRtc_UWord16)
+ _payloadStats[k].frameSizeStats[n].totalPayloadLenByte;
+ }
+ }
+
+ _channelCritSect->Leave();
+}
+
+
+void
+Channel::PrintStats(CodecInst& codecInst)
+{
+ ACMTestPayloadStats payloadStats;
+ Stats(codecInst, payloadStats);
+ printf("%s %d kHz\n",
+ codecInst.plname,
+ codecInst.plfreq / 1000);
+ printf("=====================================================\n");
+ if(payloadStats.payloadType == -1)
+ {
+ printf("No Packets are sent with payload-type %d (%s)\n\n",
+ codecInst.pltype,
+ codecInst.plname);
+ return;
+ }
+ for(int k = 0; k < MAX_NUM_FRAMESIZES; k++)
+ {
+ if(payloadStats.frameSizeStats[k].frameSizeSample == 0)
+ {
+ break;
+ }
+ printf("Frame-size.................... %d samples\n",
+ payloadStats.frameSizeStats[k].frameSizeSample);
+ printf("Average Rate.................. %.0f bits/sec\n",
+ payloadStats.frameSizeStats[k].rateBitPerSec);
+ printf("Maximum Payload-Size.......... %d Bytes\n",
+ payloadStats.frameSizeStats[k].maxPayloadLen);
+ printf("Maximum Instantaneous Rate.... %.0f bits/sec\n",
+ ((double)payloadStats.frameSizeStats[k].maxPayloadLen * 8.0 *
+ (double)codecInst.plfreq) /
+ (double)payloadStats.frameSizeStats[k].frameSizeSample);
+ printf("Number of Packets............. %u\n",
+ (unsigned int)payloadStats.frameSizeStats[k].numPackets);
+ printf("Duration...................... %0.3f sec\n\n",
+ payloadStats.frameSizeStats[k].usageLenSec);
+
+ }
+
+}
+
+WebRtc_UWord32
+Channel::LastInTimestamp()
+{
+ WebRtc_UWord32 timestamp;
+ _channelCritSect->Enter();
+ timestamp = _lastInTimestamp;
+ _channelCritSect->Leave();
+ return timestamp;
+}
+
+double
+Channel::BitRate()
+{
+ double rate;
+ WebRtc_UWord64 currTime = TickTime::MillisecondTimestamp();
+ _channelCritSect->Enter();
+ // bits per millisecond, i.e. kbits per second
+ rate = ((double)_totalBytes * 8.0)/ (double)(currTime - _beginTime);
+ _channelCritSect->Leave();
+ return rate;
+}
diff --git a/src/modules/audio_coding/main/test/Channel.h b/src/modules/audio_coding/main/test/Channel.h
new file mode 100644
index 0000000..396fadc
--- /dev/null
+++ b/src/modules/audio_coding/main/test/Channel.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef CHANNEL_H
+#define CHANNEL_H
+
+#include <stdio.h>
+
+#include "audio_coding_module.h"
+#include "critical_section_wrapper.h"
+#include "rw_lock_wrapper.h"
+
+
+#define MAX_NUM_PAYLOADS 50
+#define MAX_NUM_FRAMESIZES 6
+
+
+struct ACMTestFrameSizeStats
+{
+ WebRtc_UWord16 frameSizeSample;
+ WebRtc_Word16 maxPayloadLen;
+ WebRtc_UWord32 numPackets;
+ WebRtc_UWord64 totalPayloadLenByte;
+ WebRtc_UWord64 totalEncodedSamples;
+ double rateBitPerSec;
+ double usageLenSec;
+
+};
+
+struct ACMTestPayloadStats
+{
+ bool newPacket;
+ WebRtc_Word16 payloadType;
+ WebRtc_Word16 lastPayloadLenByte;
+ WebRtc_UWord32 lastTimestamp;
+ ACMTestFrameSizeStats frameSizeStats[MAX_NUM_FRAMESIZES];
+};
+
+using namespace webrtc;
+
+class Channel: public AudioPacketizationCallback
+{
+public:
+
+ Channel(
+ WebRtc_Word16 chID = -1);
+ ~Channel();
+
+ WebRtc_Word32 SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ void RegisterReceiverACM(
+ AudioCodingModule *acm);
+
+ void ResetStats();
+
+ WebRtc_Word16 Stats(
+ CodecInst& codecInst,
+ ACMTestPayloadStats& payloadStats);
+
+ void Stats(
+ WebRtc_UWord32* numPackets);
+
+ void Stats(
+ WebRtc_UWord8* payloadType,
+ WebRtc_UWord32* payloadLenByte);
+
+ void PrintStats(
+ CodecInst& codecInst);
+
+ void SetIsStereo(bool isStereo)
+ {
+ _isStereo = isStereo;
+ }
+
+ WebRtc_UWord32 LastInTimestamp();
+
+ void SetFECTestWithPacketLoss(bool usePacketLoss)
+ {
+ _useFECTestWithPacketLoss = usePacketLoss;
+ }
+
+ double BitRate();
+
+private:
+ void CalcStatistics(
+ WebRtcRTPHeader& rtpInfo,
+ WebRtc_UWord16 payloadSize);
+
+ AudioCodingModule* _receiverACM;
+ WebRtc_UWord16 _seqNo;
+ // 60 ms * 32 samples/ms (max) * 2 descriptions (primary + RED) * 2 bytes/sample
+ WebRtc_UWord8 _payloadData[60 * 32 * 2 * 2];
+
+ CriticalSectionWrapper* _channelCritSect;
+ FILE* _bitStreamFile;
+ bool _saveBitStream;
+ WebRtc_Word16 _lastPayloadType;
+ ACMTestPayloadStats _payloadStats[MAX_NUM_PAYLOADS];
+ bool _isStereo;
+ WebRtcRTPHeader _rtpInfo;
+ bool _leftChannel;
+ WebRtc_UWord32 _lastInTimestamp;
+ // FEC Test variables
+ WebRtc_Word16 _packetLoss;
+ bool _useFECTestWithPacketLoss;
+ WebRtc_Word16 _chID;
+ WebRtc_UWord64 _beginTime;
+ WebRtc_UWord64 _totalBytes;
+};
+
+
+#endif
diff --git a/src/modules/audio_coding/main/test/EncodeDecodeTest.cpp b/src/modules/audio_coding/main/test/EncodeDecodeTest.cpp
new file mode 100644
index 0000000..08555da
--- /dev/null
+++ b/src/modules/audio_coding/main/test/EncodeDecodeTest.cpp
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "EncodeDecodeTest.h"
+#include "common_types.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include "trace.h"
+#include "utility.h"
+
+Receiver::Receiver()
+:
+_playoutLengthSmpls(WEBRTC_10MS_PCM_AUDIO),
+_payloadSizeBytes(MAX_INCOMING_PAYLOAD)
+{
+}
+
+void Receiver::Setup(AudioCodingModule *acm, RTPStream *rtpStream)
+{
+ struct CodecInst recvCodec;
+ int noOfCodecs;
+ acm->InitializeReceiver();
+
+ noOfCodecs = acm->NumberOfCodecs();
+ for (int i=0; i < noOfCodecs; i++)
+ {
+ acm->Codec((WebRtc_UWord8)i, recvCodec);
+ if (acm->RegisterReceiveCodec(recvCodec) != 0)
+ {
+ printf("Unable to register codec: for run: codecId: %d\n", codeId);
+ exit(1);
+ }
+ }
+
+ char filename[128];
+ _rtpStream = rtpStream;
+ int playSampFreq;
+
+ if (testMode == 1)
+ {
+ playSampFreq=recvCodec.plfreq;
+ //output file for current run
+ sprintf(filename,"./modules/audio_coding/main/test/res_tests/out%dFile.pcm",codeId);
+ _pcmFile.Open(filename, recvCodec.plfreq, "wb+");
+ }
+ else if (testMode == 0)
+ {
+ playSampFreq=32000;
+ //output file for current run
+ sprintf(filename,"./modules/audio_coding/main/test/res_autotests/encodeDecode_out%d.pcm",codeId);
+ _pcmFile.Open(filename, 32000/*recvCodec.plfreq*/, "wb+");
+ }
+ else
+ {
+ printf("\nValid output frequencies:\n");
+ printf("8000\n16000\n32000\n-1, which means output freq equal to received signal freq");
+ printf("\n\nChoose output sampling frequency: ");
+ scanf("%d", &playSampFreq);
+ char fileName[] = "./modules/audio_coding/main/test/outFile.pcm";
+ _pcmFile.Open(fileName, 32000, "wb+");
+ }
+
+ _realPayloadSizeBytes = 0;
+ _playoutBuffer = new WebRtc_Word16[WEBRTC_10MS_PCM_AUDIO];
+ _frequency = playSampFreq;
+ _acm = acm;
+ _firstTime = true;
+}
+
+void Receiver::Teardown()
+{
+ delete [] _playoutBuffer;
+ _pcmFile.Close();
+ if (testMode > 1) Trace::ReturnTrace();
+}
+
+bool Receiver::IncomingPacket()
+{
+ if (!_rtpStream->EndOfFile())
+ {
+ if (_firstTime)
+ {
+ _firstTime = false;
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload, _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile())
+ {
+ _firstTime = true;
+ return true;
+ }
+ }
+
+ WebRtc_Word32 ok = _acm->IncomingPacket(_incomingPayload, _realPayloadSizeBytes, _rtpInfo);
+ if (ok != 0)
+ {
+ printf("Error when inserting packet to ACM, for run: codecId: %d\n", codeId);
+ exit(1);
+ }
+ _realPayloadSizeBytes = _rtpStream->Read(&_rtpInfo, _incomingPayload, _payloadSizeBytes, &_nextTime);
+ if (_realPayloadSizeBytes == 0 && _rtpStream->EndOfFile())
+ {
+ _firstTime = true;
+ }
+ }
+ return true;
+}
+
+bool Receiver::PlayoutData()
+{
+ AudioFrame audioFrame;
+
+ if (_acm->PlayoutData10Ms(_frequency, audioFrame) != 0)
+ {
+ printf("Error when calling PlayoutData10Ms, for run: codecId: %d\n", codeId);
+ exit(1);
+ }
+ if (_playoutLengthSmpls == 0)
+ {
+ return false;
+ }
+ _pcmFile.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+ return true;
+}
+
+void Receiver::Run()
+{
+ // Run on for 50 more clock ticks (1 ms each) after the end of the RTP
+ // file, so that buffered audio is still played out.
+ WebRtc_UWord8 counter500Ms = 50;
+ // Simulated clock in milliseconds.
+ WebRtc_UWord32 clock = 0;
+
+ while (counter500Ms > 0)
+ {
+ if (clock == 0 || clock >= _nextTime)
+ {
+ IncomingPacket();
+ if (clock == 0)
+ {
+ clock = _nextTime;
+ }
+ }
+ if ((clock % 10) == 0)
+ {
+ if (!PlayoutData())
+ {
+ clock++;
+ continue;
+ }
+ }
+ if (_rtpStream->EndOfFile())
+ {
+ counter500Ms--;
+ }
+ clock++;
+ }
+}
+
+EncodeDecodeTest::EncodeDecodeTest()
+{
+ _testMode = 2;
+ Trace::CreateTrace();
+ Trace::SetTraceFile("acm_encdec_test.txt");
+}
+
+EncodeDecodeTest::EncodeDecodeTest(int testMode)
+{
+ //testMode == 0 for autotest
+ //testMode == 1 for testing all codecs/parameters
+ //testMode > 1 for specific user-input test (as it was used before)
+ _testMode = testMode;
+ if(_testMode != 0)
+ {
+ Trace::CreateTrace();
+ Trace::SetTraceFile("acm_encdec_test.txt");
+ }
+}
+void EncodeDecodeTest::Perform()
+{
+
+ if(_testMode == 0)
+ {
+ printf("Running Encode/Decode Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- EncodeDecodeTest ----------");
+ }
+
+ int numCodecs = 1;
+ int codePars[3]; //freq, pacsize, rate
+ int playoutFreq[3]; //8, 16, 32k
+
+ int numPars[52]; // number of codec parameter sets (rate, freq, pacsize) to test for a given codec
+
+ codePars[0]=0;
+ codePars[1]=0;
+ codePars[2]=0;
+
+ if (_testMode == 1)
+ {
+ AudioCodingModule *acmTmp = AudioCodingModule::Create(0);
+ struct CodecInst sendCodecTmp;
+ numCodecs = acmTmp->NumberOfCodecs();
+ printf("List of supported codec.\n");
+ for(int n = 0; n < numCodecs; n++)
+ {
+ acmTmp->Codec(n, sendCodecTmp);
+ if (STR_CASE_CMP(sendCodecTmp.plname, "telephone-event") == 0) {
+ numPars[n] = 0;
+ } else if (STR_CASE_CMP(sendCodecTmp.plname, "cn") == 0) {
+ numPars[n] = 0;
+ } else if (STR_CASE_CMP(sendCodecTmp.plname, "red") == 0) {
+ numPars[n] = 0;
+ } else {
+ numPars[n] = 1;
+ printf("%d %s\n", n, sendCodecTmp.plname);
+ }
+ }
+ AudioCodingModule::Destroy(acmTmp);
+ playoutFreq[1]=16000;
+ }
+ else if (_testMode == 0)
+ {
+ AudioCodingModule *acmTmp = AudioCodingModule::Create(0);
+ numCodecs = acmTmp->NumberOfCodecs();
+ struct CodecInst dummyCodec;
+
+ // choose range of testing for codecs/parameters; signaling codecs
+ // (telephone-event, CN, RED) get no parameter set and are skipped.
+ for(int i = 0 ; i < numCodecs ; i++)
+ {
+ numPars[i] = 1;
+ acmTmp->Codec(i, dummyCodec);
+ if (STR_CASE_CMP(dummyCodec.plname, "telephone-event") == 0)
+ {
+ numPars[i] = 0;
+ } else if (STR_CASE_CMP(dummyCodec.plname, "cn") == 0) {
+ numPars[i] = 0;
+ } else if (STR_CASE_CMP(dummyCodec.plname, "red") == 0) {
+ numPars[i] = 0;
+ }
+ }
+ // Destroy the temporary ACM only after we are done querying it.
+ AudioCodingModule::Destroy(acmTmp);
+ playoutFreq[1] = 16000;
+ }
+ else
+ {
+ numCodecs = 1;
+ numPars[0] = 1;
+ playoutFreq[1]=16000;
+ }
+
+ _receiver.testMode = _testMode;
+
+ //loop over all codecs:
+ for(int codeId=0;codeId<numCodecs;codeId++)
+ {
+ //only encode using real encoders, not telephone-event, CN, or RED
+ for(int loopPars=1;loopPars<=numPars[codeId];loopPars++)
+ {
+ if (_testMode == 1)
+ {
+ printf("\n");
+ printf("***FOR RUN: codeId: %d\n",codeId);
+ printf("\n");
+ }
+ else if (_testMode == 0)
+ {
+ printf(".");
+ }
+
+ EncodeToFileTest::Perform(1, codeId, codePars, _testMode);
+
+ AudioCodingModule *acm = AudioCodingModule::Create(10);
+ RTPFile rtpFile;
+ char fileName[] = "outFile.rtp";
+ rtpFile.Open(fileName, "rb");
+
+ _receiver.codeId = codeId;
+
+ rtpFile.ReadHeader();
+ _receiver.Setup(acm, &rtpFile);
+ _receiver.Run();
+ _receiver.Teardown();
+ rtpFile.Close();
+ AudioCodingModule::Destroy(acm);
+
+ if (_testMode == 1)
+ {
+ printf("***COMPLETED RUN FOR: codecID: %d ***\n",
+ codeId);
+ }
+ }
+ }
+ if (_testMode == 0)
+ {
+ printf("Done!\n");
+ }
+ if (_testMode == 1) Trace::ReturnTrace();
+}
+
diff --git a/src/modules/audio_coding/main/test/EncodeDecodeTest.h b/src/modules/audio_coding/main/test/EncodeDecodeTest.h
new file mode 100644
index 0000000..01172f3
--- /dev/null
+++ b/src/modules/audio_coding/main/test/EncodeDecodeTest.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ENCODEDECODETEST_H
+#define ENCODEDECODETEST_H
+
+#include "EncodeToFileTest.h"
+
+#define MAX_INCOMING_PAYLOAD 8096
+#include "audio_coding_module.h"
+
+class Receiver
+{
+public:
+ Receiver();
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream);
+ void Teardown();
+ void Run();
+ bool IncomingPacket();
+ bool PlayoutData();
+
+ //for auto_test and logging
+ WebRtc_UWord8 codeId;
+ WebRtc_UWord8 testMode;
+
+private:
+ AudioCodingModule* _acm;
+ bool _rtpEOF;
+ RTPStream* _rtpStream;
+ PCMFile _pcmFile;
+ WebRtc_Word16* _playoutBuffer;
+ WebRtc_UWord16 _playoutLengthSmpls;
+ WebRtc_Word8 _incomingPayload[MAX_INCOMING_PAYLOAD];
+ WebRtc_UWord16 _payloadSizeBytes;
+ WebRtc_UWord16 _realPayloadSizeBytes;
+ WebRtc_Word32 _frequency;
+ bool _firstTime;
+ WebRtcRTPHeader _rtpInfo;
+ WebRtc_UWord32 _nextTime;
+};
+
+class EncodeDecodeTest : public EncodeToFileTest
+{
+public:
+ EncodeDecodeTest();
+ EncodeDecodeTest(int testMode);
+ virtual void Perform();
+ WebRtc_UWord16 _playoutFreq;
+ WebRtc_UWord8 _testMode;
+protected:
+ Receiver _receiver;
+};
+
+
+
+#endif
+
diff --git a/src/modules/audio_coding/main/test/EncodeToFileTest.cpp b/src/modules/audio_coding/main/test/EncodeToFileTest.cpp
new file mode 100644
index 0000000..6eed2ea
--- /dev/null
+++ b/src/modules/audio_coding/main/test/EncodeToFileTest.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "EncodeToFileTest.h"
+#include "audio_coding_module.h"
+#include "common_types.h"
+
+#ifdef WIN32
+# include <Winsock2.h>
+#else
+# include <arpa/inet.h>
+#endif
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+TestPacketization::TestPacketization(RTPStream *rtpStream, WebRtc_UWord16 frequency)
+:
+_frequency(frequency),
+_seqNo(0)
+{
+ _rtpStream = rtpStream;
+}
+
+TestPacketization::~TestPacketization()
+{
+}
+
+WebRtc_Word32 TestPacketization::SendData(
+ const FrameType /* frameType */,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* /* fragmentation */)
+{
+ _rtpStream->Write(payloadType, timeStamp, _seqNo++, payloadData, payloadSize, _frequency);
+ //delete [] payloadData;
+ return 1;
+}
+
+Sender::Sender()
+:
+_acm(NULL),
+//_payloadData(NULL),
+_payloadSize(0),
+_timeStamp(0)
+{
+}
+
+void Sender::Setup(AudioCodingModule *acm, RTPStream *rtpStream)
+{
+ acm->InitializeSender();
+ struct CodecInst sendCodec;
+ int noOfCodecs = acm->NumberOfCodecs();
+ int codecNo;
+
+ if (testMode == 1)
+ {
+ //set the codec, input file, and parameters for the current test
+ codecNo = codeId;
+ //use same input file for now
+ char fileName[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _pcmFile.Open(fileName, 32000, "rb");
+ }
+ else if (testMode == 0)
+ {
+ //set the codec, input file, and parameters for the current test
+ codecNo = codeId;
+ acm->Codec(codecNo, sendCodec);
+ //use same input file for now
+ char fileName[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _pcmFile.Open(fileName, 32000, "rb");
+ }
+ else
+ {
+ printf("List of supported codec.\n");
+ for(int n = 0; n < noOfCodecs; n++)
+ {
+ acm->Codec(n, sendCodec);
+ printf("%d %s\n", n, sendCodec.plname);
+ }
+ printf("Choose your codec:");
+
+ scanf("%d", &codecNo);
+ char fileName[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _pcmFile.Open(fileName, 32000, "rb");
+ }
+
+ acm->Codec(codecNo, sendCodec);
+ acm->RegisterSendCodec(sendCodec);
+ _packetization = new TestPacketization(rtpStream, sendCodec.plfreq);
+ if(acm->RegisterTransportCallback(_packetization) < 0)
+ {
+ printf("Registering Transport Callback failed, for run: codecId: %d: --\n",
+ codeId);
+ }
+
+ _acm = acm;
+}
+
+void Sender::Teardown()
+{
+ _pcmFile.Close();
+ delete _packetization;
+}
+
+bool Sender::Add10MsData()
+{
+ if (!_pcmFile.EndOfFile())
+ {
+ _pcmFile.Read10MsData(_audioFrame);
+ WebRtc_Word32 ok = _acm->Add10MsData(_audioFrame);
+ if (ok != 0)
+ {
+ printf("Error calling Add10MsData: for run: codecId: %d\n",
+ codeId);
+ exit(1);
+ }
+ //_audioFrame._timeStamp += _pcmFile.PayloadLength10Ms();
+ return true;
+ }
+ return false;
+}
+
+bool Sender::Process()
+{
+ WebRtc_Word32 ok = _acm->Process();
+ if (ok < 0)
+ {
+ printf("Error calling Add10MsData: for run: codecId: %d\n",
+ codeId);
+ exit(1);
+ }
+ return true;
+}
+
+void Sender::Run()
+{
+ while (true)
+ {
+ if (!Add10MsData())
+ {
+ break;
+ }
+ if (!Process()) // This could be done in a processing thread
+ {
+ break;
+ }
+ }
+}
+
+EncodeToFileTest::EncodeToFileTest()
+{
+}
+
+
+void EncodeToFileTest::Perform(int fileType, int codeId, int* codePars, int testMode)
+{
+ AudioCodingModule *acm = AudioCodingModule::Create(0);
+ RTPFile rtpFile;
+ char fileName[] = "outFile.rtp";
+ rtpFile.Open(fileName, "wb+");
+ rtpFile.WriteHeader();
+
+ //for auto_test and logging
+ _sender.testMode = testMode;
+ _sender.codeId = codeId;
+
+ _sender.Setup(acm, &rtpFile);
+ struct CodecInst sendCodecInst;
+ if(acm->SendCodec(sendCodecInst) >= 0)
+ {
+ _sender.Run();
+ }
+ _sender.Teardown();
+ rtpFile.Close();
+ AudioCodingModule::Destroy(acm);
+}
diff --git a/src/modules/audio_coding/main/test/EncodeToFileTest.h b/src/modules/audio_coding/main/test/EncodeToFileTest.h
new file mode 100644
index 0000000..fdd3804
--- /dev/null
+++ b/src/modules/audio_coding/main/test/EncodeToFileTest.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ENCODETOFILETEST_H
+#define ENCODETOFILETEST_H
+
+#include "ACMTest.h"
+#include "audio_coding_module.h"
+#include "typedefs.h"
+#include "RTPFile.h"
+#include "PCMFile.h"
+#include <stdio.h>
+
+using namespace webrtc;
+
+// TestPacketization callback which writes the encoded payloads to file
+class TestPacketization : public AudioPacketizationCallback
+{
+public:
+ TestPacketization(RTPStream *rtpStream, WebRtc_UWord16 frequency);
+ ~TestPacketization();
+ virtual WebRtc_Word32 SendData(const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+private:
+ static void MakeRTPheader(WebRtc_UWord8* rtpHeader,
+ WebRtc_UWord8 payloadType, WebRtc_Word16 seqNo,
+ WebRtc_UWord32 timeStamp, WebRtc_UWord32 ssrc);
+ RTPStream* _rtpStream;
+ WebRtc_Word32 _frequency;
+ WebRtc_Word16 _seqNo;
+};
+
+class Sender
+{
+public:
+ Sender();
+ void Setup(AudioCodingModule *acm, RTPStream *rtpStream);
+ void Teardown();
+ void Run();
+ bool Add10MsData();
+ bool Process();
+
+ //for auto_test and logging
+ WebRtc_UWord8 testMode;
+ WebRtc_UWord8 codeId;
+
+private:
+ AudioCodingModule* _acm;
+ PCMFile _pcmFile;
+ //WebRtc_Word16* _payloadData;
+ AudioFrame _audioFrame;
+ WebRtc_UWord16 _payloadSize;
+ WebRtc_UWord32 _timeStamp;
+ TestPacketization* _packetization;
+};
+
+// Test class
+class EncodeToFileTest : public ACMTest
+{
+public:
+ EncodeToFileTest();
+ virtual void Perform(int fileType, int codeId, int* codePars, int testMode);
+protected:
+ Sender _sender;
+};
+
+#endif
diff --git a/src/modules/audio_coding/main/test/PCMFile.cpp b/src/modules/audio_coding/main/test/PCMFile.cpp
new file mode 100644
index 0000000..7418a3e
--- /dev/null
+++ b/src/modules/audio_coding/main/test/PCMFile.cpp
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cctype>
+#include <stdio.h>
+#include <string.h>
+
+
+#include "PCMFile.h"
+#include "module_common_types.h"
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+
+
+PCMFile::PCMFile():
+_pcmFile(NULL),
+_nSamples10Ms(160),
+_frequency(16000),
+_endOfFile(false),
+_autoRewind(false),
+_rewinded(false),
+_timestamp(0),
+_readStereo(false),
+_saveStereo(false)
+{
+ _timestamp = (((WebRtc_UWord32)rand() & 0x0000FFFF) << 16) |
+ ((WebRtc_UWord32)rand() & 0x0000FFFF);
+}
+
+/*
+PCMFile::~PCMFile()
+{
+ if(_pcmFile != NULL)
+ {
+ fclose(_pcmFile);
+ _pcmFile = NULL;
+ }
+}
+*/
+
+WebRtc_Word16
+PCMFile::ChooseFile(
+ char* fileName,
+ WebRtc_Word16 maxLen)
+{
+ WebRtc_Word8 tmpName[MAX_FILE_NAME_LENGTH_BYTE];
+ //strcpy(_fileName, "in.pcm");
+ //printf("\n\nPlease enter the input file: ");
+ fgets(tmpName, MAX_FILE_NAME_LENGTH_BYTE, stdin);
+ tmpName[MAX_FILE_NAME_LENGTH_BYTE-1] = '\0';
+ WebRtc_Word16 n = 0;
+
+ // removing leading spaces
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (tmpName[n] != 0) &&
+ (n < MAX_FILE_NAME_LENGTH_BYTE))
+ {
+ n++;
+ }
+ if(n > 0)
+ {
+ memmove(tmpName, &tmpName[n], MAX_FILE_NAME_LENGTH_BYTE - n);
+ }
+
+ //removing trailing spaces
+ n = (WebRtc_Word16)(strlen(tmpName) - 1);
+ if(n >= 0)
+ {
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (n >= 0))
+ {
+ n--;
+ }
+ }
+ if(n >= 0)
+ {
+ tmpName[n + 1] = '\0';
+ }
+
+ WebRtc_Word16 len = (WebRtc_Word16)strlen(tmpName);
+ if(len > maxLen)
+ {
+ return -1;
+ }
+ if(len > 0)
+ {
+ strncpy(fileName, tmpName, len+1);
+ }
+ return 0;
+}
+
+WebRtc_Word16
+PCMFile::ChooseFile(
+ char* fileName,
+ WebRtc_Word16 maxLen,
+ WebRtc_UWord16* frequencyHz)
+{
+ WebRtc_Word8 tmpName[MAX_FILE_NAME_LENGTH_BYTE];
+ //strcpy(_fileName, "in.pcm");
+ //printf("\n\nPlease enter the input file: ");
+ fgets(tmpName, MAX_FILE_NAME_LENGTH_BYTE, stdin);
+ tmpName[MAX_FILE_NAME_LENGTH_BYTE-1] = '\0';
+ WebRtc_Word16 n = 0;
+
+ // removing leading spaces
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (tmpName[n] != 0) &&
+ (n < MAX_FILE_NAME_LENGTH_BYTE))
+ {
+ n++;
+ }
+ if(n > 0)
+ {
+ memmove(tmpName, &tmpName[n], MAX_FILE_NAME_LENGTH_BYTE - n);
+ }
+
+ //removing trailing spaces
+ n = (WebRtc_Word16)(strlen(tmpName) - 1);
+ if(n >= 0)
+ {
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (n >= 0))
+ {
+ n--;
+ }
+ }
+ if(n >= 0)
+ {
+ tmpName[n + 1] = '\0';
+ }
+
+ WebRtc_Word16 len = (WebRtc_Word16)strlen(tmpName);
+ if(len > maxLen)
+ {
+ return -1;
+ }
+ if(len > 0)
+ {
+ strncpy(fileName, tmpName, len+1);
+ }
+ printf("Enter the sampling frequency (in Hz) of the above file [%u]: ", *frequencyHz);
+ fgets(tmpName, 10, stdin);
+ WebRtc_UWord16 tmpFreq = (WebRtc_UWord16)atoi(tmpName);
+ if(tmpFreq > 0)
+ {
+ *frequencyHz = tmpFreq;
+ }
+ return 0;
+}
+
+void
+PCMFile::Open(
+ char* filename,
+ WebRtc_UWord16 frequency,
+ const char* mode,
+ bool autoRewind)
+{
+ if ((_pcmFile = fopen(filename, mode)) == NULL)
+ {
+ printf("Cannot open file %s.\n", filename);
+ throw "Unable to read file";
+ exit(1);
+ }
+ _frequency = frequency;
+ _nSamples10Ms = (WebRtc_UWord16)(_frequency / 100);
+ _autoRewind = autoRewind;
+ _endOfFile = false;
+ _rewinded = false;
+}
+
+WebRtc_Word32
+PCMFile::SamplingFrequency() const
+{
+ return _frequency;
+}
+
+WebRtc_UWord16
+PCMFile::PayloadLength10Ms() const
+{
+ return _nSamples10Ms;
+}
+
+WebRtc_Word32
+PCMFile::Read10MsData(
+ AudioFrame& audioFrame)
+{
+ WebRtc_UWord16 noChannels = 1;
+ if (_readStereo)
+ {
+ noChannels = 2;
+ }
+
+ WebRtc_Word32 payloadSize = (WebRtc_Word32)fread(audioFrame._payloadData, sizeof(WebRtc_UWord16), _nSamples10Ms*noChannels, _pcmFile);
+ if (payloadSize < _nSamples10Ms*noChannels) {
+ for (int k = payloadSize; k < _nSamples10Ms*noChannels; k++)
+ {
+ audioFrame._payloadData[k] = 0;
+ }
+ if(_autoRewind)
+ {
+ rewind(_pcmFile);
+ _rewinded = true;
+ }
+ else
+ {
+ _endOfFile = true;
+ }
+ }
+ audioFrame._payloadDataLengthInSamples = _nSamples10Ms;
+ audioFrame._frequencyInHz = _frequency;
+ audioFrame._audioChannel = noChannels;
+ audioFrame._timeStamp = _timestamp;
+ _timestamp += _nSamples10Ms;
+ return _nSamples10Ms;
+}
+
+void
+PCMFile::Write10MsData(
+ AudioFrame& audioFrame)
+{
+ if(audioFrame._audioChannel == 1)
+ {
+ if(!_saveStereo)
+ {
+ fwrite(audioFrame._payloadData, sizeof(WebRtc_UWord16),
+ audioFrame._payloadDataLengthInSamples, _pcmFile);
+ }
+ else
+ {
+ WebRtc_Word16* stereoAudio = new WebRtc_Word16[2 *
+ audioFrame._payloadDataLengthInSamples];
+ int k;
+ // Duplicate each mono sample into an interleaved left/right pair.
+ for(k = 0; k < audioFrame._payloadDataLengthInSamples; k++)
+ {
+ stereoAudio[k<<1] = audioFrame._payloadData[k];
+ stereoAudio[(k<<1) + 1] = audioFrame._payloadData[k];
+ }
+ fwrite(stereoAudio, sizeof(WebRtc_Word16), 2*audioFrame._payloadDataLengthInSamples,
+ _pcmFile);
+ delete [] stereoAudio;
+ }
+ }
+ else
+ {
+ fwrite(audioFrame._payloadData, sizeof(WebRtc_Word16),
+ audioFrame._audioChannel * audioFrame._payloadDataLengthInSamples, _pcmFile);
+ }
+}
+
+
+void
+PCMFile::Write10MsData(
+ WebRtc_Word16* playoutBuffer,
+ WebRtc_UWord16 playoutLengthSmpls)
+{
+ fwrite(playoutBuffer, sizeof(WebRtc_UWord16), playoutLengthSmpls, _pcmFile);
+}
+
+
+void
+PCMFile::Close()
+{
+ fclose(_pcmFile);
+ _pcmFile = NULL;
+}
+
+void
+PCMFile::Rewind()
+{
+ rewind(_pcmFile);
+ _endOfFile = false;
+}
+
+bool
+PCMFile::Rewinded()
+{
+ return _rewinded;
+}
+
+void
+PCMFile::SaveStereo(
+ bool saveStereo)
+{
+ _saveStereo = saveStereo;
+}
+
+void
+PCMFile::ReadStereo(
+ bool readStereo)
+{
+ _readStereo = readStereo;
+}
diff --git a/src/modules/audio_coding/main/test/PCMFile.h b/src/modules/audio_coding/main/test/PCMFile.h
new file mode 100644
index 0000000..dd8d06f
--- /dev/null
+++ b/src/modules/audio_coding/main/test/PCMFile.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef PCMFILE_H
+#define PCMFILE_H
+
+#include "typedefs.h"
+#include "module_common_types.h"
+#include <cstdio>
+#include <cstdlib>
+
+// class PCMStream
+// {
+// protected:
+// PCMStream(){}
+// ~PCMStream(){}
+// public:
+// virtual WebRtc_Word32 Read10MsData(AudioFrame& audioFrame) = 0;
+// virtual void Write10MsData(WebRtc_Word16 *playoutBuffer, WebRtc_UWord16 playoutLengthSmpls) = 0;
+// virtual WebRtc_UWord16 PayloadLength10Ms() const = 0;
+// virtual WebRtc_Word32 SamplingFrequency() const = 0;
+// };
+
+
+using namespace webrtc;
+
+class PCMFile /*: public PCMStream*/
+{
+public:
+ PCMFile();
+ ~PCMFile()
+ {
+ if(_pcmFile != NULL)
+ {
+ fclose(_pcmFile);
+ }
+ }
+ void Open(char *filename, WebRtc_UWord16 frequency, const char *mode, bool autoRewind = false);
+
+ WebRtc_Word32 Read10MsData(AudioFrame& audioFrame);
+
+ void Write10MsData(WebRtc_Word16 *playoutBuffer, WebRtc_UWord16 playoutLengthSmpls);
+ void Write10MsData(AudioFrame& audioFrame);
+
+ WebRtc_UWord16 PayloadLength10Ms() const;
+ WebRtc_Word32 SamplingFrequency() const;
+ void Close();
+ bool EndOfFile() const { return _endOfFile; }
+ void Rewind();
+ static WebRtc_Word16 ChooseFile(char* fileName, WebRtc_Word16 maxLen, WebRtc_UWord16* frequencyHz);
+ static WebRtc_Word16 ChooseFile(char* fileName, WebRtc_Word16 maxLen);
+ bool Rewinded();
+ void SaveStereo(
+ bool saveStereo = true);
+ void ReadStereo(
+ bool readStereo = true);
+private:
+ FILE* _pcmFile;
+ WebRtc_UWord16 _nSamples10Ms;
+ WebRtc_Word32 _frequency;
+ bool _endOfFile;
+ bool _autoRewind;
+ bool _rewinded;
+ WebRtc_UWord32 _timestamp;
+ bool _saveStereo;
+ bool _readStereo;
+};
+
+#endif
diff --git a/src/modules/audio_coding/main/test/RTPFile.cpp b/src/modules/audio_coding/main/test/RTPFile.cpp
new file mode 100644
index 0000000..23d78d7
--- /dev/null
+++ b/src/modules/audio_coding/main/test/RTPFile.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "RTPFile.h"
+#include "rw_lock_wrapper.h"
+#include "engine_configurations.h"
+#include <stdlib.h>
+
+#ifdef WIN32
+# include <Winsock2.h>
+#else
+# include <arpa/inet.h>
+#endif
+
+#include "audio_coding_module.h"
+
+void RTPStream::ParseRTPHeader(WebRtcRTPHeader* rtpInfo, const WebRtc_UWord8* rtpHeader)
+{
+ rtpInfo->header.payloadType = rtpHeader[1];
+ rtpInfo->header.sequenceNumber = (static_cast<WebRtc_UWord16>(rtpHeader[2])<<8) | rtpHeader[3];
+ rtpInfo->header.timestamp = (static_cast<WebRtc_UWord32>(rtpHeader[4])<<24) |
+ (static_cast<WebRtc_UWord32>(rtpHeader[5])<<16) |
+ (static_cast<WebRtc_UWord32>(rtpHeader[6])<<8) |
+ rtpHeader[7];
+ rtpInfo->header.ssrc = (static_cast<WebRtc_UWord32>(rtpHeader[8])<<24) |
+ (static_cast<WebRtc_UWord32>(rtpHeader[9])<<16) |
+ (static_cast<WebRtc_UWord32>(rtpHeader[10])<<8) |
+ rtpHeader[11];
+}
+
+void RTPStream::MakeRTPheader(WebRtc_UWord8* rtpHeader,
+ WebRtc_UWord8 payloadType, WebRtc_Word16 seqNo,
+ WebRtc_UWord32 timeStamp, WebRtc_UWord32 ssrc)
+{
+ rtpHeader[0]=(unsigned char)0x80;
+ rtpHeader[1]=(unsigned char)(payloadType & 0xFF);
+ rtpHeader[2]=(unsigned char)((seqNo>>8)&0xFF);
+ rtpHeader[3]=(unsigned char)((seqNo)&0xFF);
+ rtpHeader[4]=(unsigned char)((timeStamp>>24)&0xFF);
+ rtpHeader[5]=(unsigned char)((timeStamp>>16)&0xFF);
+
+ rtpHeader[6]=(unsigned char)((timeStamp>>8)&0xFF);
+ rtpHeader[7]=(unsigned char)(timeStamp & 0xFF);
+
+ rtpHeader[8]=(unsigned char)((ssrc>>24)&0xFF);
+ rtpHeader[9]=(unsigned char)((ssrc>>16)&0xFF);
+
+ rtpHeader[10]=(unsigned char)((ssrc>>8)&0xFF);
+ rtpHeader[11]=(unsigned char)(ssrc & 0xFF);
+}
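+
+// The two helpers above write and parse a minimal, fixed 12-byte RTP header
+// with no CSRC list and no extension:
+//   byte 0     : 0x80 (version 2, no padding, no extension, CC = 0)
+//   byte 1     : payload type (the marker bit is not used here)
+//   bytes 2-3  : sequence number, network byte order
+//   bytes 4-7  : timestamp, network byte order
+//   bytes 8-11 : SSRC, network byte order
+// For example (illustrative values only), payload type 103, sequence number 1,
+// timestamp 160 and SSRC 0 serialize to:
+//   80 67 00 01 00 00 00 A0 00 00 00 00 (hex)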
+
+
+RTPPacket::RTPPacket(WebRtc_UWord8 payloadType, WebRtc_UWord32 timeStamp,
+ WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency)
+ :
+payloadType(payloadType),
+timeStamp(timeStamp),
+seqNo(seqNo),
+payloadData(NULL), // stays NULL when payloadSize == 0, so delete [] is safe
+payloadSize(payloadSize),
+frequency(frequency)
+{
+ if (payloadSize > 0)
+ {
+ this->payloadData = new WebRtc_UWord8[payloadSize];
+ memcpy(this->payloadData, payloadData, payloadSize);
+ }
+}
+
+RTPPacket::~RTPPacket()
+{
+ delete [] payloadData;
+}
+
+RTPBuffer::RTPBuffer()
+{
+ _queueRWLock = RWLockWrapper::CreateRWLock();
+}
+
+RTPBuffer::~RTPBuffer()
+{
+ delete _queueRWLock;
+}
+
+void
+RTPBuffer::Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency)
+{
+ RTPPacket *packet = new RTPPacket(payloadType, timeStamp, seqNo, payloadData, payloadSize, frequency);
+ _queueRWLock->AcquireLockExclusive();
+ _rtpQueue.push(packet);
+ _queueRWLock->ReleaseLockExclusive();
+}
+
+WebRtc_UWord16
+RTPBuffer::Read(WebRtcRTPHeader* rtpInfo,
+ WebRtc_Word8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ WebRtc_UWord32* offset)
+{
+ // Popping modifies the queue, so take the exclusive (write) lock.
+ _queueRWLock->AcquireLockExclusive();
+ RTPPacket *packet = _rtpQueue.front();
+ _rtpQueue.pop();
+ _queueRWLock->ReleaseLockExclusive();
+ rtpInfo->header.markerBit = 1;
+ rtpInfo->header.payloadType = packet->payloadType;
+ rtpInfo->header.sequenceNumber = packet->seqNo;
+ rtpInfo->header.ssrc = 0;
+ rtpInfo->header.timestamp = packet->timeStamp;
+ if (packet->payloadSize > 0 && payloadSize >= packet->payloadSize)
+ {
+ memcpy(payloadData, packet->payloadData, packet->payloadSize);
+ }
+ else
+ {
+ throw "Payload buffer too small";
+ exit(1);
+ }
+/*#ifdef WEBRTC_CODEC_G722
+ if(ACMCodecDB::_mycodecs[ACMCodecDB::g722].pltype == packet->payloadType)
+ {
+ *offset = (packet->timeStamp/(packet->frequency/1000))<<1;
+ }
+ else
+ {
+#endif*/
+ *offset = (packet->timeStamp/(packet->frequency/1000));
+/*#ifdef WEBRTC_CODEC_G722
+ }
+#endif*/
+ return packet->payloadSize;
+}
+
+bool
+RTPBuffer::EndOfFile() const
+{
+ _queueRWLock->AcquireLockShared();
+ bool eof = _rtpQueue.empty();
+ _queueRWLock->ReleaseLockShared();
+ return eof;
+}
+
+void RTPFile::Open(char *filename, const char *mode)
+{
+ if ((_rtpFile = fopen(filename, mode)) == NULL)
+ {
+ printf("Cannot write file %s.\n", filename);
+ throw "Unable to write file";
+ exit(1);
+ }
+}
+
+void RTPFile::Close()
+{
+ if (_rtpFile != NULL)
+ {
+ fclose(_rtpFile);
+ _rtpFile = NULL;
+ }
+}
+
+
+void RTPFile::WriteHeader()
+{
+ // Write data in a format that NetEQ and RTP Play can parse
+ fprintf(_rtpFile, "#!RTPencode%s\n", "1.0");
+ WebRtc_UWord32 dummy_variable = 0; // should be converted to network endian format, but does not matter when 0
+ fwrite(&dummy_variable, 4, 1, _rtpFile);
+ fwrite(&dummy_variable, 4, 1, _rtpFile);
+ fwrite(&dummy_variable, 4, 1, _rtpFile);
+ fwrite(&dummy_variable, 2, 1, _rtpFile);
+ fwrite(&dummy_variable, 2, 1, _rtpFile);
+ fflush(_rtpFile);
+}
+
+void RTPFile::ReadHeader()
+{
+ WebRtc_UWord32 start_sec, start_usec, source;
+ WebRtc_UWord16 port, padding;
+ char fileHeader[40];
+ fgets(fileHeader, 40, _rtpFile);
+ fread(&start_sec, 4, 1, _rtpFile);
+ start_sec=ntohl(start_sec);
+ fread(&start_usec, 4, 1, _rtpFile);
+ start_usec=ntohl(start_usec);
+ fread(&source, 4, 1, _rtpFile);
+ source=ntohl(source);
+ fread(&port, 2, 1, _rtpFile);
+ port=ntohs(port);
+ fread(&padding, 2, 1, _rtpFile);
+ padding=ntohs(padding);
+}
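+
+// WriteHeader()/ReadHeader() above use an rtpplay/NetEQ compatible file
+// header: a text line "#!RTPencode1.0\n" followed by five binary fields in
+// network byte order: start_sec (4 bytes), start_usec (4 bytes),
+// source address (4 bytes), port (2 bytes) and padding (2 bytes).
+// WriteHeader() leaves all five fields zero, which the readers accept.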
+
+void RTPFile::Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency)
+{
+ /* write RTP packet to file */
+ WebRtc_UWord8 rtpHeader[12];
+ MakeRTPheader(rtpHeader, payloadType, seqNo, timeStamp, 0);
+ WebRtc_UWord16 lengthBytes = htons(12 + payloadSize + 8);
+ WebRtc_UWord16 plen = htons(12 + payloadSize);
+ WebRtc_UWord32 offsetMs;
+/*#ifdef WEBRTC_CODEC_G722
+ if(ACMCodecDB::_mycodecs[ACMCodecDB::g722].pltype == payloadType)
+ {
+ offsetMs = (timeStamp/(frequency/1000))<<1;
+ }
+ else
+ {
+#endif*/
+ offsetMs = (timeStamp/(frequency/1000));
+/*#ifdef WEBRTC_CODEC_G722
+ }
+#endif*/
+ offsetMs = htonl(offsetMs);
+ fwrite(&lengthBytes, 2, 1, _rtpFile);
+ fwrite(&plen, 2, 1, _rtpFile);
+ fwrite(&offsetMs, 4, 1, _rtpFile);
+ fwrite(rtpHeader, 12, 1, _rtpFile);
+ fwrite(payloadData, 1, payloadSize, _rtpFile);
+}
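+
+// Each packet record written by Write() and consumed by Read() has the form
+// (multi-byte fields in network byte order):
+//   2 bytes  : record length        = 12 + payloadSize + 8
+//   2 bytes  : packet length        = 12 + payloadSize
+//   4 bytes  : arrival offset in ms = timeStamp / (frequency / 1000)
+//   12 bytes : RTP header (see MakeRTPheader above)
+//   N bytes  : payload
+// Read() recovers the payload size as recordLength - 20, i.e. the 12-byte
+// RTP header plus the 8-byte length/offset preamble are stripped off.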
+
+WebRtc_UWord16 RTPFile::Read(WebRtcRTPHeader* rtpInfo,
+ WebRtc_Word8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ WebRtc_UWord32* offset)
+{
+ WebRtc_UWord16 lengthBytes;
+ WebRtc_UWord16 plen;
+ WebRtc_UWord8 rtpHeader[12];
+ fread(&lengthBytes, 2, 1, _rtpFile);
+ if (feof(_rtpFile))
+ {
+ _rtpEOF = true;
+ return 0;
+ }
+ fread(&plen, 2, 1, _rtpFile);
+ if (feof(_rtpFile))
+ {
+ _rtpEOF = true;
+ return 0;
+ }
+ fread(offset, 4, 1, _rtpFile);
+ if (feof(_rtpFile))
+ {
+ _rtpEOF = true;
+ return 0;
+ }
+ lengthBytes = ntohs(lengthBytes);
+ plen = ntohs(plen);
+ *offset = ntohl(*offset);
+ if (plen < 12)
+ {
+ throw "Unable to read RTP file";
+ exit(1);
+ }
+ fread(rtpHeader, 12, 1, _rtpFile);
+ if (feof(_rtpFile))
+ {
+ _rtpEOF = true;
+ return 0;
+ }
+ ParseRTPHeader(rtpInfo, rtpHeader);
+ rtpInfo->type.Audio.isCNG = false;
+ rtpInfo->type.Audio.channel = 1;
+ if (lengthBytes != plen + 8)
+ {
+ throw "Length parameters in RTP file doesn't match";
+ exit(1);
+ }
+ if (plen == 0)
+ {
+ return 0;
+ }
+ else if (lengthBytes - 20 > payloadSize)
+ {
+ throw "Payload buffer too small";
+ exit(1);
+ }
+ lengthBytes -= 20;
+ fread(payloadData, 1, lengthBytes, _rtpFile);
+ if (feof(_rtpFile))
+ {
+ _rtpEOF = true;
+ }
+ return lengthBytes;
+}
+
diff --git a/src/modules/audio_coding/main/test/RTPFile.h b/src/modules/audio_coding/main/test/RTPFile.h
new file mode 100644
index 0000000..23a43d0
--- /dev/null
+++ b/src/modules/audio_coding/main/test/RTPFile.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef RTPFILE_H
+#define RTPFILE_H
+
+#include "audio_coding_module.h"
+#include "module_common_types.h"
+#include "typedefs.h"
+#include "rw_lock_wrapper.h"
+#include <stdio.h>
+#include <queue>
+
+using namespace webrtc;
+
+class RTPStream
+{
+public:
+ virtual ~RTPStream(){}
+
+ virtual void Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency) = 0;
+ virtual WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
+ WebRtc_Word8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ WebRtc_UWord32* offset) = 0;
+ virtual bool EndOfFile() const = 0;
+
+protected:
+ void MakeRTPheader(WebRtc_UWord8* rtpHeader,
+ WebRtc_UWord8 payloadType, WebRtc_Word16 seqNo,
+ WebRtc_UWord32 timeStamp, WebRtc_UWord32 ssrc);
+ void ParseRTPHeader(WebRtcRTPHeader* rtpInfo, const WebRtc_UWord8* rtpHeader);
+};
+
+class RTPPacket
+{
+public:
+ RTPPacket(WebRtc_UWord8 payloadType, WebRtc_UWord32 timeStamp,
+ WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency);
+ ~RTPPacket();
+ WebRtc_UWord8 payloadType;
+ WebRtc_UWord32 timeStamp;
+ WebRtc_Word16 seqNo;
+ WebRtc_UWord8* payloadData;
+ WebRtc_UWord16 payloadSize;
+ WebRtc_UWord32 frequency;
+};
+
+class RTPBuffer : public RTPStream
+{
+public:
+ RTPBuffer();
+ ~RTPBuffer();
+ void Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency);
+ WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
+ WebRtc_Word8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ WebRtc_UWord32* offset);
+ virtual bool EndOfFile() const;
+private:
+ RWLockWrapper* _queueRWLock;
+ std::queue<RTPPacket *> _rtpQueue;
+};
+
+class RTPFile : public RTPStream
+{
+public:
+ ~RTPFile(){}
+ RTPFile() : _rtpFile(NULL),_rtpEOF(false) {}
+ void Open(char *outFilename, const char *mode);
+ void Close();
+ void WriteHeader();
+ void ReadHeader();
+ void Write(const WebRtc_UWord8 payloadType, const WebRtc_UWord32 timeStamp,
+ const WebRtc_Word16 seqNo, const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize, WebRtc_UWord32 frequency);
+ WebRtc_UWord16 Read(WebRtcRTPHeader* rtpInfo,
+ WebRtc_Word8* payloadData,
+ WebRtc_UWord16 payloadSize,
+ WebRtc_UWord32* offset);
+ bool EndOfFile() const { return _rtpEOF; }
+private:
+ FILE* _rtpFile;
+ bool _rtpEOF;
+};
+
+#endif
diff --git a/src/modules/audio_coding/main/test/SpatialAudio.cpp b/src/modules/audio_coding/main/test/SpatialAudio.cpp
new file mode 100644
index 0000000..016ec0a
--- /dev/null
+++ b/src/modules/audio_coding/main/test/SpatialAudio.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <math.h>
+
+#include "SpatialAudio.h"
+#include "utility.h"
+#include "trace.h"
+#include "common_types.h"
+
+using namespace webrtc;
+
+#define NUM_PANN_COEFFS 10
+
+SpatialAudio::SpatialAudio(int testMode)
+{
+ _testMode = testMode;
+}
+
+SpatialAudio::~SpatialAudio()
+{
+ AudioCodingModule::Destroy(_acmLeft);
+ AudioCodingModule::Destroy(_acmRight);
+ AudioCodingModule::Destroy(_acmReceiver);
+ delete _channel;
+ _inFile.Close();
+ _outFile.Close();
+}
+
+WebRtc_Word16
+SpatialAudio::Setup()
+{
+ // Create ACMs and the Channel;
+ _acmLeft = AudioCodingModule::Create(1);
+ _acmRight = AudioCodingModule::Create(2);
+ _acmReceiver = AudioCodingModule::Create(3);
+ _channel = new Channel;
+
+ // Register callback for the sender side.
+ CHECK_ERROR(_acmLeft->RegisterTransportCallback(_channel));
+ CHECK_ERROR(_acmRight->RegisterTransportCallback(_channel));
+    // Register the receiver ACM with the Channel
+ _channel->RegisterReceiverACM(_acmReceiver);
+
+ WebRtc_Word8 audioFileName[MAX_FILE_NAME_LENGTH_BYTE];
+ WebRtc_UWord16 sampFreqHz = 32000;
+
+ strncpy(audioFileName, "./modules/audio_coding/main/test/testfile32kHz.pcm",
+ MAX_FILE_NAME_LENGTH_BYTE - 1);
+ if(_testMode == 1)
+ {
+ printf("Enter the input file [%s]: ", audioFileName);
+ PCMFile::ChooseFile(audioFileName, MAX_FILE_NAME_LENGTH_BYTE, &sampFreqHz);
+ }
+ _inFile.Open(audioFileName, sampFreqHz, "rb", false);
+
+ if(_testMode == 0)
+ {
+ strncpy(audioFileName, "./modules/audio_coding/main/test/res_autotests/out_spatial_autotest.pcm",
+ MAX_FILE_NAME_LENGTH_BYTE - 1);
+ }
+ else if(_testMode == 1)
+ {
+ printf("\n");
+ strncpy(audioFileName, "./modules/audio_coding/main/test/res_tests/testspatial_out.pcm",
+ MAX_FILE_NAME_LENGTH_BYTE - 1);
+ printf("Enter the output file [%s]: ", audioFileName);
+ PCMFile::ChooseFile(audioFileName, MAX_FILE_NAME_LENGTH_BYTE, &sampFreqHz);
+ }
+ else
+ {
+ strncpy(audioFileName, "./modules/audio_coding/main/test/res_tests/testspatial_out.pcm",
+ MAX_FILE_NAME_LENGTH_BYTE - 1);
+ }
+ _outFile.Open(audioFileName, sampFreqHz, "wb", false);
+ _outFile.SaveStereo(true);
+
+
+    // Register a couple of codecs as receive codecs
+ CodecInst codecInst;
+
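+    // Note: the numeric indices passed to Codec() below index the ACM's internal codec
+    // table; which codec each index maps to depends on the codecs compiled into the ACM.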
+ _acmLeft->Codec((WebRtc_UWord8)0, codecInst);
+ codecInst.channels = 2;
+ CHECK_ERROR(_acmReceiver->RegisterReceiveCodec(codecInst));
+
+ _acmLeft->Codec((WebRtc_UWord8)3, codecInst);
+ codecInst.channels = 2;
+ CHECK_ERROR(_acmReceiver->RegisterReceiveCodec(codecInst));
+
+ _acmLeft->Codec((WebRtc_UWord8)1, codecInst);
+ CHECK_ERROR(_acmReceiver->RegisterReceiveCodec(codecInst));
+
+ _acmLeft->Codec((WebRtc_UWord8)4, codecInst);
+ CHECK_ERROR(_acmReceiver->RegisterReceiveCodec(codecInst));
+
+ return 0;
+}
+
+void
+SpatialAudio::Perform()
+{
+ if(_testMode == 0)
+ {
+ printf("Running SpatialAudio Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- SpatialAudio ----------");
+ }
+
+ Setup();
+
+ CodecInst codecInst;
+ _acmLeft->Codec((WebRtc_UWord8)1, codecInst);
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ EncodeDecode();
+
+ WebRtc_Word16 pannCntr = 0;
+
+ double leftPanning[NUM_PANN_COEFFS] =
+ {1.00, 0.95, 0.90, 0.85, 0.80, 0.75, 0.70, 0.60, 0.55, 0.50};
+ double rightPanning[NUM_PANN_COEFFS] =
+ {0.50, 0.55, 0.60, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95, 1.00};
+
+ while((pannCntr + 1) < NUM_PANN_COEFFS)
+ {
+ _acmLeft->Codec((WebRtc_UWord8)0, codecInst);
+ codecInst.pacsize = 480;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr++;
+
+ // Change codec
+ _acmLeft->Codec((WebRtc_UWord8)3, codecInst);
+ codecInst.pacsize = 320;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr++;
+ if(_testMode == 0)
+ {
+ printf(".");
+ }
+ }
+
+ _acmLeft->Codec((WebRtc_UWord8)4, codecInst);
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ EncodeDecode();
+
+ _acmLeft->Codec((WebRtc_UWord8)0, codecInst);
+ codecInst.pacsize = 480;
+ CHECK_ERROR(_acmLeft->RegisterSendCodec(codecInst));
+ CHECK_ERROR(_acmRight->RegisterSendCodec(codecInst));
+ pannCntr = NUM_PANN_COEFFS -1;
+ while(pannCntr >= 0)
+ {
+ EncodeDecode(leftPanning[pannCntr], rightPanning[pannCntr]);
+ pannCntr--;
+ if(_testMode == 0)
+ {
+ printf(".");
+ }
+ }
+ if(_testMode == 0)
+ {
+ printf("Done!\n");
+ }
+}
+
+void
+SpatialAudio::EncodeDecode(
+ const double leftPanning,
+ const double rightPanning)
+{
+ AudioFrame audioFrame;
+ WebRtc_Word32 outFileSampFreq = _outFile.SamplingFrequency();
+
+ const double rightToLeftRatio = rightPanning / leftPanning;
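+    // The frame read below is first scaled by leftPanning and fed to the left ACM; the
+    // same, already-scaled buffer is then multiplied by rightPanning/leftPanning, so the
+    // right ACM effectively receives the input scaled by rightPanning.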
+
+ _channel->SetIsStereo(true);
+
+ while(!_inFile.EndOfFile())
+ {
+ _inFile.Read10MsData(audioFrame);
+ for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+ {
+ audioFrame._payloadData[n] = (WebRtc_Word16)floor(
+ audioFrame._payloadData[n] * leftPanning + 0.5);
+ }
+ CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
+
+ for(int n = 0; n < audioFrame._payloadDataLengthInSamples; n++)
+ {
+ audioFrame._payloadData[n] = (WebRtc_Word16)floor(
+ audioFrame._payloadData[n] * rightToLeftRatio + 0.5);
+ }
+ CHECK_ERROR(_acmRight->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmLeft->Process());
+ CHECK_ERROR(_acmRight->Process());
+
+ CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, audioFrame));
+ _outFile.Write10MsData(audioFrame);
+ }
+ _inFile.Rewind();
+}
+
+void
+SpatialAudio::EncodeDecode()
+{
+ AudioFrame audioFrame;
+ WebRtc_Word32 outFileSampFreq = _outFile.SamplingFrequency();
+
+ _channel->SetIsStereo(false);
+
+ while(!_inFile.EndOfFile())
+ {
+ _inFile.Read10MsData(audioFrame);
+ CHECK_ERROR(_acmLeft->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmLeft->Process());
+
+ CHECK_ERROR(_acmReceiver->PlayoutData10Ms(outFileSampFreq, audioFrame));
+ _outFile.Write10MsData(audioFrame);
+ }
+ _inFile.Rewind();
+}
+
+
diff --git a/src/modules/audio_coding/main/test/SpatialAudio.h b/src/modules/audio_coding/main/test/SpatialAudio.h
new file mode 100644
index 0000000..6a137d4
--- /dev/null
+++ b/src/modules/audio_coding/main/test/SpatialAudio.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_TEST_SPATIAL_AUDIO_H
+#define ACM_TEST_SPATIAL_AUDIO_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+#include "audio_coding_module.h"
+#include "utility.h"
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+
+class SpatialAudio : public ACMTest
+{
+public:
+ SpatialAudio(int testMode);
+ ~SpatialAudio();
+
+ void Perform();
+private:
+ WebRtc_Word16 Setup();
+ void EncodeDecode(double leftPanning, double rightPanning);
+ void EncodeDecode();
+
+ AudioCodingModule* _acmLeft;
+ AudioCodingModule* _acmRight;
+ AudioCodingModule* _acmReceiver;
+ Channel* _channel;
+ PCMFile _inFile;
+ PCMFile _outFile;
+ int _testMode;
+};
+#endif
diff --git a/src/modules/audio_coding/main/test/TestAllCodecs.cpp b/src/modules/audio_coding/main/test/TestAllCodecs.cpp
new file mode 100644
index 0000000..35e5b0a
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestAllCodecs.cpp
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TestAllCodecs.h"
+
+#include "audio_coding_module_typedefs.h"
+#include "common_types.h"
+#include "engine_configurations.h"
+#include <cassert>
+#include <iostream>
+#include "trace.h"
+#include "utility.h"
+
+// Class for simulating packet handling
+TestPack::TestPack():
+_receiverACM(NULL),
+_seqNo(0),
+_timeStampDiff(0),
+_lastInTimestamp(0),
+_totalBytes(0),
+_payloadSize(0)
+{
+}
+TestPack::~TestPack()
+{
+}
+
+void
+TestPack::RegisterReceiverACM(AudioCodingModule* acm)
+{
+ _receiverACM = acm;
+ return;
+}
+WebRtc_Word32
+TestPack::SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation)
+{
+ WebRtcRTPHeader rtpInfo;
+ WebRtc_Word32 status;
+ WebRtc_UWord16 payloadDataSize = payloadSize;
+
+ rtpInfo.header.markerBit = false;
+ rtpInfo.header.ssrc = 0;
+ rtpInfo.header.sequenceNumber = _seqNo++;
+ rtpInfo.header.payloadType = payloadType;
+ rtpInfo.header.timestamp = timeStamp;
+ if(frameType == kAudioFrameCN)
+ {
+ rtpInfo.type.Audio.isCNG = true;
+ }
+ else
+ {
+ rtpInfo.type.Audio.isCNG = false;
+ }
+ if(frameType == kFrameEmpty)
+ {
+ // Skip this frame
+ return 0;
+ }
+
+ rtpInfo.type.Audio.channel = 1;
+ memcpy(_payloadData, payloadData, payloadDataSize);
+
+ status = _receiverACM->IncomingPacket((WebRtc_Word8*)_payloadData, payloadDataSize, rtpInfo);
+
+ _payloadSize = payloadDataSize;
+ _timeStampDiff = timeStamp - _lastInTimestamp;
+ _lastInTimestamp = timeStamp;
+ _totalBytes += payloadDataSize;
+ return status;
+}
+
+WebRtc_UWord16
+TestPack::GetPayloadSize()
+{
+ return _payloadSize;
+}
+
+
+WebRtc_UWord32
+TestPack::GetTimeStampDiff()
+{
+ return _timeStampDiff;
+}
+
+void
+TestPack::ResetPayloadSize()
+{
+ _payloadSize = 0;
+}
+
+TestAllCodecs::TestAllCodecs(int testMode):
+_acmA(NULL),
+_acmB(NULL),
+_channelA2B(NULL),
+_testCntr(0),
+_packSizeSamp(0),
+_packSizeBytes(0),
+_counter(0)
+{
+ // testMode = 0 for silent test (auto test)
+ _testMode = testMode;
+}
+
+using namespace std;
+TestAllCodecs::~TestAllCodecs()
+{
+ if(_acmA != NULL)
+ {
+ AudioCodingModule::Destroy(_acmA);
+ _acmA = NULL;
+ }
+ if(_acmB != NULL)
+ {
+ AudioCodingModule::Destroy(_acmB);
+ _acmB = NULL;
+ }
+ if(_channelA2B != NULL)
+ {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestAllCodecs::Perform()
+{
+
+ char file[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _inFileA.Open(file, 32000, "rb");
+
+ if(_testMode == 0)
+ {
+ printf("Running All Codecs Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
+ "---------- TestAllCodecs ----------");
+ }
+
+ _acmA = AudioCodingModule::Create(0);
+ _acmB = AudioCodingModule::Create(1);
+
+ _acmA->InitializeReceiver();
+ _acmB->InitializeReceiver();
+
+ WebRtc_UWord8 numEncoders = _acmA->NumberOfCodecs();
+ CodecInst myCodecParam;
+
+ for(WebRtc_UWord8 n = 0; n < numEncoders; n++)
+ {
+ _acmB->Codec(n, myCodecParam);
+ _acmB->RegisterReceiveCodec(myCodecParam);
+ }
+
+ // Create and connect the channel
+ _channelA2B = new TestPack;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB);
+
+ // All codecs are tested for all allowed sampling frequencies, rates and packet sizes
+#ifdef WEBRTC_CODEC_GSMAMR
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecAMR[] = "AMR";
+ RegisterSendCodec('A', codecAMR, 8000, 4750, 160, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 4750, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 4750, 480, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5150, 160, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5150, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5150, 480, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5900, 160, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5900, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 5900, 480, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 6700, 160, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 6700, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 6700, 480, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7400, 160, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7400, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7400, 480, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7950, 160, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7950, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 7950, 480, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 10200, 160, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 10200, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 10200, 480, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 12200, 160, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 12200, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMR, 8000, 12200, 480, 3);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_GSMAMRWB
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ char codecAMRWB[] = "AMR-WB";
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecAMRWB, 16000, 7000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 7000, 640, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 7000, 960, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 9000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 9000, 640, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 9000, 960, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 12000, 320, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 12000, 640, 6);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 12000, 960, 8);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 14000, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 14000, 640, 4);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 14000, 960, 5);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 16000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 16000, 640, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 16000, 960, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 18000, 320, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 18000, 640, 4);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 18000, 960, 5);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 20000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 20000, 640, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 20000, 960, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 23000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 23000, 640, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 23000, 960, 3);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 24000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 24000, 640, 2);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecAMRWB, 16000, 24000, 960, 2);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_G722
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG722[] = "G722";
+ RegisterSendCodec('A', codecG722, 16000, 64000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 480, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 640, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 800, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 960, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_G722_1
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG7221_1[] = "G7221";
+ RegisterSendCodec('A', codecG7221_1, 16000, 32000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7221_1, 16000, 24000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7221_1, 16000, 16000, 320, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_G722_1C
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG7221_2[] = "G7221";
+ RegisterSendCodec('A', codecG7221_2, 32000, 48000, 640, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7221_2, 32000, 32000, 640, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7221_2, 32000, 24000, 640, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_G729
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG729[] = "G729";
+ RegisterSendCodec('A', codecG729, 8000, 8000, 80, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG729, 8000, 8000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG729, 8000, 8000, 240, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG729, 8000, 8000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG729, 8000, 8000, 400, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG729, 8000, 8000, 480, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_G729_1
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG7291[] = "G7291";
+ RegisterSendCodec('A', codecG7291, 16000, 8000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 8000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 8000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 12000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 12000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 12000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 14000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 14000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 14000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 16000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 16000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 16000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 18000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 18000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 18000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 20000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 20000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 20000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 22000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 22000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 22000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 24000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 24000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 24000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 26000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 26000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 26000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 28000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 28000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 28000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 30000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 30000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 30000, 960, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 32000, 320, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 32000, 640, 1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG7291, 16000, 32000, 960, 1);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_GSMFR
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecGSM[] = "GSM";
+ RegisterSendCodec('A', codecGSM, 8000, 13200, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecGSM, 8000, 13200, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecGSM, 8000, 13200, 480, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecILBC[] = "ILBC";
+ RegisterSendCodec('A', codecILBC, 8000, 13300, 240, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecILBC, 8000, 13300, 480, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecILBC, 8000, 15200, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecILBC, 8000, 15200, 320, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX))
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecISAC[] = "ISAC";
+ RegisterSendCodec('A', codecISAC, 16000, -1, 480, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 16000, -1, 960, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 16000, 15000, 480, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 16000, 32000, 960, -1);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_ISAC
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecISAC, 32000, -1, 960, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 32000, 56000, 960, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 32000, 37000, 960, -1);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecISAC, 32000, 32000, 960, -1);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecL16[] = "L16";
+ RegisterSendCodec('A', codecL16, 8000, 128000, 80, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 240, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 320, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 480, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 640, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecL16, 32000, 512000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 32000, 512000, 640, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecPCMA[] = "PCMA";
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 80, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 240, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 400, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 480, 0);
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ Run(_channelA2B);
+ char codecPCMU[] = "PCMU";
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 80, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 240, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 400, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 480, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#ifdef WEBRTC_CODEC_SPEEX
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecSPEEX[] = "SPEEX";
+ RegisterSendCodec('A', codecSPEEX, 8000, 2400, 160, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecSPEEX, 8000, 8000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecSPEEX, 8000, 18200, 480, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecSPEEX, 16000, 4000, 320, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecSPEEX, 16000, 12800, 640, 0);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecSPEEX, 16000, 34200, 960, 0);
+ Run(_channelA2B);
+ _outFileB.Close();
+#endif
+
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf("Done!\n");
+ }
+
+ /* Print out all codecs that were not tested in the run */
+
+
+ if(_testMode != 0) {
+ printf("The following codecs was not included in the test:\n");
+#ifndef WEBRTC_CODEC_GSMAMR
+ printf(" GSMAMR\n");
+#endif
+#ifndef WEBRTC_CODEC_GSMAMRWB
+ printf(" GSMAMR-wb\n");
+#endif
+#ifndef WEBRTC_CODEC_G722
+ printf(" G.722\n");
+#endif
+#ifndef WEBRTC_CODEC_G722_1
+ printf(" G.722.1\n");
+#endif
+#ifndef WEBRTC_CODEC_G722_1C
+ printf(" G.722.1C\n");
+#endif
+#ifndef WEBRTC_CODEC_G729
+ printf(" G.729\n");
+#endif
+#ifndef WEBRTC_CODEC_G729_1
+ printf(" G.729.1\n");
+#endif
+#ifndef WEBRTC_CODEC_GSMFR
+ printf(" GSMFR\n");
+#endif
+#ifndef WEBRTC_CODEC_ILBC
+ printf(" iLBC\n");
+#endif
+#ifndef WEBRTC_CODEC_ISAC
+ printf(" ISAC float\n");
+#endif
+#ifndef WEBRTC_CODEC_ISACFX
+ printf(" ISAC fix\n");
+#endif
+#ifndef WEBRTC_CODEC_PCM16
+ printf(" PCM16\n");
+#endif
+#ifndef WEBRTC_CODEC_SPEEX
+ printf(" Speex\n");
+#endif
+
+ printf("\nTo complete the test, listen to the %d number of output files.\n", _testCntr);
+ }
+}
+
+// Register Codec to use in the test
+//
+// Input: side - which ACM to use, 'A' or 'B'
+//          codecName      - name to use when registering the codec
+//          samplingFreqHz - sampling frequency in Hertz
+//          rate           - bitrate in bits per second
+//          packSize       - packet size in samples
+//          extraByte      - number of extra bytes per packet beyond what the
+//                           registered bitrate implies (e.g. an internal header);
+//                           set to -1 if the codec is a variable-rate codec
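+// Example (matching one of the calls in Perform() above):
+//   RegisterSendCodec('A', codecG722, 16000, 64000, 160, 0);  // 10 ms of G.722 at 64 kbps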
+WebRtc_Word16 TestAllCodecs::RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 samplingFreqHz,
+ int rate,
+ int packSize,
+ int extraByte)
+{
+ if(_testMode != 0) {
+ // Print out codec and settings
+ printf("codec: %s Freq: %d Rate: %d PackSize: %d", codecName, samplingFreqHz, rate, packSize);
+ }
+
+    // Store packet size in samples, used to validate the received packet
+ _packSizeSamp = packSize;
+
+    // Store the expected packet size in bytes, used to validate the received packet
+ // If variable rate codec (extraByte == -1), set to -1 (65535)
+ if (extraByte != -1)
+ {
+ // Add 0.875 to always round up to a whole byte
+ _packSizeBytes = (WebRtc_UWord16)((float)(packSize*rate)/(float)(samplingFreqHz*8)+0.875)+extraByte;
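+        // Worked example: G.722 at 64000 bps with 160 samples at 16 kHz (10 ms)
+        // gives (160 * 64000) / (16000 * 8) = 80 bytes, plus extraByte.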
+ }
+ else
+ {
+ // Packets will have a variable size
+ _packSizeBytes = -1;
+ }
+
+    // Set a pointer to the ACM in which to register the codec
+ AudioCodingModule* myACM;
+ switch(side)
+ {
+ case 'A':
+ {
+ myACM = _acmA;
+ break;
+ }
+ case 'B':
+ {
+ myACM = _acmB;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if(myACM == NULL)
+ {
+ assert(false);
+ return -1;
+ }
+ CodecInst myCodecParam;
+
+    // Get all codec parameters before registering
+ CHECK_ERROR(AudioCodingModule::Codec(codecName, myCodecParam, samplingFreqHz));
+ myCodecParam.rate = rate;
+ myCodecParam.pacsize = packSize;
+ CHECK_ERROR(myACM->RegisterSendCodec(myCodecParam));
+
+    // initialization was successful
+ return 0;
+}
+
+void TestAllCodecs::Run(TestPack* channel)
+{
+ AudioFrame audioFrame;
+
+ WebRtc_UWord16 SamplesIn10MsecA = _inFileA.PayloadLength10Ms();
+ WebRtc_UWord32 timestampA = 1;
+ WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
+ WebRtc_UWord16 recSize;
+ WebRtc_UWord32 timeStampDiff;
+ channel->ResetPayloadSize();
+ int errorCount = 0;
+
+    // Run each test case until the end of the input file, but at most 1000 frames of 10 ms (10 seconds)
+    while((_counter < 1000) && (!_inFileA.EndOfFile()))
+ {
+ // Add 10 msec to ACM
+ _inFileA.Read10MsData(audioFrame);
+ CHECK_ERROR(_acmA->Add10MsData(audioFrame));
+
+ // Run sender side of ACM
+ CHECK_ERROR(_acmA->Process());
+
+ // Verify that the received packet size matches the settings
+ recSize = channel->GetPayloadSize();
+ if (recSize) {
+ if ((recSize != _packSizeBytes) && (_packSizeBytes < 65535)) {
+ errorCount++;
+ }
+
+            // Verify that the timestamp advances by the expected number of samples
+ timeStampDiff = channel->GetTimeStampDiff();
+ if ((_counter > 10) && (timeStampDiff != _packSizeSamp))
+ errorCount++;
+ }
+
+
+ // Run received side of ACM
+ CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
+
+ // Write output speech to file
+        _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+
+        // Count processed frames so the 1000-frame cap above and the reset below take effect
+        _counter++;
+    }
+
+ if (errorCount)
+ {
+ printf(" - test FAILED\n");
+ }
+ else if(_testMode != 0)
+ {
+ printf(" - test PASSED\n");
+ }
+
+ // Reset _counter
+ if (_counter == 1000) {
+ _counter = 0;
+ }
+ if (_inFileA.EndOfFile()) {
+ _inFileA.Rewind();
+ }
+}
+
+void TestAllCodecs::OpenOutFile(WebRtc_Word16 testNumber)
+{
+ char fileName[500] = "testallcodecs_out_";
+ char cntrStr[10];
+
+ sprintf(cntrStr, "%02d.pcm", testNumber);
+ strcat(fileName, cntrStr);
+ _outFileB.Open(fileName, 32000, "wb");
+}
+
+void TestAllCodecs::DisplaySendReceiveCodec()
+{
+ CodecInst myCodecParam;
+ _acmA->SendCodec(myCodecParam);
+ printf("%s -> ", myCodecParam.plname);
+ _acmB->ReceiveCodec(myCodecParam);
+ printf("%s\n", myCodecParam.plname);
+}
+
diff --git a/src/modules/audio_coding/main/test/TestAllCodecs.h b/src/modules/audio_coding/main/test/TestAllCodecs.h
new file mode 100644
index 0000000..958cefd
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestAllCodecs.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_ALL_CODECS_H
+#define TEST_ALL_CODECS_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+
+class TestPack : public AudioPacketizationCallback
+{
+public:
+ TestPack();
+ ~TestPack();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ virtual WebRtc_Word32 SendData(const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ WebRtc_UWord16 GetPayloadSize();
+ WebRtc_UWord32 GetTimeStampDiff();
+ void ResetPayloadSize();
+
+private:
+ AudioCodingModule* _receiverACM;
+ WebRtc_Word16 _seqNo;
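+    // Presumably sized for up to 60 ms of 16-bit audio at 32 kHz, with an extra
+    // factor of two for headroom/stereo use (7680 bytes).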
+ WebRtc_UWord8 _payloadData[60 * 32 * 2 * 2];
+ WebRtc_UWord32 _timeStampDiff;
+ WebRtc_UWord32 _lastInTimestamp;
+ WebRtc_UWord64 _totalBytes;
+ WebRtc_UWord16 _payloadSize;
+};
+
+class TestAllCodecs : public ACMTest
+{
+public:
+ TestAllCodecs(int testMode);
+ ~TestAllCodecs();
+
+ void Perform();
+private:
+    // A sampling frequency of '-1' indicates that the registration is based only on the
+    // codec name and that matching of the sampling frequency is not required. This is
+    // useful for codecs that support several sampling frequencies.
+ WebRtc_Word16 RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 sampFreqHz,
+ int rate,
+ int packSize,
+ int extraByte);
+
+ void Run(TestPack* channel);
+ void OpenOutFile(WebRtc_Word16 testNumber);
+ void DisplaySendReceiveCodec();
+
+ WebRtc_Word32 SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ int _testMode;
+
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ TestPack* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+ WebRtc_Word16 _testCntr;
+ WebRtc_UWord16 _packSizeSamp;
+ WebRtc_UWord16 _packSizeBytes;
+ int _counter;
+};
+
+
+#endif // TEST_ALL_CODECS_H
+
diff --git a/src/modules/audio_coding/main/test/TestFEC.cpp b/src/modules/audio_coding/main/test/TestFEC.cpp
new file mode 100644
index 0000000..829e1de
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestFEC.cpp
@@ -0,0 +1,627 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TestFEC.h"
+
+#include "audio_coding_module_typedefs.h"
+#include "common_types.h"
+#include "engine_configurations.h"
+
+#include <cassert>
+#include <iostream>
+#include "trace.h"
+#include "utility.h"
+
+TestFEC::TestFEC(int testMode):
+_acmA(NULL),
+_acmB(NULL),
+_channelA2B(NULL),
+_testCntr(0)
+{
+ _testMode = testMode;
+}
+
+using namespace std;
+
+TestFEC::~TestFEC()
+{
+ if(_acmA != NULL)
+ {
+ AudioCodingModule::Destroy(_acmA);
+ _acmA = NULL;
+ }
+ if(_acmB != NULL)
+ {
+ AudioCodingModule::Destroy(_acmB);
+ _acmB = NULL;
+ }
+ if(_channelA2B != NULL)
+ {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestFEC::Perform()
+{
+
+ if(_testMode == 0)
+ {
+ printf("Running FEC Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
+ "---------- TestFEC ----------");
+ }
+ char fileName[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _inFileA.Open(fileName, 32000, "rb");
+
+
+ bool fecEnabled;
+
+ _acmA = AudioCodingModule::Create(0);
+ _acmB = AudioCodingModule::Create(1);
+
+ _acmA->InitializeReceiver();
+ _acmB->InitializeReceiver();
+
+ WebRtc_UWord8 numEncoders = _acmA->NumberOfCodecs();
+ CodecInst myCodecParam;
+ if(_testMode != 0)
+ {
+ printf("Registering codecs at receiver... \n");
+ }
+ for(WebRtc_UWord8 n = 0; n < numEncoders; n++)
+ {
+ _acmB->Codec(n, myCodecParam);
+ if(_testMode != 0)
+ {
+ printf("%s\n", myCodecParam.plname);
+ }
+ _acmB->RegisterReceiveCodec(myCodecParam);
+ }
+
+ // Create and connect the channel
+ _channelA2B = new Channel;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB);
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+#ifndef WEBRTC_CODEC_G722
+ printf("G722 needs to be activated to run this test\n");
+ exit(-1);
+#endif
+ char nameG722[] = "G722";
+ RegisterSendCodec('A', nameG722, 16000);
+ char nameCN[] = "CN";
+ RegisterSendCodec('A', nameCN, 16000);
+ char nameRED[] = "RED";
+ RegisterSendCodec('A', nameRED);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ char nameISAC[] = "iSAC";
+ RegisterSendCodec('A',nameISAC, 16000);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADVeryAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+
+ RegisterSendCodec('A', nameISAC, 32000);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADVeryAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+
+ RegisterSendCodec('A', nameISAC, 32000);
+ OpenOutFile(_testCntr);
+ SetVAD(false, false, VADNormal);
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+
+ RegisterSendCodec('A', nameISAC, 16000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+ RegisterSendCodec('A', nameISAC, 32000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+ RegisterSendCodec('A', nameISAC, 16000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+
+
+
+
+ _channelA2B->SetFECTestWithPacketLoss(true);
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+
+ RegisterSendCodec('A',nameG722);
+ RegisterSendCodec('A', nameCN, 16000);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ RegisterSendCodec('A', nameISAC, 16000);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADVeryAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ RegisterSendCodec('A', nameISAC, 32000);
+ OpenOutFile(_testCntr);
+ SetVAD(true, true, VADVeryAggr);
+ _acmA->SetFECStatus(false);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ OpenOutFile(_testCntr);
+ Run();
+ _outFileB.Close();
+
+
+
+
+
+
+ if(_testMode != 0)
+ {
+ printf("=======================================================================\n");
+ printf("%d ",_testCntr++);
+ }
+ else
+ {
+ printf(".");
+ }
+ RegisterSendCodec('A', nameISAC, 32000);
+ OpenOutFile(_testCntr);
+ SetVAD(false, false, VADNormal);
+ _acmA->SetFECStatus(true);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+
+ RegisterSendCodec('A', nameISAC, 16000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+ RegisterSendCodec('A', nameISAC, 32000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+
+ RegisterSendCodec('A', nameISAC, 16000);
+ fecEnabled = _acmA->FECStatus();
+ if(_testMode != 0)
+ {
+ printf("FEC currently %s\n",(fecEnabled?"ON":"OFF"));
+ DisplaySendReceiveCodec();
+ }
+ Run();
+ _outFileB.Close();
+
+
+
+ if(_testMode == 0)
+ {
+ printf("Done!\n");
+ }
+}
+
+WebRtc_Word32 TestFEC::SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode)
+{
+ if(_testMode != 0)
+ {
+ printf("DTX %s; VAD %s; VAD-Mode %d\n",
+ enableDTX? "ON":"OFF",
+ enableVAD? "ON":"OFF",
+ (WebRtc_Word16)vadMode);
+ }
+ return _acmA->SetVAD(enableDTX, enableVAD, vadMode);
+}
+
+WebRtc_Word16 TestFEC::RegisterSendCodec(char side, char* codecName, WebRtc_Word32 samplingFreqHz)
+{
+ if(_testMode != 0)
+ {
+ if(samplingFreqHz > 0)
+ {
+ printf("Registering %s-%d for side %c\n", codecName, samplingFreqHz, side);
+ }
+ else
+ {
+ printf("Registering %s for side %c\n", codecName, side);
+ }
+ }
+ cout << flush;
+ AudioCodingModule* myACM;
+ switch(side)
+ {
+ case 'A':
+ {
+ myACM = _acmA;
+ break;
+ }
+ case 'B':
+ {
+ myACM = _acmB;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if(myACM == NULL)
+ {
+ assert(false);
+ return -1;
+ }
+ CodecInst myCodecParam;
+
+ CHECK_ERROR(AudioCodingModule::Codec(codecName, myCodecParam, samplingFreqHz));
+
+ CHECK_ERROR(myACM->RegisterSendCodec(myCodecParam));
+
+    // initialization was successful
+ return 0;
+}
+
+void TestFEC::Run()
+{
+ AudioFrame audioFrame;
+
+ WebRtc_UWord16 msecPassed = 0;
+ WebRtc_UWord32 secPassed = 0;
+ WebRtc_UWord16 SamplesIn10MsecA = _inFileA.PayloadLength10Ms();
+ WebRtc_UWord32 timestampA = 1;
+ WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
+
+ while(!_inFileA.EndOfFile())
+ {
+ _inFileA.Read10MsData(audioFrame);
+ //audioFrame._timeStamp = timestampA;
+ //timestampA += SamplesIn10MsecA;
+ CHECK_ERROR(_acmA->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmA->Process());
+
+ CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
+ _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+ msecPassed += 10;
+ if(msecPassed >= 1000)
+ {
+ msecPassed = 0;
+ secPassed++;
+ }
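+        // For the later test cases (_testCntr > 14, i.e. the packet-loss runs), FEC is
+        // switched off at the start of every fifth second and switched back on just
+        // before that second ends.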
+ if(((secPassed%5) == 4) && (msecPassed == 0) && (_testCntr > 14))
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ _acmA->SetFECStatus(false);
+ printf("FEC currently %s\n",(_acmA->FECStatus()?"ON":"OFF"));
+ }
+ if(((secPassed%5) == 4) && (msecPassed >= 990) && (_testCntr > 14))
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ _acmA->SetFECStatus(true);
+ printf("FEC currently %s\n",(_acmA->FECStatus()?"ON":"OFF"));
+ }
+ }
+ _inFileA.Rewind();
+}
+
+void TestFEC::OpenOutFile(WebRtc_Word16 testNumber)
+{
+ char fileName[500] = "./modules/audio_coding/main/test/res_tests/TestFEC_outFile_";
+ char cntrStr[10];
+
+ if(_testMode == 0)
+ {
+ sprintf(fileName, "./modules/audio_coding/main/test/res_autotests/TestFEC_outFile_");
+ }
+ sprintf(cntrStr, "%02d.pcm", testNumber);
+ strcat(fileName, cntrStr);
+ _outFileB.Open(fileName, 32000, "wb");
+}
+
+void TestFEC::DisplaySendReceiveCodec()
+{
+ CodecInst myCodecParam;
+ _acmA->SendCodec(myCodecParam);
+ printf("%s -> ", myCodecParam.plname);
+ _acmB->ReceiveCodec(myCodecParam);
+ printf("%s\n", myCodecParam.plname);
+}
diff --git a/src/modules/audio_coding/main/test/TestFEC.h b/src/modules/audio_coding/main/test/TestFEC.h
new file mode 100644
index 0000000..09d1009
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestFEC.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_FEC_H
+#define TEST_FEC_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+
+class TestFEC : public ACMTest
+{
+public:
+ TestFEC(int testMode);
+ ~TestFEC();
+
+ void Perform();
+private:
+    // The default value of '-1' indicates that the registration is based only on the
+    // codec name and that matching of the sampling frequency is not required. This is
+    // useful for codecs that support several sampling frequencies.
+ WebRtc_Word16 RegisterSendCodec(char side, char* codecName, WebRtc_Word32 sampFreqHz = -1);
+ void Run();
+ void OpenOutFile(WebRtc_Word16 testNumber);
+ void DisplaySendReceiveCodec();
+ WebRtc_Word32 SetVAD(bool enableDTX, bool enableVAD, ACMVADMode vadMode);
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ Channel* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+ WebRtc_Word16 _testCntr;
+ int _testMode;
+};
+
+
+#endif
+
diff --git a/src/modules/audio_coding/main/test/TestStereo.cpp b/src/modules/audio_coding/main/test/TestStereo.cpp
new file mode 100644
index 0000000..bb4c40b
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestStereo.cpp
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TestStereo.h"
+
+#include "common_types.h"
+#include "audio_coding_module_typedefs.h"
+#include "engine_configurations.h"
+#include <iostream>
+#include "utility.h"
+#include <cassert>
+#include "trace.h"
+
+
+// Class for simulating packet handling
+TestPackStereo::TestPackStereo():
+_receiverACM(NULL),
+_seqNo(0),
+_timeStampDiff(0),
+_lastInTimestamp(0),
+_totalBytes(0),
+_payloadSize(0),
+_noChannels(1),
+_codecType(0)
+{
+}
+TestPackStereo::~TestPackStereo()
+{
+}
+
+void
+TestPackStereo::RegisterReceiverACM(AudioCodingModule* acm)
+{
+ _receiverACM = acm;
+ return;
+}
+
+
+WebRtc_Word32
+TestPackStereo::SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation)
+{
+ WebRtcRTPHeader rtpInfo;
+ WebRtc_Word32 status;
+ WebRtc_UWord16 payloadDataSize = payloadSize;
+ WebRtc_UWord8 payloadDataMaster[60 * 32 * 2 * 2];
+ WebRtc_UWord8 payloadDataSlave[60 * 32 * 2 * 2];
+ bool twoBytePerSample = false;
+ bool oneBytePerSample = true;
+ bool frameBased = false;
+
+ rtpInfo.header.markerBit = false;
+ rtpInfo.header.ssrc = 0;
+ rtpInfo.header.sequenceNumber = _seqNo++;
+ rtpInfo.header.payloadType = payloadType;
+ rtpInfo.header.timestamp = timeStamp;
+ if(frameType == kFrameEmpty)
+ {
+ // Skip this frame
+ return 0;
+ }
+ if(frameType != kAudioFrameCN)
+ {
+ rtpInfo.type.Audio.isCNG = false;
+
+        // For stereo we need to call the ACM with two incoming packets, one for each channel.
+        // The packet is split differently depending on the codec.
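+        // Assumed mapping (see the SetCodecType() calls in TestStereo::Perform()):
+        // 0 = one byte per sample (PCMA/PCMU), 1 = two bytes per sample (L16),
+        // 2 = frame-based codecs, 3 = four bits per sample (G.722).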
+ if (_codecType == 0) {
+ // one byte per sample
+ for (int i=0, j=0; i<payloadDataSize; i+=2, j++)
+ {
+ payloadDataMaster[j] = payloadData[i];
+ payloadDataSlave[j] = payloadData[i+1];
+ }
+ } else if (_codecType == 1) {
+ // two bytes per sample
+ for (int i=0, j=0; i<payloadDataSize; i+=4, j+=2)
+ {
+ payloadDataMaster[j] = payloadData[i];
+ payloadDataMaster[j+1] = payloadData[i+1];
+ payloadDataSlave[j] = payloadData[i+2];
+ payloadDataSlave[j+1] = payloadData[i+3];
+ }
+ } else if (_codecType == 2) {
+ // frameBased
+ memcpy(payloadDataMaster, &payloadData[0], payloadDataSize/2);
+ memcpy(payloadDataSlave, &payloadData[payloadDataSize/2], payloadDataSize/2);
+ } else if (_codecType == 3) {
+ // four bits per sample
+ for (int i=0, j=0; i<payloadDataSize; i+=2, j++)
+ {
+ payloadDataMaster[j] = (payloadData[i] & 0xF0) + (payloadData[i+1] >> 4);
+ payloadDataSlave[j] = ((payloadData[i] & 0x0F) << 4) + (payloadData[i+1] & 0x0F);
+ }
+ }
+ }
+ else
+ {
+ // If CNG packet, send the same packet to both master and slave.
+ rtpInfo.type.Audio.isCNG = true;
+ memcpy(payloadDataMaster, payloadData, payloadSize);
+ memcpy(payloadDataSlave, payloadData, payloadSize);
+ payloadDataSize = payloadSize*2;
+ }
+
+ // Call ACM with two packets, one for each channel
+ rtpInfo.type.Audio.channel = 1;
+ status = _receiverACM->IncomingPacket((WebRtc_Word8*)payloadDataMaster, payloadDataSize/2, rtpInfo);
+ rtpInfo.type.Audio.channel = 2;
+ status = _receiverACM->IncomingPacket((WebRtc_Word8*)payloadDataSlave, payloadDataSize/2, rtpInfo);
+
+ if (frameType != kAudioFrameCN) {
+ _payloadSize = payloadDataSize;
+ } else {
+ _payloadSize = -1;
+ }
+ _timeStampDiff = timeStamp - _lastInTimestamp;
+ _lastInTimestamp = timeStamp;
+ _totalBytes += payloadDataSize;
+ return status;
+}
+
+WebRtc_UWord16
+TestPackStereo::GetPayloadSize()
+{
+ return _payloadSize;
+}
+
+
+WebRtc_UWord32
+TestPackStereo::GetTimeStampDiff()
+{
+ return _timeStampDiff;
+}
+
+void
+TestPackStereo::ResetPayloadSize()
+{
+ _payloadSize = 0;
+}
+
+void
+TestPackStereo::SetCodecType(int codecType)
+{
+ _codecType = codecType;
+}
+
+TestStereo::TestStereo(int testMode):
+_acmA(NULL),
+_acmB(NULL),
+_channelA2B(NULL),
+_testCntr(0),
+_packSizeSamp(0),
+_packSizeBytes(0),
+_counter(0)
+{
+ // testMode = 0 for silent test (auto test)
+ _testMode = testMode;
+}
+
+using namespace std;
+TestStereo::~TestStereo()
+{
+ if(_acmA != NULL)
+ {
+ AudioCodingModule::Destroy(_acmA);
+ _acmA = NULL;
+ }
+ if(_acmB != NULL)
+ {
+ AudioCodingModule::Destroy(_acmB);
+ _acmB = NULL;
+ }
+ if(_channelA2B != NULL)
+ {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestStereo::Perform()
+{
+ char fileName[500];
+ WebRtc_UWord16 frequencyHz;
+
+ if(_testMode == 0)
+ {
+ printf("Running Stereo Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
+ "---------- TestStereo ----------");
+ }
+
+ strcpy(fileName, "./modules/audio_coding/main/test/teststereo32kHz.pcm");
+ frequencyHz = 32000;
+
+ _inFileA.Open(fileName, frequencyHz, "rb");
+ _inFileA.ReadStereo(true);
+
+ _acmA = AudioCodingModule::Create(0);
+ _acmB = AudioCodingModule::Create(1);
+
+ _acmA->InitializeReceiver();
+ _acmB->InitializeReceiver();
+
+ WebRtc_UWord8 numEncoders = _acmA->NumberOfCodecs();
+ CodecInst myCodecParam;
+
+ for(WebRtc_UWord8 n = 0; n < numEncoders; n++)
+ {
+ _acmB->Codec(n, myCodecParam);
+ if(!strcmp(myCodecParam.plname, "L16") ||
+ !strcmp(myCodecParam.plname, "PCMA")||
+ !strcmp(myCodecParam.plname, "PCMU")||
+ !strcmp(myCodecParam.plname, "G722"))
+ {
+ myCodecParam.channels=2;
+ _acmB->RegisterReceiveCodec(myCodecParam);
+ }
+ }
+
+ // Create and connect the channel
+ _channelA2B = new TestPackStereo;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB);
+
+ // All codecs are tested for all allowed sampling frequencies, rates and packet sizes
+#ifdef WEBRTC_CODEC_G722
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _channelA2B->SetCodecType(3);
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecG722[] = "G722";
+ RegisterSendCodec('A', codecG722, 16000, 64000, 160);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 320);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 480);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 640);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 800);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 960);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecG722, 16000, 64000, 320);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _channelA2B->SetCodecType(1);
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecL16[] = "L16";
+ RegisterSendCodec('A', codecL16, 8000, 128000, 80);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 160);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 240);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 320);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecL16, 8000, 128000, 80);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 160);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 320);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 480);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 640);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecL16, 16000, 256000, 160);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ RegisterSendCodec('A', codecL16, 32000, 512000, 320);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecL16, 32000, 512000, 640);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecL16, 32000, 512000, 320);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ _outFileB.Close();
+#endif
+#define PCMA_AND_PCMU
+#ifdef PCMA_AND_PCMU
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _channelA2B->SetCodecType(0);
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecPCMA[] = "PCMA";
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 80);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 160);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 240);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 320);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 400);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 480);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecPCMA, 8000, 64000, 80);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+ _testCntr++;
+ OpenOutFile(_testCntr);
+ char codecPCMU[] = "PCMU";
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 80);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 160);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 240);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 320);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 400);
+ Run(_channelA2B);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 480);
+ Run(_channelA2B);
+ _acmA->SetVAD(true, true, VADNormal);
+ RegisterSendCodec('A', codecPCMU, 8000, 64000, 80);
+ Run(_channelA2B);
+ _acmA->SetVAD(false, false, VADNormal);
+ Run(_channelA2B);
+ _outFileB.Close();
+ if(_testMode != 0) {
+ printf("=======================================================================\n");
+ } else {
+ printf(".");
+ }
+#endif
+
+ /* Print out which codecs were tested, and which were not, in the run */
+
+ if(_testMode != 0) {
+ printf("The following codecs was INCLUDED in the test:\n");
+#ifdef WEBRTC_CODEC_G722
+ printf(" G.722\n");
+#endif
+#ifdef WEBRTC_CODEC_PCM16
+ printf(" PCM16\n");
+#endif
+ printf(" G.711\n");
+
+ printf("\nTo complete the test, listen to the %d number of output files.\n", _testCntr);
+ } else {
+ printf("Done!\n");
+ }
+}
+
+// Register the codec to use in the test
+//
+// Input: side - which ACM to use, 'A' or 'B'
+// codecName - name to use when registering the codec
+// samplingFreqHz - sampling frequency in Hertz
+// rate - bitrate in bits per second
+// packSize - packet size in samples
+WebRtc_Word16 TestStereo::RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 samplingFreqHz,
+ int rate,
+ int packSize)
+{
+ if(_testMode != 0) {
+ // Print out codec and settings
+ printf("codec: %s Freq: %d Rate: %d PackSize: %d", codecName, samplingFreqHz, rate, packSize);
+ }
+
+ // Store packet size in samples, used to validate the received packet
+ _packSizeSamp = packSize;
+
+ // Store the expected packet size in bytes, used to validate the received packet.
+ // Add 0.875 to round a fractional byte count up to a whole byte.
+ _packSizeBytes = (WebRtc_UWord16)((float)(packSize*rate)/(float)(samplingFreqHz*8)+0.875);
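+ // Note: the payload size reported by the stereo channel is the total for
+ // both channels, so Run() checks the received size against _packSizeBytes*2.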
+
+ // Set pointer to the ACM where to register the codec
+ AudioCodingModule* myACM;
+ switch(side)
+ {
+ case 'A':
+ {
+ myACM = _acmA;
+ break;
+ }
+ case 'B':
+ {
+ myACM = _acmB;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if(myACM == NULL)
+ {
+ assert(false);
+ return -1;
+ }
+ CodecInst myCodecParam;
+
+ // Get all codec parameters before registering
+ CHECK_ERROR(AudioCodingModule::Codec(codecName, myCodecParam, samplingFreqHz));
+ myCodecParam.rate = rate;
+ myCodecParam.pacsize = packSize;
+ myCodecParam.channels = 2;
+ CHECK_ERROR(myACM->RegisterSendCodec(myCodecParam));
+
+ // initialization was successful
+ return 0;
+}
+
+void TestStereo::Run(TestPackStereo* channel)
+{
+ AudioFrame audioFrame;
+
+ WebRtc_UWord16 SamplesIn10MsecA = _inFileA.PayloadLength10Ms();
+ WebRtc_UWord32 timestampA = 1;
+ WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
+ WebRtc_UWord16 recSize;
+ WebRtc_UWord32 timeStampDiff;
+ channel->ResetPayloadSize();
+ int errorCount = 0;
+
+ // Run at most 1000 frames (10 ms each) for each test case
+ while((_counter<1000)&& (!_inFileA.EndOfFile()))
+ {
+ // Add 10 msec to ACM
+ _inFileA.Read10MsData(audioFrame);
+ CHECK_ERROR(_acmA->Add10MsData(audioFrame));
+
+ // Run sender side of ACM
+ CHECK_ERROR(_acmA->Process());
+
+ // Verify that the received packet size matches the settings
+ recSize = channel->GetPayloadSize();
+ if ((0 < recSize) && (recSize < 65535)) {
+ if ((recSize != _packSizeBytes*2) && (_packSizeBytes < 65535)) {
+ errorCount++;
+ }
+
+ // Verify that the timestamp is updated with expected length
+ timeStampDiff = channel->GetTimeStampDiff();
+ if ((_counter > 10) && (timeStampDiff != _packSizeSamp)) {
+ errorCount++;
+ }
+ }
+
+ // Run the receiving side of ACM
+ CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
+
+ // Write output speech to file
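+ // (For stereo the written length is samples per channel times the number of channels.)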
+ _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples*audioFrame._audioChannel);
+
+ // Count processed frames; each test case runs at most 1000 frames
+ _counter++;
+ }
+
+ if (errorCount)
+ {
+ printf(" - test FAILED\n");
+ }
+ else if(_testMode != 0)
+ {
+ printf(" - test PASSED\n");
+ }
+
+ // Reset _counter
+ if (_counter == 1000) {
+ _counter = 0;
+ }
+ if (_inFileA.EndOfFile()) {
+ _inFileA.Rewind();
+ }
+}
+
+void TestStereo::OpenOutFile(WebRtc_Word16 testNumber)
+{
+ char fileName[500] = "./modules/audio_coding/main/test/res_tests/teststereo_out_";
+ char cntrStr[10];
+
+ sprintf(cntrStr, "%02d.pcm", testNumber);
+ strcat(fileName, cntrStr);
+
+ _outFileB.Open(fileName, 32000, "wb");
+}
+
+void TestStereo::DisplaySendReceiveCodec()
+{
+ CodecInst myCodecParam;
+ _acmA->SendCodec(myCodecParam);
+ if(_testMode != 0) {
+ printf("%s -> ", myCodecParam.plname);
+ }
+ _acmB->ReceiveCodec(myCodecParam);
+ if(_testMode != 0) {
+ printf("%s\n", myCodecParam.plname);
+ }
+}
+
diff --git a/src/modules/audio_coding/main/test/TestStereo.h b/src/modules/audio_coding/main/test/TestStereo.h
new file mode 100644
index 0000000..07c32de
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestStereo.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_STEREO_H
+#define TEST_STEREO_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+
+class TestPackStereo : public AudioPacketizationCallback
+{
+public:
+ TestPackStereo();
+ ~TestPackStereo();
+
+ void RegisterReceiverACM(AudioCodingModule* acm);
+
+ virtual WebRtc_Word32 SendData(const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ WebRtc_UWord16 GetPayloadSize();
+ WebRtc_UWord32 GetTimeStampDiff();
+ void ResetPayloadSize();
+ void SetCodecType(int codecType);
+
+
+private:
+ AudioCodingModule* _receiverACM;
+ WebRtc_Word16 _seqNo;
+ WebRtc_UWord8 _payloadData[60 * 32 * 2 * 2];
+ WebRtc_UWord32 _timeStampDiff;
+ WebRtc_UWord32 _lastInTimestamp;
+ WebRtc_UWord64 _totalBytes;
+ WebRtc_UWord16 _payloadSize;
+ WebRtc_UWord16 _noChannels;
+ int _codecType;
+};
+
+class TestStereo : public ACMTest
+{
+public:
+ TestStereo(int testMode);
+ ~TestStereo();
+
+ void Perform();
+private:
+ // Registers the send codec on side 'A' or 'B' with the given sampling
+ // frequency, rate and packet size. All codecs are registered with two channels.
+ WebRtc_Word16 RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 sampFreqHz,
+ int rate,
+ int packSize);
+
+ void Run(TestPackStereo* channel);
+ void OpenOutFile(WebRtc_Word16 testNumber);
+ void DisplaySendReceiveCodec();
+
+ WebRtc_Word32 SendData(
+ const FrameType frameType,
+ const WebRtc_UWord8 payloadType,
+ const WebRtc_UWord32 timeStamp,
+ const WebRtc_UWord8* payloadData,
+ const WebRtc_UWord16 payloadSize,
+ const RTPFragmentationHeader* fragmentation);
+
+ int _testMode;
+
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ TestPackStereo* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+ PCMFile _inFileStereo;
+ WebRtc_Word16 _testCntr;
+ WebRtc_UWord16 _packSizeSamp;
+ WebRtc_UWord16 _packSizeBytes;
+ int _counter;
+ int _codecType;
+};
+
+
+#endif
+
diff --git a/src/modules/audio_coding/main/test/TestVADDTX.cpp b/src/modules/audio_coding/main/test/TestVADDTX.cpp
new file mode 100644
index 0000000..8186f62
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestVADDTX.cpp
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TestVADDTX.h"
+
+#include "common_types.h"
+#include "audio_coding_module_typedefs.h"
+#include "utility.h"
+#include "engine_configurations.h"
+#include <iostream>
+#include "trace.h"
+
+
+TestVADDTX::TestVADDTX(int testMode):
+_acmA(NULL),
+_acmB(NULL),
+_channelA2B(NULL),
+_testResults(0)
+{
+ //testMode == 1 for more extensive testing
+ //testMode == 0 for quick test (autotest)
+ _testMode = testMode;
+}
+
+using namespace std;
+TestVADDTX::~TestVADDTX()
+{
+ if(_acmA != NULL)
+ {
+ AudioCodingModule::Destroy(_acmA);
+ _acmA = NULL;
+ }
+ if(_acmB != NULL)
+ {
+ AudioCodingModule::Destroy(_acmB);
+ _acmB = NULL;
+ }
+ if(_channelA2B != NULL)
+ {
+ delete _channelA2B;
+ _channelA2B = NULL;
+ }
+}
+
+void TestVADDTX::Perform()
+{
+
+ if(_testMode == 0)
+ {
+ printf("Running VAD/DTX Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1,
+ "---------- TestVADDTX ----------");
+ }
+ char fileName[] = "./modules/audio_coding/main/test/testfile32kHz.pcm";
+ _inFileA.Open(fileName, 32000, "rb");
+
+ _acmA = AudioCodingModule::Create(0);
+ _acmB = AudioCodingModule::Create(1);
+
+ _acmA->InitializeReceiver();
+ _acmB->InitializeReceiver();
+
+ WebRtc_UWord8 numEncoders = _acmA->NumberOfCodecs();
+ CodecInst myCodecParam;
+ if(_testMode != 0)
+ {
+ printf("Registering codecs at receiver... \n");
+ }
+ for(WebRtc_UWord8 n = 0; n < numEncoders; n++)
+ {
+ _acmB->Codec(n, myCodecParam);
+ if(_testMode != 0)
+ {
+ printf("%s\n", myCodecParam.plname);
+ }
+ _acmB->RegisterReceiveCodec(myCodecParam);
+ }
+
+ // Create and connect the channel
+ _channelA2B = new Channel;
+ _acmA->RegisterTransportCallback(_channelA2B);
+ _channelA2B->RegisterReceiverACM(_acmB);
+
+ _acmA->RegisterVADCallback(&_monitor);
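+ // The monitor registered above counts each reported frame type (see
+ // ActivityMonitor::InFrameType()); VerifyTest() later compares these counters
+ // against the pattern expected for the current VAD/DTX configuration.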
+
+
+ WebRtc_Word16 testCntr = 1;
+
+#ifdef WEBRTC_CODEC_ISAC
+ // Open outputfile
+ OpenOutFile(testCntr++);
+
+ // Register iSAC WB as send codec
+ char nameISAC[] = "ISAC";
+ RegisterSendCodec('A', nameISAC, 16000);
+
+ // Run the five test cases
+ runTestCases();
+
+ // Close file
+ _outFileB.Close();
+
+ // Open outputfile
+ OpenOutFile(testCntr++);
+
+ // Register iSAC SWB as send codec
+ RegisterSendCodec('A', nameISAC, 32000);
+
+ // Run the five test cases
+ runTestCases();
+
+ // Close file
+ _outFileB.Close();
+#endif
+#ifdef WEBRTC_CODEC_ILBC
+ // Open outputfile
+ OpenOutFile(testCntr++);
+
+ // Register iLBC as send codec
+ char nameILBC[] = "ilbc";
+ RegisterSendCodec('A', nameILBC);
+
+ // Run the five test cases
+ runTestCases();
+
+ // Close file
+ _outFileB.Close();
+
+#endif
+ if(_testMode) {
+ printf("Done!\n");
+ }
+
+ printf("VAD/DTX test completed with %d subtests failed\n", testResults);
+ if (testResults > 0)
+ {
+ printf("Press return\n\n", testResults);
+ getchar();
+ }
+}
+
+void TestVADDTX::runTestCases()
+{
+ if(_testMode != 0)
+ {
+ CodecInst myCodecParam;
+ _acmA->SendCodec(myCodecParam);
+ printf("%s\n", myCodecParam.plname);
+ }
+ else
+ {
+ printf(".");
+ }
+ // #1 DTX = OFF, VAD = ON, VADNormal
+ if(_testMode != 0)
+ printf("Test #1 ");
+ SetVAD(false, true, VADNormal);
+ Run();
+ _testResults += VerifyTest();
+
+ // #2 DTX = OFF, VAD = ON, VADAggr
+ if(_testMode != 0)
+ printf("Test #2 ");
+ SetVAD(false, true, VADAggr);
+ Run();
+ _testResults += VerifyTest();
+
+ // #3 DTX = ON, VAD = ON, VADLowBitrate
+ if(_testMode != 0)
+ printf("Test #3 ");
+ SetVAD(true, true, VADLowBitrate);
+ Run();
+ _testResults += VerifyTest();
+
+ // #4 DTX = ON, VAD = ON, VADVeryAggr
+ if(_testMode != 0)
+ printf("Test #4 ");
+ SetVAD(true, true, VADVeryAggr);
+ Run();
+ _testResults += VerifyTest();
+
+ // #5 DTX = ON, VAD = OFF, VADNormal
+ if(_testMode != 0)
+ printf("Test #5 ");
+ SetVAD(true, false, VADNormal);
+ Run();
+ _testResults += VerifyTest();
+
+}
+void TestVADDTX::runTestInternalDTX()
+{
+ // #6 DTX = ON, VAD = ON, VADNormal
+ if(_testMode != 0)
+ printf("Test #6 ");
+
+ SetVAD(true, true, VADNormal);
+ if(_acmA->ReplaceInternalDTXWithWebRtc(true) < 0) {
+ printf("Was not able to replace DTX since CN was not registered\n");
+ }
+ Run();
+ _testResults += VerifyTest();
+}
+
+void TestVADDTX::SetVAD(bool statusDTX, bool statusVAD, WebRtc_Word16 vadMode)
+{
+ WebRtc_Word32 status;
+ bool dtxEnabled, vadEnabled;
+ ACMVADMode vadModeSet;
+
+ status = _acmA->SetVAD(statusDTX, statusVAD, (ACMVADMode) vadMode);
+ status = _acmA->VAD(dtxEnabled, vadEnabled, vadModeSet);
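+
+ // Note: enabling DTX is expected to force VAD on in the ACM, so the checks
+ // below allow vadEnabled to differ from the requested statusVAD when DTX is
+ // enabled.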
+
+ if(_testMode != 0)
+ {
+ if(statusDTX != dtxEnabled)
+ {
+ printf("DTX: %s not the same as requested: %s\n",
+ dtxEnabled? "ON":"OFF", dtxEnabled? "OFF":"ON");
+ }
+ if(((statusVAD == true) && (vadEnabled == false)) ||
+ ((statusVAD == false) && (vadEnabled == false) && (statusDTX == true)))
+ {
+ printf("VAD: %s not the same as requested: %s\n",
+ vadEnabled? "ON":"OFF", vadEnabled? "OFF":"ON");
+ }
+ if(vadModeSet != vadMode)
+ {
+ printf("VAD mode: %d not the same as requested: %d\n",
+ (WebRtc_Word16)vadModeSet, (WebRtc_Word16)vadMode);
+ }
+ }
+
+ // Requested VAD/DTX settings
+ _setStruct.statusDTX = statusDTX;
+ _setStruct.statusVAD = statusVAD;
+ _setStruct.vadMode = (ACMVADMode) vadMode;
+
+ // VAD settings after setting VAD in ACM
+ _getStruct.statusDTX = dtxEnabled;
+ _getStruct.statusVAD = vadEnabled;
+ _getStruct.vadMode = vadModeSet;
+
+}
+
+VADDTXstruct TestVADDTX::GetVAD()
+{
+ VADDTXstruct retStruct;
+ WebRtc_Word32 status;
+ bool dtxEnabled, vadEnabled;
+ ACMVADMode vadModeSet;
+
+ status = _acmA->VAD(dtxEnabled, vadEnabled, vadModeSet);
+
+ retStruct.statusDTX = dtxEnabled;
+ retStruct.statusVAD = vadEnabled;
+ retStruct.vadMode = vadModeSet;
+ return retStruct;
+}
+
+WebRtc_Word16 TestVADDTX::RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 samplingFreqHz,
+ WebRtc_Word32 rateKbps)
+{
+ if(_testMode != 0)
+ {
+ printf("Registering %s for side %c\n", codecName, side);
+ }
+ cout << flush;
+ AudioCodingModule* myACM;
+ switch(side)
+ {
+ case 'A':
+ {
+ myACM = _acmA;
+ break;
+ }
+ case 'B':
+ {
+ myACM = _acmB;
+ break;
+ }
+ default:
+ return -1;
+ }
+
+ if(myACM == NULL)
+ {
+ return -1;
+ }
+
+ CodecInst myCodecParam;
+ for(WebRtc_Word16 codecCntr = 0; codecCntr < myACM->NumberOfCodecs();
+ codecCntr++)
+ {
+ CHECK_ERROR(myACM->Codec((WebRtc_UWord8)codecCntr, myCodecParam));
+ if(!STR_CASE_CMP(myCodecParam.plname, codecName))
+ {
+ if((samplingFreqHz == -1) || (myCodecParam.plfreq == samplingFreqHz))
+ {
+ if((rateKbps == -1) || (myCodecParam.rate == rateKbps))
+ {
+ break;
+ }
+ }
+ }
+ }
+
+ CHECK_ERROR(myACM->RegisterSendCodec(myCodecParam));
+
+ // initialization was successful
+ return 0;
+}
+
+void TestVADDTX::Run()
+{
+ AudioFrame audioFrame;
+
+ WebRtc_UWord16 SamplesIn10MsecA = _inFileA.PayloadLength10Ms();
+ WebRtc_UWord32 timestampA = 1;
+ WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
+
+ while(!_inFileA.EndOfFile())
+ {
+ _inFileA.Read10MsData(audioFrame);
+ audioFrame._timeStamp = timestampA;
+ timestampA += SamplesIn10MsecA;
+ CHECK_ERROR(_acmA->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmA->Process());
+
+ CHECK_ERROR(_acmB->PlayoutData10Ms(outFreqHzB, audioFrame));
+ _outFileB.Write10MsData(audioFrame._payloadData, audioFrame._payloadDataLengthInSamples);
+ }
+#ifdef PRINT_STAT
+ _monitor.PrintStatistics(_testMode);
+#endif
+ _inFileA.Rewind();
+ _monitor.GetStatistics(_statCounter);
+ _monitor.ResetStatistics();
+}
+
+void TestVADDTX::OpenOutFile(WebRtc_Word16 testNumber)
+{
+ char fileName[500] = "./modules/audio_coding/main/test/res_tests/testVADDTX_outFile_";
+ char cntrStr[10];
+
+ if(_testMode == 0)
+ {
+ sprintf(fileName, "./modules/audio_coding/main/test/res_autotests/testVADDTX_outFile_");
+ }
+ sprintf(cntrStr, "%02d.pcm", testNumber);
+ strcat(fileName, cntrStr);
+ _outFileB.Open(fileName, 16000, "wb");
+}
+
+
+WebRtc_Word16 TestVADDTX::VerifyTest()
+{
+ // Verify empty frame result
+ WebRtc_UWord8 statusEF = 0;
+ WebRtc_UWord8 vadPattern = 0;
+ WebRtc_UWord8 emptyFramePattern[6];
+ CodecInst myCodecParam;
+ _acmA->SendCodec(myCodecParam);
+ bool dtxInUse = true;
+ bool isReplaced = false;
+ if ((STR_CASE_CMP(myCodecParam.plname,"G729") == 0) ||
+ (STR_CASE_CMP(myCodecParam.plname,"G723") == 0) ||
+ (STR_CASE_CMP(myCodecParam.plname,"AMR") == 0) ||
+ (STR_CASE_CMP(myCodecParam.plname,"AMR-wb") == 0) ||
+ (STR_CASE_CMP(myCodecParam.plname,"speex") == 0))
+ {
+ _acmA->IsInternalDTXReplacedWithWebRtc(isReplaced);
+ if (!isReplaced)
+ {
+ dtxInUse = false;
+ }
+ }
+
+ // Check for error in VAD/DTX settings
+ if (_getStruct.statusDTX != _setStruct.statusDTX){
+ // DTX status doesn't match expected
+ vadPattern |= 4;
+ }
+ if (_getStruct.statusDTX){
+ if ((!_getStruct.statusVAD && dtxInUse) || (!dtxInUse && (_getStruct.statusVAD !=_setStruct.statusVAD)))
+ {
+ // Mismatch in VAD setting
+ vadPattern |= 2;
+ }
+ } else {
+ if (_getStruct.statusVAD != _setStruct.statusVAD){
+ // VAD status doesn't match expected
+ vadPattern |= 2;
+ }
+ }
+ if (_getStruct.vadMode != _setStruct.vadMode){
+ // VAD Mode doesn't match expected
+ vadPattern |= 1;
+ }
+
+ // Set expected empty frame pattern
+ int ii;
+ for (ii = 0; ii < 6; ii++) {
+ emptyFramePattern[ii] = 0;
+ }
+ emptyFramePattern[0] = 1; // "kNoEncoding", not important to check. Codecs with packetsize != 80 samples will get this output.
+ emptyFramePattern[1] = 1; // Expect to always receive some frames labeled "kActiveNormalEncoded"
+ emptyFramePattern[2] = (((!_getStruct.statusDTX && _getStruct.statusVAD) || (!dtxInUse && _getStruct.statusDTX))); // "kPassiveNormalEncoded"
+ emptyFramePattern[3] = ((_getStruct.statusDTX && dtxInUse && (_acmA->SendFrequency() == 8000))); // "kPassiveDTXNB"
+ emptyFramePattern[4] = ((_getStruct.statusDTX && dtxInUse && (_acmA->SendFrequency() == 16000))); // "kPassiveDTXWB"
+ emptyFramePattern[5] = ((_getStruct.statusDTX && dtxInUse && (_acmA->SendFrequency() == 32000))); // "kPassiveDTXSWB"
+
+ // Check pattern 1-5 (skip 0)
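+ // statusEF becomes non-zero if an expected frame type was never counted, or
+ // if a frame type that should not occur was counted at least once.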
+ for (int ii = 1; ii < 6; ii++)
+ {
+ if (emptyFramePattern[ii])
+ {
+ statusEF |= (_statCounter[ii] == 0);
+ }
+ else
+ {
+ statusEF |= (_statCounter[ii] > 0);
+ }
+ }
+ if ((statusEF == 0) && (vadPattern == 0))
+ {
+ if(_testMode != 0)
+ {
+ printf(" Test OK!\n");
+ }
+ return 0;
+ }
+ else
+ {
+ if (statusEF)
+ {
+ printf("\t\t\tUnexpected empty frame result!\n");
+ }
+ if (vadPattern)
+ {
+ printf("\t\t\tUnexpected SetVAD() result!\tDTX: %d\tVAD: %d\tMode: %d\n", (vadPattern >> 2) & 1, (vadPattern >> 1) & 1, vadPattern & 1);
+ }
+ return 1;
+ }
+}
+
+ActivityMonitor::ActivityMonitor()
+{
+ _counter[0] = _counter[1] = _counter[2] = _counter[3] = _counter[4] = _counter[5] = 0;
+}
+
+ActivityMonitor::~ActivityMonitor()
+{
+}
+
+WebRtc_Word32 ActivityMonitor::InFrameType(WebRtc_Word16 frameType)
+{
+ _counter[frameType]++;
+ return 0;
+}
+
+void ActivityMonitor::PrintStatistics(int testMode)
+{
+ if(testMode != 0)
+ {
+ printf("\n");
+ printf("kActiveNormalEncoded kPassiveNormalEncoded kPassiveDTXWB kPassiveDTXNB kPassiveDTXSWB kFrameEmpty\n");
+
+ printf("%19u", _counter[1]);
+ printf("%22u", _counter[2]);
+ printf("%14u", _counter[3]);
+ printf("%14u", _counter[4]);
+ printf("%14u", _counter[5]);
+ printf("%11u", _counter[0]);
+
+ printf("\n\n");
+ }
+}
+
+void ActivityMonitor::ResetStatistics()
+{
+ _counter[0] = _counter[1] = _counter[2] = _counter[3] = _counter[4] = _counter[5] = 0;
+}
+
+void ActivityMonitor::GetStatistics(WebRtc_UWord32* getCounter)
+{
+ for (int ii = 0; ii < 6; ii++)
+ {
+ getCounter[ii] = _counter[ii];
+ }
+}
diff --git a/src/modules/audio_coding/main/test/TestVADDTX.h b/src/modules/audio_coding/main/test/TestVADDTX.h
new file mode 100644
index 0000000..cf9088b
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TestVADDTX.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TEST_VAD_DTX_H
+#define TEST_VAD_DTX_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+
+typedef struct
+{
+ bool statusDTX;
+ bool statusVAD;
+ ACMVADMode vadMode;
+} VADDTXstruct;
+
+class ActivityMonitor : public ACMVADCallback
+{
+public:
+ ActivityMonitor();
+ ~ActivityMonitor();
+ WebRtc_Word32 InFrameType(WebRtc_Word16 frameType);
+ void PrintStatistics(int testMode);
+ void ResetStatistics();
+ void GetStatistics(WebRtc_UWord32* getCounter);
+private:
+ // counting according to
+ /*enum WebRtcACMEncodingType
+ {
+ kNoEncoding,
+ kActiveNormalEncoded,
+ kPassiveNormalEncoded,
+ kPassiveDTXNB,
+ kPassiveDTXWB,
+ kPassiveDTXSWB
+ };*/
+ WebRtc_UWord32 _counter[6];
+};
+
+class TestVADDTX : public ACMTest
+{
+public:
+ TestVADDTX(int testMode);
+ ~TestVADDTX();
+
+ void Perform();
+private:
+ // Registration can be based on codec name only, codec name and sampling frequency, or
+ // codec name, sampling frequency and rate.
+ WebRtc_Word16 RegisterSendCodec(char side,
+ char* codecName,
+ WebRtc_Word32 samplingFreqHz = -1,
+ WebRtc_Word32 rateKbps = -1);
+ void Run();
+ void OpenOutFile(WebRtc_Word16 testNumber);
+ void runTestCases();
+ void runTestInternalDTX();
+ void SetVAD(bool statusDTX, bool statusVAD, WebRtc_Word16 vadMode);
+ VADDTXstruct GetVAD();
+ WebRtc_Word16 VerifyTest();
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ Channel* _channelA2B;
+
+ PCMFile _inFileA;
+ PCMFile _outFileB;
+
+ ActivityMonitor _monitor;
+ WebRtc_UWord32 _statCounter[6];
+
+ int _testMode;
+ int _testResults;
+ VADDTXstruct _setStruct;
+ VADDTXstruct _getStruct;
+};
+
+
+#endif
diff --git a/src/modules/audio_coding/main/test/Tester.cpp b/src/modules/audio_coding/main/test/Tester.cpp
new file mode 100644
index 0000000..1049cad
--- /dev/null
+++ b/src/modules/audio_coding/main/test/Tester.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <vector>
+
+#include "audio_coding_module.h"
+#include "trace.h"
+
+#include "APITest.h"
+#include "EncodeDecodeTest.h"
+#include "EncodeToFileTest.h"
+#include "iSACTest.h"
+#include "SpatialAudio.h"
+#include "TestAllCodecs.h"
+#include "TestFEC.h"
+#include "TestStereo.h"
+#include "TestVADDTX.h"
+#include "TwoWayCommunication.h"
+
+// Be sure to create the following directories before running the tests:
+// ./modules/audio_coding/main/test/res_tests
+// ./modules/audio_coding/main/test/res_autotests
+
+// Choose what tests to run by defining one or more of the following:
+#define ACM_AUTO_TEST // Most common codecs and settings will be tested
+//#define ACM_TEST_ENC_DEC // You decide what to test at run time.
+ // Used for debugging and for testing while implementing.
+//#define ACM_TEST_TWO_WAY // Debugging
+//#define ACM_TEST_ALL_ENC_DEC // Loop through all defined codecs and settings
+//#define ACM_TEST_STEREO // Run stereo and spatial audio tests
+//#define ACM_TEST_VAD_DTX // Run all VAD/DTX tests
+//#define ACM_TEST_FEC // Test FEC (also called RED)
+//#define ACM_TEST_CODEC_SPEC_API // Only iSAC has codec-specific APIs in this version
+//#define ACM_TEST_FULL_API // Test all APIs with threads (long test)
+
+
+void PopulateTests(std::vector<ACMTest*>* tests)
+{
+
+ Trace::CreateTrace();
+ Trace::SetTraceFile("./modules/audio_coding/main/test/res_tests/test_trace.txt");
+
+ printf("The following tests will be executed:\n");
+#ifdef ACM_AUTO_TEST
+ printf(" ACM auto test\n");
+ tests->push_back(new EncodeDecodeTest(0));
+ tests->push_back(new TwoWayCommunication(0));
+ tests->push_back(new TestAllCodecs(0));
+ tests->push_back(new TestStereo(0));
+ tests->push_back(new SpatialAudio(0));
+ tests->push_back(new TestVADDTX(0));
+ tests->push_back(new TestFEC(0));
+ tests->push_back(new ISACTest(0));
+#endif
+#ifdef ACM_TEST_ENC_DEC
+ printf(" ACM encode-decode test\n");
+ tests->push_back(new EncodeDecodeTest(2));
+#endif
+#ifdef ACM_TEST_TWO_WAY
+ printf(" ACM two-way communication test\n");
+ tests->push_back(new TwoWayCommunication(1));
+#endif
+#ifdef ACM_TEST_ALL_ENC_DEC
+ printf(" ACM all codecs test\n");
+ tests->push_back(new TestAllCodecs(1));
+#endif
+#ifdef ACM_TEST_STEREO
+ printf(" ACM stereo test\n");
+ tests->push_back(new TestStereo(1));
+ tests->push_back(new SpatialAudio(2));
+#endif
+#ifdef ACM_TEST_VAD_DTX
+ printf(" ACM VAD-DTX test\n");
+ tests->push_back(new TestVADDTX(1));
+#endif
+#ifdef ACM_TEST_FEC
+ printf(" ACM FEC test\n");
+ tests->push_back(new TestFEC(1));
+#endif
+#ifdef ACM_TEST_CODEC_SPEC_API
+ printf(" ACM codec API test\n");
+ tests->push_back(new ISACTest(1));
+#endif
+#ifdef ACM_TEST_FULL_API
+ printf(" ACM full API test\n");
+ tests->push_back(new APITest());
+#endif
+ printf("\n");
+}
+
+int main()
+{
+ std::vector<ACMTest*> tests;
+ PopulateTests(&tests);
+ std::vector<ACMTest*>::iterator it;
+ WebRtc_Word8 version[5000];
+ version[0] = '\0';
+
+ WebRtc_UWord32 remainingBufferInByte = 4999;
+ WebRtc_UWord32 position = 0;
+ AudioCodingModule::GetVersion(version, remainingBufferInByte, position);
+
+ printf("%s\n", version);
+ for (it=tests.begin() ; it < tests.end(); it++)
+ {
+ try {
+
+ (*it)->Perform();
+ }
+ catch (char *except)
+ {
+ printf("Test failed with message: %s", except);
+ getchar();
+ return -1;
+ }
+ delete (*it);
+ }
+
+ Trace::ReturnTrace();
+ printf("ACM test completed\n");
+
+ return 0;
+}
diff --git a/src/modules/audio_coding/main/test/TimedTrace.cpp b/src/modules/audio_coding/main/test/TimedTrace.cpp
new file mode 100644
index 0000000..6bf301f
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TimedTrace.cpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "TimedTrace.h"
+#include <math.h>
+
+double TimedTrace::_timeEllapsedSec = 0;
+FILE* TimedTrace::_timedTraceFile = NULL;
+
+TimedTrace::TimedTrace()
+{
+
+}
+
+TimedTrace::~TimedTrace()
+{
+ if(_timedTraceFile != NULL)
+ {
+ fclose(_timedTraceFile);
+ }
+ _timedTraceFile = NULL;
+}
+
+WebRtc_Word16
+TimedTrace::SetUp(char* fileName)
+{
+ if(_timedTraceFile == NULL)
+ {
+ _timedTraceFile = fopen(fileName, "w");
+ }
+ if(_timedTraceFile == NULL)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+void
+TimedTrace::SetTimeEllapsed(double timeEllapsedSec)
+{
+ _timeEllapsedSec = timeEllapsedSec;
+}
+
+double
+TimedTrace::TimeEllapsed()
+{
+ return _timeEllapsedSec;
+}
+
+void
+TimedTrace::Tick10Msec()
+{
+ _timeEllapsedSec += 0.010;
+}
+
+void
+TimedTrace::TimedLogg(char* message)
+{
+ unsigned int minutes = (WebRtc_UWord32)floor(_timeEllapsedSec / 60.0);
+ double seconds = _timeEllapsedSec - minutes * 60;
+ //char myFormat[100] = "%8.2f, %3u:%05.2f: %s\n";
+ if(_timedTraceFile != NULL)
+ {
+ fprintf(_timedTraceFile, "%8.2f, %3u:%05.2f: %s\n",
+ _timeEllapsedSec,
+ minutes,
+ seconds,
+ message);
+ }
+}
diff --git a/src/modules/audio_coding/main/test/TimedTrace.h b/src/modules/audio_coding/main/test/TimedTrace.h
new file mode 100644
index 0000000..d37d287
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TimedTrace.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TIMED_TRACE_H
+#define TIMED_TRACE_H
+
+#include "typedefs.h"
+
+#include <cstdio>
+#include <cstdlib>
+
+
+class TimedTrace
+{
+public:
+ TimedTrace();
+ ~TimedTrace();
+
+ void SetTimeEllapsed(double myTime);
+ double TimeEllapsed();
+ void Tick10Msec();
+ WebRtc_Word16 SetUp(char* fileName);
+ void TimedLogg(char* message);
+
+private:
+ static double _timeEllapsedSec;
+ static FILE* _timedTraceFile;
+
+};
+
+#endif
diff --git a/src/modules/audio_coding/main/test/TwoWayCommunication.cpp b/src/modules/audio_coding/main/test/TwoWayCommunication.cpp
new file mode 100644
index 0000000..21d4012
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TwoWayCommunication.cpp
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cctype>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef WIN32
+#include <Windows.h>
+#endif
+
+#include "TwoWayCommunication.h"
+#include "engine_configurations.h"
+#include "PCMFile.h"
+#include "utility.h"
+#include "trace.h"
+#include "common_types.h"
+
+using namespace webrtc;
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+
+TwoWayCommunication::TwoWayCommunication(int testMode)
+{
+ _testMode = testMode;
+}
+
+TwoWayCommunication::~TwoWayCommunication()
+{
+ AudioCodingModule::Destroy(_acmA);
+ AudioCodingModule::Destroy(_acmB);
+
+ AudioCodingModule::Destroy(_acmRefA);
+ AudioCodingModule::Destroy(_acmRefB);
+
+ delete _channel_A2B;
+ delete _channel_B2A;
+
+ delete _channelRef_A2B;
+ delete _channelRef_B2A;
+#ifdef WEBRTC_DTMF_DETECTION
+ if(_dtmfDetectorA != NULL)
+ {
+ delete _dtmfDetectorA;
+ }
+ if(_dtmfDetectorB != NULL)
+ {
+ delete _dtmfDetectorB;
+ }
+#endif
+ _inFileA.Close();
+ _inFileB.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+ _outFileRefA.Close();
+ _outFileRefB.Close();
+}
+
+
+WebRtc_UWord8
+TwoWayCommunication::ChooseCodec(WebRtc_UWord8* codecID_A, WebRtc_UWord8* codecID_B)
+{
+ AudioCodingModule* tmpACM = AudioCodingModule::Create(0);
+ WebRtc_UWord8 noCodec = tmpACM->NumberOfCodecs();
+ CodecInst codecInst;
+ printf("List of Supported Codecs\n");
+ printf("========================\n");
+ for(WebRtc_UWord8 codecCntr = 0; codecCntr < noCodec; codecCntr++)
+ {
+ tmpACM->Codec(codecCntr, codecInst);
+ printf("%d- %s\n", codecCntr, codecInst.plname);
+ }
+ printf("\nChoose a send codec for side A [0]: ");
+ char myStr[15] = "";
+ fgets(myStr, 10, stdin);
+ *codecID_A = (WebRtc_UWord8)atoi(myStr);
+
+ printf("\nChoose a send codec for side B [0]: ");
+ fgets(myStr, 10, stdin);
+ *codecID_B = (WebRtc_UWord8)atoi(myStr);
+
+ AudioCodingModule::Destroy(tmpACM);
+ printf("\n");
+ return 0;
+}
+
+WebRtc_Word16
+TwoWayCommunication::ChooseFile(char* fileName, WebRtc_Word16 maxLen, WebRtc_UWord16* frequencyHz)
+{
+ WebRtc_Word8 tmpName[MAX_FILE_NAME_LENGTH_BYTE];
+ //strcpy(_fileName, "in.pcm");
+ //printf("\n\nPlease enter the input file: ");
+ fgets(tmpName, MAX_FILE_NAME_LENGTH_BYTE, stdin);
+ tmpName[MAX_FILE_NAME_LENGTH_BYTE-1] = '\0';
+ WebRtc_Word16 n = 0;
+
+ // removing leading spaces
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (tmpName[n] != 0) &&
+ (n < MAX_FILE_NAME_LENGTH_BYTE))
+ {
+ n++;
+ }
+ if(n > 0)
+ {
+ memmove(tmpName, &tmpName[n], MAX_FILE_NAME_LENGTH_BYTE - n);
+ }
+
+ //removing trailing spaces
+ n = (WebRtc_Word16)(strlen(tmpName) - 1);
+ if(n >= 0)
+ {
+ while((isspace(tmpName[n]) || iscntrl(tmpName[n])) &&
+ (n >= 0))
+ {
+ n--;
+ }
+ }
+ if(n >= 0)
+ {
+ tmpName[n + 1] = '\0';
+ }
+
+ WebRtc_Word16 len = (WebRtc_Word16)strlen(tmpName);
+ if(len > maxLen)
+ {
+ return -1;
+ }
+ if(len > 0)
+ {
+ strncpy(fileName, tmpName, len+1);
+ }
+ printf("Enter the sampling frequency (in Hz) of the above file [%u]: ", *frequencyHz);
+ fgets(tmpName, 6, stdin);
+ WebRtc_UWord16 tmpFreq = (WebRtc_UWord16)atoi(tmpName);
+ if(tmpFreq > 0)
+ {
+ *frequencyHz = tmpFreq;
+ }
+ return 0;
+}
+
+WebRtc_Word16 TwoWayCommunication::SetUp()
+{
+ _acmA = AudioCodingModule::Create(1);
+ _acmB = AudioCodingModule::Create(2);
+
+ _acmRefA = AudioCodingModule::Create(3);
+ _acmRefB = AudioCodingModule::Create(4);
+
+ WebRtc_UWord8 codecID_A;
+ WebRtc_UWord8 codecID_B;
+
+ ChooseCodec(&codecID_A, &codecID_B);
+ CodecInst codecInst_A;
+ CodecInst codecInst_B;
+ CodecInst dummyCodec;
+ _acmA->Codec(codecID_A, codecInst_A);
+ _acmB->Codec(codecID_B, codecInst_B);
+
+ _acmA->Codec(6, dummyCodec);
+
+ //--- Set A codecs
+ CHECK_ERROR(_acmA->RegisterSendCodec(codecInst_A));
+ CHECK_ERROR(_acmA->RegisterReceiveCodec(codecInst_B));
+#ifdef WEBRTC_DTMF_DETECTION
+ _dtmfDetectorA = new(DTMFDetector);
+ CHECK_ERROR(_acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA));
+#endif
+ //--- Set ref-A codecs
+ CHECK_ERROR(_acmRefA->RegisterSendCodec(codecInst_A));
+ CHECK_ERROR(_acmRefA->RegisterReceiveCodec(codecInst_B));
+
+ //--- Set B codecs
+ CHECK_ERROR(_acmB->RegisterSendCodec(codecInst_B));
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(codecInst_A));
+#ifdef WEBRTC_DTMF_DETECTION
+ _dtmfDetectorB = new(DTMFDetector);
+ CHECK_ERROR(_acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA));
+#endif
+
+ //--- Set ref-B codecs
+ CHECK_ERROR(_acmRefB->RegisterSendCodec(codecInst_B));
+ CHECK_ERROR(_acmRefB->RegisterReceiveCodec(codecInst_A));
+
+ char fileName[500];
+ char refFileName[500];
+ WebRtc_UWord16 frequencyHz;
+
+ //--- Input A
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ frequencyHz = 32000;
+ printf("Enter input file at side A [%s]: ", fileName);
+ ChooseFile(fileName, 499, &frequencyHz);
+
+
+ _inFileA.Open(fileName, frequencyHz, "rb");
+
+ //--- Output A
+ strcpy(fileName, "outA.pcm");
+ frequencyHz = 16000;
+ printf("Enter output file at side A [%s]: ", fileName);
+ ChooseFile(fileName, 499, &frequencyHz);
+ _outFileA.Open(fileName, frequencyHz, "wb");
+ strcpy(refFileName, "ref_");
+ strcat(refFileName, fileName);
+ _outFileRefA.Open(refFileName, frequencyHz, "wb");
+
+ //--- Input B
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ frequencyHz = 32000;
+ printf("\n\nEnter input file at side B [%s]: ", fileName);
+ ChooseFile(fileName, 499, &frequencyHz);
+ _inFileB.Open(fileName, frequencyHz, "rb");
+
+ //--- Output B
+ strcpy(fileName, "outB.pcm");
+ frequencyHz = 16000;
+ printf("Enter output file at side B [%s]: ", fileName);
+ ChooseFile(fileName, 499, &frequencyHz);
+ _outFileB.Open(fileName, frequencyHz, "wb");
+ strcpy(refFileName, "ref_");
+ strcat(refFileName, fileName);
+ _outFileRefB.Open(refFileName, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ _acmA->RegisterTransportCallback(_channel_A2B);
+ _channel_A2B->RegisterReceiverACM(_acmB);
+ //--- Do the same for the reference
+ _channelRef_A2B = new Channel;
+ _acmRefA->RegisterTransportCallback(_channelRef_A2B);
+ _channelRef_A2B->RegisterReceiverACM(_acmRefB);
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ _acmB->RegisterTransportCallback(_channel_B2A);
+ _channel_B2A->RegisterReceiverACM(_acmA);
+ //--- Do the same for reference
+ _channelRef_B2A = new Channel;
+ _acmRefB->RegisterTransportCallback(_channelRef_B2A);
+ _channelRef_B2A->RegisterReceiverACM(_acmRefA);
+
+ // The clicks will be more obvious when we
+ // are in FAX mode.
+ _acmB->SetPlayoutMode(fax);
+ _acmRefB->SetPlayoutMode(fax);
+
+ return 0;
+}
+
+WebRtc_Word16 TwoWayCommunication::SetUpAutotest()
+{
+ _acmA = AudioCodingModule::Create(1);
+ _acmB = AudioCodingModule::Create(2);
+
+ _acmRefA = AudioCodingModule::Create(3);
+ _acmRefB = AudioCodingModule::Create(4);
+
+ CodecInst codecInst_A;
+ CodecInst codecInst_B;
+ CodecInst dummyCodec;
+
+ _acmA->Codec("ISAC", codecInst_A, 16000);
+ _acmB->Codec("L16", codecInst_B, 8000);
+ _acmA->Codec(6, dummyCodec);
+
+ //--- Set A codecs
+ CHECK_ERROR(_acmA->RegisterSendCodec(codecInst_A));
+ CHECK_ERROR(_acmA->RegisterReceiveCodec(codecInst_B));
+#ifdef WEBRTC_DTMF_DETECTION
+ _dtmfDetectorA = new(DTMFDetector);
+ CHECK_ERROR(_acmA->RegisterIncomingMessagesCallback(_dtmfDetectorA, ACMUSA));
+#endif
+
+ //--- Set ref-A codecs
+ CHECK_ERROR(_acmRefA->RegisterSendCodec(codecInst_A));
+ CHECK_ERROR(_acmRefA->RegisterReceiveCodec(codecInst_B));
+
+ //--- Set B codecs
+ CHECK_ERROR(_acmB->RegisterSendCodec(codecInst_B));
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(codecInst_A));
+#ifdef WEBRTC_DTMF_DETECTION
+ _dtmfDetectorB = new(DTMFDetector);
+ CHECK_ERROR(_acmB->RegisterIncomingMessagesCallback(_dtmfDetectorB, ACMUSA));
+#endif
+
+ //--- Set ref-B codecs
+ CHECK_ERROR(_acmRefB->RegisterSendCodec(codecInst_B));
+ CHECK_ERROR(_acmRefB->RegisterReceiveCodec(codecInst_A));
+
+ char fileName[500];
+ char refFileName[500];
+ WebRtc_UWord16 frequencyHz;
+
+
+ //--- Input A
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ frequencyHz = 16000;
+ _inFileA.Open(fileName, frequencyHz, "rb");
+
+ //--- Output A
+ strcpy(fileName, "./modules/audio_coding/main/test/res_autotests/outAutotestA.pcm");
+ frequencyHz = 16000;
+ _outFileA.Open(fileName, frequencyHz, "wb");
+ strcpy(refFileName, "./modules/audio_coding/main/test/res_autotests/ref_outAutotestA.pcm");
+ _outFileRefA.Open(refFileName, frequencyHz, "wb");
+
+ //--- Input B
+ strcpy(fileName, "./modules/audio_coding/main/test/testfile32kHz.pcm");
+ frequencyHz = 16000;
+ _inFileB.Open(fileName, frequencyHz, "rb");
+
+ //--- Output B
+ strcpy(fileName, "./modules/audio_coding/main/test/res_autotests/outAutotestB.pcm");
+ frequencyHz = 16000;
+ _outFileB.Open(fileName, frequencyHz, "wb");
+ strcpy(refFileName, "./modules/audio_coding/main/test/res_autotests/ref_outAutotestB.pcm");
+ _outFileRefB.Open(refFileName, frequencyHz, "wb");
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ _acmA->RegisterTransportCallback(_channel_A2B);
+ _channel_A2B->RegisterReceiverACM(_acmB);
+ //--- Do the same for the reference
+ _channelRef_A2B = new Channel;
+ _acmRefA->RegisterTransportCallback(_channelRef_A2B);
+ _channelRef_A2B->RegisterReceiverACM(_acmRefB);
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ _acmB->RegisterTransportCallback(_channel_B2A);
+ _channel_B2A->RegisterReceiverACM(_acmA);
+ //--- Do the same for reference
+ _channelRef_B2A = new Channel;
+ _acmRefB->RegisterTransportCallback(_channelRef_B2A);
+ _channelRef_B2A->RegisterReceiverACM(_acmRefA);
+
+ // The clicks will be more obvious when we
+ // are in FAX mode.
+ _acmB->SetPlayoutMode(fax);
+ _acmRefB->SetPlayoutMode(fax);
+
+ return 0;
+}
+
+void
+TwoWayCommunication::Perform()
+{
+ if(_testMode == 0)
+ {
+ printf("Running TwoWayCommunication Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- TwoWayCommunication ----------");
+ SetUpAutotest();
+ }
+ else
+ {
+ SetUp();
+ }
+ unsigned int msecPassed = 0;
+ unsigned int secPassed = 0;
+
+ WebRtc_Word32 outFreqHzA = _outFileA.SamplingFrequency();
+ WebRtc_Word32 outFreqHzB = _outFileB.SamplingFrequency();
+
+ AudioFrame audioFrame;
+
+ CodecInst codecInst_B;
+ CodecInst dummy;
+
+ _acmB->SendCodec(codecInst_B);
+
+ if(_testMode != 0)
+ {
+ printf("\n");
+ printf("sec:msec A B\n");
+ printf("-------- ----- -----\n");
+ }
+
+ while(!_inFileA.EndOfFile() && !_inFileB.EndOfFile())
+ {
+ _inFileA.Read10MsData(audioFrame);
+ _acmA->Add10MsData(audioFrame);
+ _acmRefA->Add10MsData(audioFrame);
+
+ _inFileB.Read10MsData(audioFrame);
+ _acmB->Add10MsData(audioFrame);
+ _acmRefB->Add10MsData(audioFrame);
+
+
+ _acmA->Process();
+ _acmB->Process();
+ _acmRefA->Process();
+ _acmRefB->Process();
+
+ _acmA->PlayoutData10Ms(outFreqHzA, audioFrame);
+ _outFileA.Write10MsData(audioFrame);
+
+ _acmRefA->PlayoutData10Ms(outFreqHzA, audioFrame);
+ _outFileRefA.Write10MsData(audioFrame);
+
+ _acmB->PlayoutData10Ms(outFreqHzB, audioFrame);
+ _outFileB.Write10MsData(audioFrame);
+
+ _acmRefB->PlayoutData10Ms(outFreqHzB, audioFrame);
+ _outFileRefB.Write10MsData(audioFrame);
+
+ msecPassed += 10;
+ if(msecPassed >= 1000)
+ {
+ msecPassed = 0;
+ secPassed++;
+ }
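+
+ // Stress the ACMs periodically: every 5th second side A's encoder is reset
+ // and side B's sender is re-initialized (its send codec is re-registered
+ // about 990 ms later); every 7th second side B's decoder is reset and side
+ // A's receiver is re-initialized (the receive codec is re-registered about
+ // 990 ms later). Errors and audio glitches are expected during these gaps.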
+ if(((secPassed%5) == 4) && (msecPassed == 0))
+ {
+ if(_testMode != 0)
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ }
+ _acmA->ResetEncoder();
+ if(_testMode == 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- Errors epected");
+ printf(".");
+ }
+ else
+ {
+ printf("Reset Encoder (click in side B) ");
+ printf("Initialize Sender (no audio in side A)\n");
+ }
+ CHECK_ERROR(_acmB->InitializeSender());
+ }
+ if(((secPassed%5) == 4) && (msecPassed >= 990))
+ {
+ if(_testMode == 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "----- END: Errors epected");
+ printf(".");
+ }
+ else
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ printf(" ");
+ printf("Register Send Codec (audio back in side A)\n");
+ }
+ CHECK_ERROR(_acmB->RegisterSendCodec(codecInst_B));
+ CHECK_ERROR(_acmB->SendCodec(dummy));
+ }
+ if(((secPassed%7) == 6) && (msecPassed == 0))
+ {
+ CHECK_ERROR(_acmB->ResetDecoder());
+ if(_testMode == 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- Errors epected");
+ printf(".");
+ }
+ else
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ printf("Initialize Receiver (no audio in side A) ");
+ printf("Reset Decoder\n");
+ }
+ CHECK_ERROR(_acmA->InitializeReceiver());
+ }
+ if(((secPassed%7) == 6) && (msecPassed >= 990))
+ {
+ if(_testMode == 0)
+ {
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "----- END: Errors epected");
+ printf(".");
+ }
+ else
+ {
+ printf("%3u:%3u ", secPassed, msecPassed);
+ printf("Register Receive Coded (audio back in side A)\n");
+ }
+ CHECK_ERROR(_acmA->RegisterReceiveCodec(codecInst_B));
+ }
+ //Sleep(9);
+ }
+ if(_testMode == 0)
+ {
+ printf("Done!\n");
+ }
+
+#ifdef WEBRTC_DTMF_DETECTION
+ printf("\nDTMF at Side A\n");
+ _dtmfDetectorA->PrintDetectedDigits();
+
+ printf("\nDTMF at Side B\n");
+ _dtmfDetectorB->PrintDetectedDigits();
+#endif
+
+
+}
+
diff --git a/src/modules/audio_coding/main/test/TwoWayCommunication.h b/src/modules/audio_coding/main/test/TwoWayCommunication.h
new file mode 100644
index 0000000..66ede04
--- /dev/null
+++ b/src/modules/audio_coding/main/test/TwoWayCommunication.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef TWO_WAY_COMMUNICATION_H
+#define TWO_WAY_COMMUNICATION_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+#include "audio_coding_module.h"
+#include "utility.h"
+
+
+class TwoWayCommunication : public ACMTest
+{
+public:
+ TwoWayCommunication(int testMode = 1);
+ ~TwoWayCommunication();
+
+ void Perform();
+private:
+ WebRtc_UWord8 ChooseCodec(WebRtc_UWord8* codecID_A, WebRtc_UWord8* codecID_B);
+ WebRtc_Word16 ChooseFile(char* fileName, WebRtc_Word16 maxLen, WebRtc_UWord16* frequencyHz);
+ WebRtc_Word16 SetUp();
+ WebRtc_Word16 SetUpAutotest();
+
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ AudioCodingModule* _acmRefA;
+ AudioCodingModule* _acmRefB;
+
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ Channel* _channelRef_A2B;
+ Channel* _channelRef_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ PCMFile _outFileRefA;
+ PCMFile _outFileRefB;
+
+ DTMFDetector* _dtmfDetectorA;
+ DTMFDetector* _dtmfDetectorB;
+
+ int _testMode;
+};
+
+
+#endif
diff --git a/src/modules/audio_coding/main/test/iSACTest.cpp b/src/modules/audio_coding/main/test/iSACTest.cpp
new file mode 100644
index 0000000..b1c0119
--- /dev/null
+++ b/src/modules/audio_coding/main/test/iSACTest.cpp
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cctype>
+#include <stdio.h>
+#include <string.h>
+
+#if _WIN32
+#include <windows.h>
+#elif WEBRTC_LINUX
+#include <ctime>
+#else
+#include <sys/time.h>
+#include <time.h>
+#endif
+
+#include "event_wrapper.h"
+#include "iSACTest.h"
+#include "utility.h"
+#include "trace.h"
+
+#include "tick_util.h"
+
+
+void SetISACConfigDefault(
+ ACMTestISACConfig& isacConfig)
+{
+ isacConfig.currentRateBitPerSec = 0;
+ isacConfig.currentFrameSizeMsec = 0;
+ isacConfig.maxRateBitPerSec = 0;
+ isacConfig.maxPayloadSizeByte = 0;
+ isacConfig.encodingMode = -1;
+ isacConfig.initRateBitPerSec = 0;
+ isacConfig.initFrameSizeInMsec = 0;
+ isacConfig.enforceFrameSize = false;
+ return;
+}
+
+
+WebRtc_Word16 SetISAConfig(
+ ACMTestISACConfig& isacConfig,
+ AudioCodingModule* acm,
+ int testMode)
+{
+
+ if((isacConfig.currentRateBitPerSec != 0) ||
+ (isacConfig.currentFrameSizeMsec != 0))
+ {
+ CodecInst sendCodec;
+ acm->SendCodec(sendCodec);
+ if(isacConfig.currentRateBitPerSec < 0)
+ {
+ sendCodec.rate = -1;
+ CHECK_ERROR(acm->RegisterSendCodec(sendCodec));
+ if(testMode != 0)
+ {
+ printf("ISAC-%s Registered in adaptive (channel-dependent) mode.\n",
+ (sendCodec.plfreq == 32000)? "swb":"wb");
+ }
+ }
+ else
+ {
+
+ if(isacConfig.currentRateBitPerSec != 0)
+ {
+ sendCodec.rate = isacConfig.currentRateBitPerSec;
+ }
+ if(isacConfig.currentFrameSizeMsec != 0)
+ {
+ sendCodec.pacsize = isacConfig.currentFrameSizeMsec *
+ (sendCodec.plfreq / 1000);
+ }
+ CHECK_ERROR(acm->RegisterSendCodec(sendCodec));
+ if(testMode != 0)
+ {
+ printf("Target rate is set to %d bit/sec with frame-size %d ms \n",
+ (int)isacConfig.currentRateBitPerSec,
+ (int)sendCodec.pacsize / (sendCodec.plfreq / 1000));
+ }
+ }
+ }
+
+ if(isacConfig.maxRateBitPerSec > 0)
+ {
+ CHECK_ERROR(acm->SetISACMaxRate(isacConfig.maxRateBitPerSec));
+ if(testMode != 0)
+ {
+ printf("Max rate is set to %u bit/sec\n",
+ isacConfig.maxRateBitPerSec);
+ }
+ }
+ if(isacConfig.maxPayloadSizeByte > 0)
+ {
+ CHECK_ERROR(acm->SetISACMaxPayloadSize(isacConfig.maxPayloadSizeByte));
+ if(testMode != 0)
+ {
+ printf("Max payload-size is set to %u bit/sec\n",
+ isacConfig.maxPayloadSizeByte);
+ }
+ }
+ if((isacConfig.initFrameSizeInMsec != 0) ||
+ (isacConfig.initRateBitPerSec != 0))
+ {
+ CHECK_ERROR(acm->ConfigISACBandwidthEstimator(
+ (WebRtc_UWord8)isacConfig.initFrameSizeInMsec,
+ (WebRtc_UWord16)isacConfig.initRateBitPerSec,
+ isacConfig.enforceFrameSize));
+ if((isacConfig.initFrameSizeInMsec != 0) && (testMode != 0))
+ {
+ printf("Initialize BWE to %d msec frame-size\n",
+ isacConfig.initFrameSizeInMsec);
+ }
+ if((isacConfig.initRateBitPerSec != 0) && (testMode != 0))
+ {
+ printf("Initialize BWE to %u bit/sec send-bandwidth\n",
+ isacConfig.initRateBitPerSec);
+ }
+ }
+
+ return 0;
+}
+
+
+ISACTest::ISACTest(int testMode)
+{
+ _testMode = testMode;
+}
+
+ISACTest::~ISACTest()
+{
+ AudioCodingModule::Destroy(_acmA);
+ AudioCodingModule::Destroy(_acmB);
+
+ delete _channel_A2B;
+ delete _channel_B2A;
+}
+
+
+WebRtc_Word16
+ISACTest::Setup()
+{
+ int codecCntr;
+ CodecInst codecParam;
+
+ _acmA = AudioCodingModule::Create(1);
+ _acmB = AudioCodingModule::Create(2);
+
+ for(codecCntr = 0; codecCntr < AudioCodingModule::NumberOfCodecs(); codecCntr++)
+ {
+ AudioCodingModule::Codec(codecCntr, codecParam);
+ if(!STR_CASE_CMP(codecParam.plname, "ISAC") && codecParam.plfreq == 16000)
+ {
+ memcpy(&_paramISAC16kHz, &codecParam, sizeof(CodecInst));
+ _idISAC16kHz = codecCntr;
+ }
+ if(!STR_CASE_CMP(codecParam.plname, "ISAC") && codecParam.plfreq == 32000)
+ {
+ memcpy(&_paramISAC32kHz, &codecParam, sizeof(CodecInst));
+ _idISAC32kHz = codecCntr;
+ }
+ }
+
+ // register both iSAC-wb & iSAC-swb in both sides as receiver codecs
+ CHECK_ERROR(_acmA->RegisterReceiveCodec(_paramISAC16kHz));
+ CHECK_ERROR(_acmA->RegisterReceiveCodec(_paramISAC32kHz));
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(_paramISAC16kHz));
+ CHECK_ERROR(_acmB->RegisterReceiveCodec(_paramISAC32kHz));
+
+ //--- Set A-to-B channel
+ _channel_A2B = new Channel;
+ CHECK_ERROR(_acmA->RegisterTransportCallback(_channel_A2B));
+ _channel_A2B->RegisterReceiverACM(_acmB);
+
+ //--- Set B-to-A channel
+ _channel_B2A = new Channel;
+ CHECK_ERROR(_acmB->RegisterTransportCallback(_channel_B2A));
+ _channel_B2A->RegisterReceiverACM(_acmA);
+
+ strncpy(_fileNameSWB, "./modules/audio_coding/main/test/testfile32kHz.pcm",
+ MAX_FILE_NAME_LENGTH_BYTE);
+
+ _acmB->RegisterSendCodec(_paramISAC16kHz);
+ _acmA->RegisterSendCodec(_paramISAC32kHz);
+
+ if(_testMode != 0)
+ {
+ printf("Side A Send Codec\n");
+ printf("%s %d\n", _paramISAC32kHz.plname, _paramISAC32kHz.plfreq);
+
+ printf("Side B Send Codec\n");
+ printf("%s %d\n", _paramISAC16kHz.plname, _paramISAC16kHz.plfreq);
+ }
+
+ _inFileA.Open(_fileNameSWB, 32000, "rb");
+ if(_testMode == 0)
+ {
+ char fileNameA[] = "./modules/audio_coding/main/test/res_autotests/testisac_a.pcm";
+ char fileNameB[] = "./modules/audio_coding/main/test/res_autotests/testisac_b.pcm";
+ _outFileA.Open(fileNameA, 32000, "wb");
+ _outFileB.Open(fileNameB, 32000, "wb");
+ }
+ else
+ {
+ char fileNameA[] = "./modules/audio_coding/main/test/res_tests/testisac_a.pcm";
+ char fileNameB[] = "./modules/audio_coding/main/test/res_tests/testisac_b.pcm";
+ _outFileA.Open(fileNameA, 32000, "wb");
+ _outFileB.Open(fileNameB, 32000, "wb");
+ }
+
+ while(!_inFileA.EndOfFile())
+ {
+ Run10ms();
+ }
+ CodecInst receiveCodec;
+ CHECK_ERROR(_acmA->ReceiveCodec(receiveCodec));
+ if(_testMode != 0)
+ {
+ printf("Side A Receive Codec\n");
+ printf("%s %d\n", receiveCodec.plname, receiveCodec.plfreq);
+ }
+
+ CHECK_ERROR(_acmB->ReceiveCodec(receiveCodec));
+ if(_testMode != 0)
+ {
+ printf("Side B Receive Codec\n");
+ printf("%s %d\n", receiveCodec.plname, receiveCodec.plfreq);
+ }
+
+ _inFileA.Close();
+ _outFileA.Close();
+ _outFileB.Close();
+
+ return 0;
+}
+
+
+void
+ISACTest::Perform()
+{
+ if(_testMode == 0)
+ {
+ printf("Running iSAC Test");
+ WEBRTC_TRACE(webrtc::kTraceStateInfo, webrtc::kTraceAudioCoding, -1, "---------- iSACTest ----------");
+ }
+
+ Setup();
+
+ WebRtc_Word16 testNr = 0;
+ ACMTestISACConfig wbISACConfig;
+ ACMTestISACConfig swbISACConfig;
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = -1;
+ swbISACConfig.currentRateBitPerSec = -1;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ if (_testMode != 0)
+ {
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = -1;
+ swbISACConfig.currentRateBitPerSec = -1;
+ wbISACConfig.initRateBitPerSec = 13000;
+ wbISACConfig.initFrameSizeInMsec = 60;
+ swbISACConfig.initRateBitPerSec = 20000;
+ swbISACConfig.initFrameSizeInMsec = 30;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = 20000;
+ swbISACConfig.currentRateBitPerSec = 48000;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ wbISACConfig.currentRateBitPerSec = 16000;
+ swbISACConfig.currentRateBitPerSec = 30000;
+ wbISACConfig.currentFrameSizeMsec = 60;
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+ }
+
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ int dummy;
+ if((_testMode == 0) || (_testMode == 1))
+ {
+ swbISACConfig.maxPayloadSizeByte = (WebRtc_UWord16)200;
+ wbISACConfig.maxPayloadSizeByte = (WebRtc_UWord16)200;
+ }
+ else
+ {
+ printf("Enter the max payload-size for side A: ");
+ scanf("%d", &dummy);
+ swbISACConfig.maxPayloadSizeByte = (WebRtc_UWord16)dummy;
+ printf("Enter the max payload-size for side B: ");
+ scanf("%d", &dummy);
+ wbISACConfig.maxPayloadSizeByte = (WebRtc_UWord16)dummy;
+ }
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+ _acmA->ResetEncoder();
+ _acmB->ResetEncoder();
+ SetISACConfigDefault(wbISACConfig);
+ SetISACConfigDefault(swbISACConfig);
+
+ if((_testMode == 0) || (_testMode == 1))
+ {
+ swbISACConfig.maxRateBitPerSec = (WebRtc_UWord32)48000;
+ wbISACConfig.maxRateBitPerSec = (WebRtc_UWord32)48000;
+ }
+ else
+ {
+ printf("Enter the max rate for side A: ");
+ scanf("%d", &dummy);
+ swbISACConfig.maxRateBitPerSec = (WebRtc_UWord32)dummy;
+ printf("Enter the max rate for side B: ");
+ scanf("%d", &dummy);
+ wbISACConfig.maxRateBitPerSec = (WebRtc_UWord32)dummy;
+ }
+
+ testNr++;
+ EncodeDecode(testNr, wbISACConfig, swbISACConfig);
+
+
+ testNr++;
+ if(_testMode == 0)
+ {
+ SwitchingSamplingRate(testNr, 4);
+ printf("Done!\n");
+ }
+ else
+ {
+ SwitchingSamplingRate(testNr, 80);
+ }
+}
+
+
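+// Pushes 10 ms of audio from the shared input file through both ACMs and
+// writes each side's decoded 32 kHz output to its own file.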
+void
+ISACTest::Run10ms()
+{
+ AudioFrame audioFrame;
+
+ _inFileA.Read10MsData(audioFrame);
+ CHECK_ERROR(_acmA->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmB->Add10MsData(audioFrame));
+
+ CHECK_ERROR(_acmA->Process());
+ CHECK_ERROR(_acmB->Process());
+
+ CHECK_ERROR(_acmA->PlayoutData10Ms(32000, audioFrame));
+ _outFileA.Write10MsData(audioFrame);
+
+ CHECK_ERROR(_acmB->PlayoutData10Ms(32000, audioFrame));
+ _outFileB.Write10MsData(audioFrame);
+}
+
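+// Runs one encode/decode pass with side A sending super-wideband iSAC and
+// side B sending wideband iSAC, applying the given configuration to each
+// side. A currentRateBitPerSec of -1 puts that side in adaptive mode.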
+void
+ISACTest::EncodeDecode(
+ int testNr,
+ ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig)
+{
+ if(_testMode == 0)
+ {
+ printf(".");
+ }
+ else
+ {
+ printf("\nTest %d:\n\n", testNr);
+ }
+ char fileNameOut[MAX_FILE_NAME_LENGTH_BYTE];
+
+ // Files in Side A
+ _inFileA.Open(_fileNameSWB, 32000, "rb", true);
+ if(_testMode == 0)
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_autotests/out_iSACTest_%s_%02d.pcm",
+ "A",
+ testNr);
+ }
+ else
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_tests/out%s_%02d.pcm",
+ "A",
+ testNr);
+ }
+ _outFileA.Open(fileNameOut, 32000, "wb");
+
+ // Files in Side B
+ _inFileB.Open(_fileNameSWB, 32000, "rb", true);
+ if(_testMode == 0)
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_autotests/out_iSACTest_%s_%02d.pcm",
+ "B",
+ testNr);
+ }
+ else
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_tests/out%s_%02d.pcm",
+ "B",
+ testNr);
+ }
+ _outFileB.Open(fileNameOut, 32000, "wb");
+
+ CHECK_ERROR(_acmA->RegisterSendCodec(_paramISAC16kHz));
+ CHECK_ERROR(_acmA->RegisterSendCodec(_paramISAC32kHz));
+
+ CHECK_ERROR(_acmB->RegisterSendCodec(_paramISAC32kHz));
+ CHECK_ERROR(_acmB->RegisterSendCodec(_paramISAC16kHz));
+ if(_testMode != 0)
+ {
+ printf("Side A Sending Super-Wideband \n");
+ printf("Side B Sending Wideband\n\n");
+ }
+
+ SetISAConfig(swbISACConfig, _acmA, _testMode);
+ SetISAConfig(wbISACConfig, _acmB, _testMode);
+
+ bool adaptiveMode = false;
+ if((swbISACConfig.currentRateBitPerSec == -1) ||
+ (wbISACConfig.currentRateBitPerSec == -1))
+ {
+ adaptiveMode = true;
+ }
+ _myTimer.Reset();
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ char currentTime[500];
+ if(_testMode == 2) printf("\n");
+ CodecInst sendCodec;
+ EventWrapper* myEvent = EventWrapper::Create();
+ myEvent->StartTimer(true, 10);
+ while(!(_inFileA.EndOfFile() || _inFileA.Rewinded()))
+ {
+ Run10ms();
+ _myTimer.Tick10ms();
+ _myTimer.CurrentTimeHMS(currentTime);
+ if(_testMode == 2) printf("\r%s ", currentTime);
+
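+ // In adaptive mode (interactive runs only) the 10 ms periodic timer paces
+ // the loop to real time, and the current send rate of each side is printed
+ // so the rate adaptation can be observed.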
+ if((adaptiveMode) && (_testMode != 0))
+ {
+ myEvent->Wait(5000);
+
+ _acmA->SendCodec(sendCodec);
+ if(_testMode == 2) printf("[%d] ", sendCodec.rate);
+ _acmB->SendCodec(sendCodec);
+ if(_testMode == 2) printf("[%d] ", sendCodec.rate);
+ }
+ }
+ myEvent->StopTimer();
+ delete myEvent;
+
+ if(_testMode != 0)
+ {
+ printf("\n\nSide A statistics\n\n");
+ _channel_A2B->PrintStats(_paramISAC32kHz);
+
+ printf("\n\nSide B statistics\n\n");
+ _channel_B2A->PrintStats(_paramISAC16kHz);
+ }
+
+ _channel_A2B->ResetStats();
+ _channel_B2A->ResetStats();
+
+ if(_testMode != 0) printf("\n");
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
+
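+// Alternates the send codec between wideband and super-wideband iSAC: each
+// time an input file wraps around, the corresponding sender is re-registered
+// at the other sampling rate, until 2 * maxSampRateChange switches have been
+// made in total.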
+void
+ISACTest::SwitchingSamplingRate(
+ int testNr,
+ int maxSampRateChange)
+{
+ char fileNameOut[MAX_FILE_NAME_LENGTH_BYTE];
+
+ // Files in Side A
+ _inFileA.Open(_fileNameSWB, 32000, "rb");
+ if(_testMode == 0)
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_autotests/out_iSACTest_%s_%02d.pcm",
+ "A",
+ testNr);
+ }
+ else
+ {
+ printf("\nTest %d", testNr);
+ printf(" Alternate between WB and SWB at the sender Side\n\n");
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_tests/out%s_%02d.pcm",
+ "A",
+ testNr);
+ }
+ _outFileA.Open(fileNameOut, 32000, "wb", true);
+
+ // Files in Side B
+ _inFileB.Open(_fileNameSWB, 32000, "rb");
+ if(_testMode == 0)
+ {
+ sprintf(fileNameOut,
+ "./modules/audio_coding/main/test/res_autotests/out_iSACTest_%s_%02d.pcm",
+ "B",
+ testNr);
+ }
+ else
+ {
+ sprintf(fileNameOut, "./modules/audio_coding/main/test/res_tests/out%s_%02d.pcm",
+ "B",
+ testNr);
+ }
+ _outFileB.Open(fileNameOut, 32000, "wb", true);
+
+ CHECK_ERROR(_acmA->RegisterSendCodec(_paramISAC32kHz));
+ CHECK_ERROR(_acmB->RegisterSendCodec(_paramISAC16kHz));
+ if(_testMode != 0)
+ {
+ printf("Side A Sending Super-Wideband \n");
+ printf("Side B Sending Wideband\n");
+ }
+
+ int numSendCodecChanged = 0;
+ _myTimer.Reset();
+ char currentTime[50];
+ while(numSendCodecChanged < (maxSampRateChange<<1))
+ {
+ Run10ms();
+ _myTimer.Tick10ms();
+ _myTimer.CurrentTimeHMS(currentTime);
+ if(_testMode == 2) printf("\r%s", currentTime);
+ if(_inFileA.EndOfFile())
+ {
+ if(_inFileA.SamplingFrequency() == 16000)
+ {
+ if(_testMode != 0) printf("\nSide A switched to Send Super-Wideband\n");
+ _inFileA.Close();
+ _inFileA.Open(_fileNameSWB, 32000, "rb");
+ CHECK_ERROR(_acmA->RegisterSendCodec(_paramISAC32kHz));
+ }
+ else
+ {
+ if(_testMode != 0) printf("\nSide A switched to Send Wideband\n");
+ _inFileA.Close();
+ _inFileA.Open(_fileNameSWB, 32000, "rb");
+ CHECK_ERROR(_acmA->RegisterSendCodec(_paramISAC16kHz));
+ }
+ numSendCodecChanged++;
+ }
+
+ if(_inFileB.EndOfFile())
+ {
+ if(_inFileB.SamplingFrequency() == 16000)
+ {
+ if(_testMode != 0) printf("\nSide B switched to Send Super-Wideband\n");
+ _inFileB.Close();
+ _inFileB.Open(_fileNameSWB, 32000, "rb");
+ CHECK_ERROR(_acmB->RegisterSendCodec(_paramISAC32kHz));
+ }
+ else
+ {
+ if(_testMode != 0) printf("\nSide B switched to Send Wideband\n");
+ _inFileB.Close();
+ _inFileB.Open(_fileNameSWB, 32000, "rb");
+ CHECK_ERROR(_acmB->RegisterSendCodec(_paramISAC16kHz));
+ }
+ numSendCodecChanged++;
+ }
+ }
+ _outFileA.Close();
+ _outFileB.Close();
+ _inFileA.Close();
+ _inFileB.Close();
+}
diff --git a/src/modules/audio_coding/main/test/iSACTest.h b/src/modules/audio_coding/main/test/iSACTest.h
new file mode 100644
index 0000000..c6d4b9c
--- /dev/null
+++ b/src/modules/audio_coding/main/test/iSACTest.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_ISAC_TEST_H
+#define ACM_ISAC_TEST_H
+
+#include "ACMTest.h"
+#include "Channel.h"
+#include "PCMFile.h"
+#include "audio_coding_module.h"
+#include "utility.h"
+#include "common_types.h"
+
+#define MAX_FILE_NAME_LENGTH_BYTE 500
+#define NO_OF_CLIENTS 15
+
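+// Per-side iSAC settings applied by EncodeDecode(): target rate and frame
+// size, maximum rate and payload size, and the initial rate/frame size used
+// at start-up.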
+struct ACMTestISACConfig
+{
+ WebRtc_Word32 currentRateBitPerSec;
+ WebRtc_Word16 currentFrameSizeMsec;
+ WebRtc_UWord32 maxRateBitPerSec;
+ WebRtc_Word16 maxPayloadSizeByte;
+ WebRtc_Word16 encodingMode;
+ WebRtc_UWord32 initRateBitPerSec;
+ WebRtc_Word16 initFrameSizeInMsec;
+ bool enforceFrameSize;
+};
+
+
+
+class ISACTest : public ACMTest
+{
+public:
+ ISACTest(int testMode);
+ ~ISACTest();
+
+ void Perform();
+private:
+ WebRtc_Word16 Setup();
+ WebRtc_Word16 SetupConference();
+ WebRtc_Word16 RunConference();
+
+
+ void Run10ms();
+
+ void EncodeDecode(
+ int testNr,
+ ACMTestISACConfig& wbISACConfig,
+ ACMTestISACConfig& swbISACConfig);
+
+ void TestBWE(
+ int testNr);
+
+ void SwitchingSamplingRate(
+ int testNr,
+ int maxSampRateChange);
+
+ AudioCodingModule* _acmA;
+ AudioCodingModule* _acmB;
+
+ Channel* _channel_A2B;
+ Channel* _channel_B2A;
+
+ PCMFile _inFileA;
+ PCMFile _inFileB;
+
+ PCMFile _outFileA;
+ PCMFile _outFileB;
+
+ WebRtc_UWord8 _idISAC16kHz;
+ WebRtc_UWord8 _idISAC32kHz;
+ CodecInst _paramISAC16kHz;
+ CodecInst _paramISAC32kHz;
+
+ char _fileNameWB[MAX_FILE_NAME_LENGTH_BYTE];
+ char _fileNameSWB[MAX_FILE_NAME_LENGTH_BYTE];
+
+ ACMTestTimer _myTimer;
+ int _testMode;
+
+ AudioCodingModule* _defaultACM32;
+ AudioCodingModule* _defaultACM16;
+
+ AudioCodingModule* _confACM[NO_OF_CLIENTS];
+ AudioCodingModule* _clientACM[NO_OF_CLIENTS];
+ Channel* _conf2Client[NO_OF_CLIENTS];
+ Channel* _client2Conf[NO_OF_CLIENTS];
+
+ PCMFile _clientOutFile[NO_OF_CLIENTS];
+};
+
+
+#endif // ACM_ISAC_TEST_H
diff --git a/src/modules/audio_coding/main/test/utility.cpp b/src/modules/audio_coding/main/test/utility.cpp
new file mode 100644
index 0000000..c654019
--- /dev/null
+++ b/src/modules/audio_coding/main/test/utility.cpp
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "utility.h"
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+
+#include "audio_coding_module.h"
+#include "common_types.h"
+
+
+#define NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE 13
+
+
+ACMTestTimer::ACMTestTimer() :
+_msec(0),
+_sec(0),
+_min(0),
+_hour(0)
+{
+ return;
+}
+
+ACMTestTimer::~ACMTestTimer()
+{
+ return;
+}
+
+void ACMTestTimer::Reset()
+{
+ _msec = 0;
+ _sec = 0;
+ _min = 0;
+ _hour = 0;
+ return;
+}
+void ACMTestTimer::Tick10ms()
+{
+ _msec += 10;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick1ms()
+{
+ _msec++;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick100ms()
+{
+ _msec += 100;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::Tick1sec()
+{
+ _sec++;
+ Adjust();
+ return;
+}
+
+void ACMTestTimer::CurrentTimeHMS(char* currTime)
+{
+ sprintf(currTime, "%4lu:%02u:%06.3f", _hour, _min, (double)_sec + (double)_msec / 1000.);
+ return;
+}
+
+void ACMTestTimer::CurrentTime(
+ unsigned long& h,
+ unsigned char& m,
+ unsigned char& s,
+ unsigned short& ms)
+{
+ h = _hour;
+ m = _min;
+ s = _sec;
+ ms = _msec;
+ return;
+}
+
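+// Carry milliseconds into seconds, seconds into minutes and minutes into
+// hours after a tick.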
+void ACMTestTimer::Adjust()
+{
+ unsigned int n;
+ if(_msec >= 1000)
+ {
+ n = _msec / 1000;
+ _msec -= (1000 * n);
+ _sec += n;
+ }
+ if(_sec >= 60)
+ {
+ n = _sec / 60;
+ _sec -= (n * 60);
+ _min += n;
+ }
+ if(_min >= 60)
+ {
+ n = _min / 60;
+ _min -= (n * 60);
+ _hour += n;
+ }
+}
+
+
+WebRtc_Word16
+ChooseCodec(
+ CodecInst& codecInst)
+{
+
+ PrintCodecs();
+ //AudioCodingModule* tmpACM = AudioCodingModule::Create(0);
+ WebRtc_UWord8 noCodec = AudioCodingModule::NumberOfCodecs();
+ WebRtc_Word16 codecID;
+ bool outOfRange = false;
+ char myStr[15] = "";
+ do
+ {
+ // Reset the flag each pass so a valid entry after a bad one ends the loop.
+ outOfRange = false;
+ printf("\nChoose a codec [0]: ");
+ fgets(myStr, 10, stdin);
+ codecID = (WebRtc_Word16)atoi(myStr);
+ if((codecID < 0) || (codecID >= noCodec))
+ {
+ printf("\nOut of range.\n");
+ outOfRange = true;
+ }
+ } while(outOfRange);
+
+ CHECK_ERROR(AudioCodingModule::Codec((WebRtc_UWord8)codecID, codecInst));
+ return 0;
+}
+
+void
+PrintCodecs()
+{
+ WebRtc_UWord8 noCodec = AudioCodingModule::NumberOfCodecs();
+
+ CodecInst codecInst;
+ printf("No Name [Hz] [bps]\n");
+ for(WebRtc_UWord8 codecCntr = 0; codecCntr < noCodec; codecCntr++)
+ {
+ AudioCodingModule::Codec(codecCntr, codecInst);
+ printf("%2d- %-18s %5d %6d\n",
+ codecCntr, codecInst.plname, codecInst.plfreq, codecInst.rate);
+ }
+
+}
+
+CircularBuffer::CircularBuffer(WebRtc_UWord32 len):
+_buffIsFull(false),
+_calcAvg(false),
+_calcVar(false),
+_sum(0),
+_sumSqr(0),
+_idx(0),
+_buff(NULL)
+{
+ _buff = new double[len];
+ if(_buff == NULL)
+ {
+ _buffLen = 0;
+ }
+ else
+ {
+ for(WebRtc_UWord32 n = 0; n < len; n++)
+ {
+ _buff[n] = 0;
+ }
+ _buffLen = len;
+ }
+}
+
+CircularBuffer::~CircularBuffer()
+{
+ if(_buff != NULL)
+ {
+ delete [] _buff;
+ _buff = NULL;
+ }
+}
+
+void
+CircularBuffer::Update(
+ const double newVal)
+{
+ assert(_buffLen > 0);
+
+ // store the value that is going to be overwritten
+ double oldVal = _buff[_idx];
+ // record the new value
+ _buff[_idx] = newVal;
+ // increment the index, to point to where we would
+ // write next
+ _idx++;
+ // it is a circular buffer, if we are at the end
+ // we have to cycle to the beginning
+ if(_idx >= _buffLen)
+ {
+ // flag that the buffer is filled up.
+ _buffIsFull = true;
+ _idx = 0;
+ }
+
+ // Update
+
+ if(_calcAvg)
+ {
+ // for the average we have to update
+ // the sum
+ _sum += (newVal - oldVal);
+ }
+
+ if(_calcVar)
+ {
+ // to calculate variance we have to update
+ // the sum of squares
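+ // using newVal^2 - oldVal^2 == (newVal - oldVal) * (newVal + oldVal)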
+ _sumSqr += (double)(newVal - oldVal) * (double)(newVal + oldVal);
+ }
+}
+
+void
+CircularBuffer::SetArithMean(
+ bool enable)
+{
+ assert(_buffLen > 0);
+
+ if(enable && !_calcAvg)
+ {
+ WebRtc_UWord32 lim;
+ if(_buffIsFull)
+ {
+ lim = _buffLen;
+ }
+ else
+ {
+ lim = _idx;
+ }
+ _sum = 0;
+ for(WebRtc_UWord32 n = 0; n < lim; n++)
+ {
+ _sum += _buff[n];
+ }
+ }
+ _calcAvg = enable;
+}
+
+void
+CircularBuffer::SetVariance(
+ bool enable)
+{
+ assert(_buffLen > 0);
+
+ if(enable && !_calcVar)
+ {
+ WebRtc_UWord32 lim;
+ if(_buffIsFull)
+ {
+ lim = _buffLen;
+ }
+ else
+ {
+ lim = _idx;
+ }
+ _sumSqr = 0;
+ for(WebRtc_UWord32 n = 0; n < lim; n++)
+ {
+ _sumSqr += _buff[n] * _buff[n];
+ }
+ }
+ _calcVar = enable;
+}
+
+WebRtc_Word16
+CircularBuffer::ArithMean(double& mean)
+{
+ assert(_buffLen > 0);
+
+ if(_buffIsFull)
+ {
+
+ mean = _sum / (double)_buffLen;
+ return 0;
+ }
+ else
+ {
+ if(_idx > 0)
+ {
+ mean = _sum / (double)_idx;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+
+ }
+}
+
+WebRtc_Word16
+CircularBuffer::Variance(double& var)
+{
+ assert(_buffLen > 0);
+
+ if(_buffIsFull)
+ {
+ var = _sumSqr / (double)_buffLen;
+ return 0;
+ }
+ else
+ {
+ if(_idx > 0)
+ {
+ var = _sumSqr / (double)_idx;
+ return 0;
+ }
+ else
+ {
+ return -1;
+ }
+ }
+}
+
+
+
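+// Returns true if the given codec name is one of the codecs that have a
+// static RTP payload type assigned in RFC 3551.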
+bool
+FixedPayloadTypeCodec(const char* payloadName)
+{
+ char fixPayloadTypeCodecs[NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE][32] = {
+ "PCMU",
+ "PCMA",
+ "GSM",
+ "G723",
+ "DVI4",
+ "LPC",
+ "PCMA",
+ "G722",
+ "QCELP",
+ "CN",
+ "MPA",
+ "G728",
+ "G729"
+ };
+
+ for(int n = 0; n < NUM_CODECS_WITH_FIXED_PAYLOAD_TYPE; n++)
+ {
+ if(!STR_CASE_CMP(payloadName, fixPayloadTypeCodecs[n]))
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+DTMFDetector::DTMFDetector()
+{
+ for(WebRtc_Word16 n = 0; n < 1000; n++)
+ {
+ _toneCntr[n] = 0;
+ }
+}
+
+DTMFDetector::~DTMFDetector()
+{
+}
+
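+// Counts every in-band DTMF report per digit; PrintDetectedDigits() treats
+// each report as 10 ms of tone when it prints the durations.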
+WebRtc_Word32 DTMFDetector::IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool /* toneEnded */)
+{
+ fprintf(stdout, "%d-",digitDtmf);
+ _toneCntr[digitDtmf]++;
+ return 0;
+}
+
+void DTMFDetector::PrintDetectedDigits()
+{
+ for(WebRtc_Word16 n = 0; n < 1000; n++)
+ {
+ if(_toneCntr[n] > 0)
+ {
+ fprintf(stdout, "%d %u msec, \n", n, _toneCntr[n]*10);
+ }
+ }
+ fprintf(stdout, "\n");
+ return;
+}
+
+void
+VADCallback::Reset()
+{
+ for(int n = 0; n < 6; n++)
+ {
+ _numFrameTypes[n] = 0;
+ }
+}
+
+VADCallback::VADCallback()
+{
+ for(int n = 0; n < 6; n++)
+ {
+ _numFrameTypes[n] = 0;
+ }
+}
+
+void
+VADCallback::PrintFrameTypes()
+{
+ fprintf(stdout, "No encoding.................. %d\n", _numFrameTypes[0]);
+ fprintf(stdout, "Active normal encoded........ %d\n", _numFrameTypes[1]);
+ fprintf(stdout, "Passive normal encoded....... %d\n", _numFrameTypes[2]);
+ fprintf(stdout, "Passive DTX wideband......... %d\n", _numFrameTypes[3]);
+ fprintf(stdout, "Passive DTX narrowband....... %d\n", _numFrameTypes[4]);
+ fprintf(stdout, "Passive DTX super-wideband... %d\n", _numFrameTypes[5]);
+}
+
+WebRtc_Word32
+VADCallback::InFrameType(
+ WebRtc_Word16 frameType)
+{
+ // Guard against out-of-range frame types so we never write past the array.
+ if((frameType >= 0) && (frameType < 6))
+ {
+ _numFrameTypes[frameType]++;
+ }
+ return 0;
+}
diff --git a/src/modules/audio_coding/main/test/utility.h b/src/modules/audio_coding/main/test/utility.h
new file mode 100644
index 0000000..b25de44
--- /dev/null
+++ b/src/modules/audio_coding/main/test/utility.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef ACM_TEST_UTILITY_H
+#define ACM_TEST_UTILITY_H
+
+#include "audio_coding_module.h"
+
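+// CHECK_ERROR/CHECK_PROTECTED throw a message string when a call returns an
+// unexpected value; the *_MT variants only log to stderr instead of throwing.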
+//-----------------------------
+#define CHECK_ERROR(f) \
+ do { \
+ if((f) < 0) { \
+ static char errString[500]; \
+ sprintf(errString, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ throw errString; \
+ } \
+ }while(0)
+
+//-----------------------------
+#define CHECK_PROTECTED(f) \
+ do { \
+ if((f) >= 0) { \
+ static char errString[500]; \
+ sprintf(errString, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ throw errString; \
+ } \
+ else { \
+ printf("An expected error is caught.\n"); \
+ } \
+ }while(0)
+
+//----------------------------
+#define CHECK_ERROR_MT(f) \
+ do { \
+ if((f) < 0) { \
+ fprintf(stderr, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ } \
+ }while(0)
+
+//----------------------------
+#define CHECK_PROTECTED_MT(f) \
+ do { \
+ if((f) >= 0) { \
+ fprintf(stderr, "Error Calling API in file %s at line %d \n", \
+ __FILE__, __LINE__); \
+ } \
+ else { \
+ printf("An expected error is caught.\n"); \
+ } \
+ }while(0)
+
+
+
+#ifdef WIN32
+ /* Exclude rarely-used stuff from Windows headers */
+ //#define WIN32_LEAN_AND_MEAN
+ /* OS-dependent case-insensitive string comparison */
+ #define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+#else
+ /* OS-dependent case-insensitive string comparison */
+ #define STR_CASE_CMP(x,y) ::strcasecmp(x,y)
+#endif
+
+#define DESTROY_ACM(acm) \
+ do { \
+ if(acm != NULL) { \
+ AudioCodingModule::Destroy(acm); \
+ acm = NULL; \
+ } \
+ } while(0)
+
+
+#define DELETE_POINTER(p) \
+ do { \
+ if(p != NULL) { \
+ delete p; \
+ p = NULL; \
+ } \
+ } while(0)
+
+using namespace webrtc;
+
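+// Simple elapsed-time counter for the tests: the caller ticks it once per
+// processed chunk (1, 10, 100 or 1000 ms) and reads the accumulated time
+// back as hours, minutes, seconds and milliseconds.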
+class ACMTestTimer
+{
+public:
+ ACMTestTimer();
+ ~ACMTestTimer();
+
+ void Reset();
+ void Tick10ms();
+ void Tick1ms();
+ void Tick100ms();
+ void Tick1sec();
+ void CurrentTimeHMS(
+ char* currTime);
+ void CurrentTime(
+ unsigned long& h,
+ unsigned char& m,
+ unsigned char& s,
+ unsigned short& ms);
+
+private:
+ void Adjust();
+
+ unsigned short _msec;
+ unsigned char _sec;
+ unsigned char _min;
+ unsigned long _hour;
+};
+
+
+
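+// Fixed-length circular buffer of doubles with optional running sums for the
+// arithmetic mean and for the mean square of the stored samples (reported by
+// Variance(), i.e. the variance of zero-mean data).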
+class CircularBuffer
+{
+public:
+ CircularBuffer(WebRtc_UWord32 len);
+ ~CircularBuffer();
+
+ void SetArithMean(
+ bool enable);
+ void SetVariance(
+ bool enable);
+
+ void Update(
+ const double newVal);
+ // Returns true once the buffer has wrapped around at least once.
+ bool IsBufferFull() const { return _buffIsFull; }
+
+ WebRtc_Word16 Variance(double& var);
+ WebRtc_Word16 ArithMean(double& mean);
+
+protected:
+ double* _buff;
+ WebRtc_UWord32 _idx;
+ WebRtc_UWord32 _buffLen;
+
+ bool _buffIsFull;
+ bool _calcAvg;
+ bool _calcVar;
+ double _sum;
+ double _sumSqr;
+};
+
+
+
+
+
+WebRtc_Word16 ChooseCodec(
+ CodecInst& codecInst);
+
+void PrintCodecs();
+
+bool FixedPayloadTypeCodec(const char* payloadName);
+
+
+
+
+class DTMFDetector: public AudioCodingFeedback
+{
+public:
+ DTMFDetector();
+ ~DTMFDetector();
+ // used for inband DTMF detection
+ WebRtc_Word32 IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool toneEnded);
+ void PrintDetectedDigits();
+
+private:
+ WebRtc_UWord32 _toneCntr[1000];
+
+};
+
+
+
+
+class VADCallback : public ACMVADCallback
+{
+public:
+ VADCallback();
+ ~VADCallback(){}
+
+ WebRtc_Word32 InFrameType(
+ WebRtc_Word16 frameType);
+
+ void PrintFrameTypes();
+ void Reset();
+
+private:
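+ // Counters indexed by frame type, in the order printed by
+ // PrintFrameTypes(): no encoding, active, passive, and passive DTX
+ // wideband/narrowband/super-wideband.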
+ WebRtc_UWord32 _numFrameTypes[6];
+};
+
+
+
+#endif // ACM_TEST_UTILITY_H