Move src/ -> webrtc/
TBR=niklas.enbom@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/915006
git-svn-id: http://webrtc.googlecode.com/svn/trunk/webrtc@2963 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/voice_engine/Android.mk b/voice_engine/Android.mk
new file mode 100644
index 0000000..5040f17
--- /dev/null
+++ b/voice_engine/Android.mk
@@ -0,0 +1,88 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# Pull in the shared WebRTC Android build settings (MY_WEBRTC_COMMON_DEFS).
+include $(LOCAL_PATH)/../../../../android-webrtc.mk
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc_voe_core
+LOCAL_MODULE_TAGS := optional
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES := \
+    audio_frame_operations.cc \
+    channel.cc \
+    channel_manager.cc \
+    channel_manager_base.cc \
+    dtmf_inband.cc \
+    dtmf_inband_queue.cc \
+    level_indicator.cc \
+    monitor_module.cc \
+    output_mixer.cc \
+    ref_count.cc \
+    shared_data.cc \
+    statistics.cc \
+    transmit_mixer.cc \
+    utility.cc \
+    voe_audio_processing_impl.cc \
+    voe_base_impl.cc \
+    voe_call_report_impl.cc \
+    voe_codec_impl.cc \
+    voe_dtmf_impl.cc \
+    voe_encryption_impl.cc \
+    voe_external_media_impl.cc \
+    voe_file_impl.cc \
+    voe_hardware_impl.cc \
+    voe_neteq_stats_impl.cc \
+    voe_network_impl.cc \
+    voe_rtp_rtcp_impl.cc \
+    voe_video_sync_impl.cc \
+    voe_volume_control_impl.cc \
+    voice_engine_impl.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+    $(MY_WEBRTC_COMMON_DEFS) \
+    '-DWEBRTC_ANDROID_OPENSLES'
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/../interface \
+    $(LOCAL_PATH)/../../.. \
+    $(LOCAL_PATH)/../../../common_audio/resampler/include \
+    $(LOCAL_PATH)/../../../common_audio/signal_processing/include \
+    $(LOCAL_PATH)/../../../modules/interface \
+    $(LOCAL_PATH)/../../../modules/audio_coding/main/interface \
+    $(LOCAL_PATH)/../../../modules/audio_conference_mixer/interface \
+    $(LOCAL_PATH)/../../../modules/audio_device/main/interface \
+    $(LOCAL_PATH)/../../../modules/audio_device/main/source \
+    $(LOCAL_PATH)/../../../modules/audio_processing/include \
+    $(LOCAL_PATH)/../../../modules/media_file/interface \
+    $(LOCAL_PATH)/../../../modules/rtp_rtcp/interface \
+    $(LOCAL_PATH)/../../../modules/udp_transport/interface \
+    $(LOCAL_PATH)/../../../modules/utility/interface \
+    $(LOCAL_PATH)/../../../system_wrappers/interface
+
+# NOTE(review): libdl already appears here, and is appended again in the
+# TARGET_SIMULATOR conditional below — redundant but harmless.
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    libstlport
+
+ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true)
+LOCAL_LDLIBS += -ldl -lpthread
+endif
+
+ifneq ($(TARGET_SIMULATOR),true)
+LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# stlport is provided by the platform build; under the NDK it comes from the
+# toolchain instead, so the include is skipped.
+ifndef NDK_ROOT
+include external/stlport/libstlport.mk
+endif
+include $(BUILD_STATIC_LIBRARY)
diff --git a/voice_engine/OWNERS b/voice_engine/OWNERS
new file mode 100644
index 0000000..a07ced3
--- /dev/null
+++ b/voice_engine/OWNERS
@@ -0,0 +1,4 @@
+henrikg@webrtc.org
+henrika@webrtc.org
+niklas.enbom@webrtc.org
+xians@webrtc.org
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
new file mode 100644
index 0000000..4370b74
--- /dev/null
+++ b/voice_engine/channel.cc
@@ -0,0 +1,6648 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel.h"
+
+#include "audio_device.h"
+#include "audio_frame_operations.h"
+#include "audio_processing.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "process_thread.h"
+#include "rtp_dump.h"
+#include "statistics.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "utility.h"
+#include "voe_base.h"
+#include "voe_external_media.h"
+#include "voe_rtp_rtcp.h"
+
+#if defined(_WIN32)
+#include <Qos.h>
+#endif
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+// AudioPacketizationCallback implementation: invoked by the ACM when an
+// encoded audio frame is ready. Stores the current audio level in the
+// RTP/RTCP module (when audio-level indication is enabled) and pushes the
+// payload to the RTP/RTCP module for packetization; that call in turn
+// triggers Transport::SendPacket(). Returns 0 on success, -1 on failure.
+WebRtc_Word32
+Channel::SendData(FrameType frameType,
+                  WebRtc_UWord8 payloadType,
+                  WebRtc_UWord32 timeStamp,
+                  const WebRtc_UWord8* payloadData,
+                  WebRtc_UWord16 payloadSize,
+                  const RTPFragmentationHeader* fragmentation)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
+                 " payloadSize=%u, fragmentation=0x%x)",
+                 frameType, payloadType, timeStamp, payloadSize, fragmentation);
+
+    if (_includeAudioLevelIndication)
+    {
+        // _rtpAudioProc must have been created when the indication was enabled.
+        assert(_rtpAudioProc.get() != NULL);
+        // Store current audio level in the RTP/RTCP module.
+        // The level will be used in combination with voice-activity state
+        // (frameType) to add an RTP header extension
+        _rtpRtcpModule->SetAudioLevel(_rtpAudioProc->level_estimator()->RMS());
+    }
+
+    // Push data from ACM to RTP/RTCP-module to deliver audio frame for
+    // packetization.
+    // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
+    if (_rtpRtcpModule->SendOutgoingData((FrameType&)frameType,
+                                         payloadType,
+                                         timeStamp,
+                                         // Leaving the time when this frame was
+                                         // received from the capture device as
+                                         // undefined for voice for now.
+                                         -1,
+                                         payloadData,
+                                         payloadSize,
+                                         fragmentation) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
+            "Channel::SendData() failed to send data to RTP/RTCP module");
+        return -1;
+    }
+
+    // Remember what was last sent; read back by RTP/RTCP query APIs.
+    _lastLocalTimeStamp = timeStamp;
+    _lastPayloadType = payloadType;
+
+    return 0;
+}
+
+// ACM callback reporting the type of the frame about to be encoded.
+// Caches a speech/non-speech flag for the send side under the callback lock.
+WebRtc_Word32
+Channel::InFrameType(WebRtc_Word16 frameType)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::InFrameType(frameType=%d)", frameType);
+
+    CriticalSectionScoped cs(&_callbackCritSect);
+    // 1 indicates speech
+    _sendFrameType = (frameType == 1) ? 1 : 0;
+    return 0;
+}
+
+#ifdef WEBRTC_DTMF_DETECTION
+// In-band DTMF detector callback. The detector passes 999 as a
+// "no digit" sentinel, which is ignored; real digits are forwarded to the
+// registered telephone-event observer (if any) under the callback lock.
+int
+Channel::IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool end)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IncomingDtmf(digitDtmf=%u, end=%d)",
+                 digitDtmf, end);
+
+    if (digitDtmf != 999)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_telephoneEventDetectionPtr)
+        {
+            _telephoneEventDetectionPtr->OnReceivedTelephoneEventInband(
+                _channelId, digitDtmf, end);
+        }
+    }
+
+    return 0;
+}
+#endif
+
+// Receive-side VAD callback: forwards the VAD decision for this channel
+// to the registered RX-VAD observer (if any) under the callback lock.
+WebRtc_Word32
+Channel::OnRxVadDetected(const int vadDecision)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::OnRxVadDetected(vadDecision=%d)", vadDecision);
+
+    CriticalSectionScoped cs(&_callbackCritSect);
+    if (_rxVadObserverPtr)
+    {
+        _rxVadObserverPtr->OnRxVad(_channelId, vadDecision);
+    }
+
+    return 0;
+}
+
+// Transport callback from the RTP/RTCP module: sends a complete RTP packet.
+// Pipeline: optional one-shot M/PT-byte rewrite (InsertExtraRTPPacket API),
+// RTP dump to file, optional SRTP/external encryption, then transmission via
+// either the internal WebRtc socket transport or the external transport.
+// Returns the number of bytes sent, or -1 on failure.
+int
+Channel::SendPacket(int channel, const void *data, int len)
+{
+    channel = VoEChannelId(channel);
+    assert(channel == _channelId);
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendPacket(channel=%d, len=%d)", channel, len);
+
+    if (_transportPtr == NULL)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() failed to send RTP packet due to"
+                     " invalid transport object");
+        return -1;
+    }
+
+    // Insert extra RTP packet using if user has called the InsertExtraRTPPacket
+    // API
+    if (_insertExtraRTPPacket)
+    {
+        // NOTE(review): this mutates the caller's packet buffer in place
+        // (second byte of the RTP header = marker bit | payload type).
+        WebRtc_UWord8* rtpHdr = (WebRtc_UWord8*)data;
+        WebRtc_UWord8 M_PT(0);
+        if (_extraMarkerBit)
+        {
+            M_PT = 0x80; // set the M-bit
+        }
+        M_PT += _extraPayloadType; // set the payload type
+        *(++rtpHdr) = M_PT; // modify the M|PT-byte within the RTP header
+        _insertExtraRTPPacket = false; // insert one packet only
+    }
+
+    WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data;
+    WebRtc_Word32 bufferLength = len;
+
+    // Dump the RTP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendPacket() RTP dump to output file failed");
+    }
+
+    // SRTP or External encryption
+    if (_encrypting)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_encryptionRTPBufferPtr)
+            {
+                // Allocate memory for encryption buffer one time only
+                _encryptionRTPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform encryption (SRTP or external)
+            // NOTE(review): assumes encrypt() output fits within
+            // kVoiceEngineMaxIpPacketSizeBytes — confirm the encryption
+            // implementations honor this bound.
+            WebRtc_Word32 encryptedBufferLength = 0;
+            _encryptionPtr->encrypt(_channelId,
+                                    bufferToSendPtr,
+                                    _encryptionRTPBufferPtr,
+                                    bufferLength,
+                                    (int*)&encryptedBufferLength);
+            if (encryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_ENCRYPTION_FAILED,
+                    kTraceError, "Channel::SendPacket() encryption failed");
+                return -1;
+            }
+
+            // Replace default data buffer with encrypted buffer
+            bufferToSendPtr = _encryptionRTPBufferPtr;
+            bufferLength = encryptedBufferLength;
+        }
+    }
+
+    // Packet transmission using WebRtc socket transport
+    if (!_externalTransport)
+    {
+        int n = _transportPtr->SendPacket(channel, bufferToSendPtr,
+                                          bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendPacket() RTP transmission using WebRtc"
+                         " sockets failed");
+            return -1;
+        }
+        return n;
+    }
+
+    // Packet transmission using external transport transport
+    {
+        // The callback lock protects the external transport pointer while
+        // the application-provided SendPacket() is invoked.
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        int n = _transportPtr->SendPacket(channel,
+                                          bufferToSendPtr,
+                                          bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendPacket() RTP transmission using external"
+                         " transport failed");
+            return -1;
+        }
+        return n;
+    }
+}
+
+// Transport callback from the RTP/RTCP module: sends a complete RTCP packet.
+// Pipeline mirrors Channel::SendPacket(): dump to file (if enabled), optional
+// SRTP/external encryption, then transmission via the internal WebRtc socket
+// transport or the external transport.
+// Returns the number of bytes sent, or -1 on failure.
+// Fixes vs. previous revision: the dump-failure trace named the wrong
+// function ("SendPacket"), transmission failures were traced at kTraceInfo
+// instead of kTraceError (inconsistent with SendPacket), and an unreachable
+// trailing "return len;" has been removed.
+int
+Channel::SendRTCPPacket(int channel, const void *data, int len)
+{
+    channel = VoEChannelId(channel);
+    assert(channel == _channelId);
+
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendRTCPPacket(channel=%d, len=%d)", channel, len);
+
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_transportPtr == NULL)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendRTCPPacket() failed to send RTCP packet"
+                         " due to invalid transport object");
+            return -1;
+        }
+    }
+
+    WebRtc_UWord8* bufferToSendPtr = (WebRtc_UWord8*)data;
+    WebRtc_Word32 bufferLength = len;
+
+    // Dump the RTCP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpOut.DumpPacket((const WebRtc_UWord8*)data, len) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::SendRTCPPacket() RTCP dump to output file"
+                     " failed");
+    }
+
+    // SRTP or External encryption
+    if (_encrypting)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_encryptionRTCPBufferPtr)
+            {
+                // Allocate memory for encryption buffer one time only
+                _encryptionRTCPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform encryption (SRTP or external).
+            WebRtc_Word32 encryptedBufferLength = 0;
+            _encryptionPtr->encrypt_rtcp(_channelId,
+                                         bufferToSendPtr,
+                                         _encryptionRTCPBufferPtr,
+                                         bufferLength,
+                                         (int*)&encryptedBufferLength);
+            if (encryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_ENCRYPTION_FAILED, kTraceError,
+                    "Channel::SendRTCPPacket() encryption failed");
+                return -1;
+            }
+
+            // Replace default data buffer with encrypted buffer
+            bufferToSendPtr = _encryptionRTCPBufferPtr;
+            bufferLength = encryptedBufferLength;
+        }
+    }
+
+    // Packet transmission using WebRtc socket transport
+    if (!_externalTransport)
+    {
+        int n = _transportPtr->SendRTCPPacket(channel,
+                                              bufferToSendPtr,
+                                              bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendRTCPPacket() transmission using WebRtc"
+                         " sockets failed");
+            return -1;
+        }
+        return n;
+    }
+
+    // Packet transmission using external transport
+    {
+        // The callback lock protects the external transport pointer while
+        // the application-provided SendRTCPPacket() is invoked.
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        int n = _transportPtr->SendRTCPPacket(channel,
+                                              bufferToSendPtr,
+                                              bufferLength);
+        if (n < 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::SendRTCPPacket() transmission using external"
+                         " transport failed");
+            return -1;
+        }
+        return n;
+    }
+}
+
+// Delivers a received RTP packet to this channel: records the current
+// playout timestamp (for delay estimation), optionally decrypts
+// (SRTP/external), dumps to file (if enabled) and hands the packet to the
+// RTP/RTCP module for parsing. The parsed payload comes back through the
+// OnReceivedPayloadData() callback, so nothing is pushed to the ACM here.
+// Fix vs. previous revision: the dump-failure trace named the wrong function
+// ("Channel::SendPacket").
+void
+Channel::IncomingRTPPacket(const WebRtc_Word8* incomingRtpPacket,
+                           const WebRtc_Word32 rtpPacketLength,
+                           const char* fromIP,
+                           const WebRtc_UWord16 fromPort)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IncomingRTPPacket(rtpPacketLength=%d,"
+                 " fromIP=%s, fromPort=%u)",
+                 rtpPacketLength, fromIP, fromPort);
+
+    // Store playout timestamp for the received RTP packet
+    // to be used for upcoming delay estimations
+    WebRtc_UWord32 playoutTimestamp(0);
+    if (GetPlayoutTimeStamp(playoutTimestamp) == 0)
+    {
+        _playoutTimeStampRTP = playoutTimestamp;
+    }
+
+    WebRtc_UWord8* rtpBufferPtr = (WebRtc_UWord8*)incomingRtpPacket;
+    WebRtc_Word32 rtpBufferLength = rtpPacketLength;
+
+    // SRTP or External decryption
+    if (_decrypting)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_decryptionRTPBufferPtr)
+            {
+                // Allocate memory for decryption buffer one time only
+                _decryptionRTPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform decryption (SRTP or external)
+            WebRtc_Word32 decryptedBufferLength = 0;
+            _encryptionPtr->decrypt(_channelId,
+                                    rtpBufferPtr,
+                                    _decryptionRTPBufferPtr,
+                                    rtpBufferLength,
+                                    (int*)&decryptedBufferLength);
+            if (decryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_DECRYPTION_FAILED, kTraceError,
+                    "Channel::IncomingRTPPacket() decryption failed");
+                return;
+            }
+
+            // Replace default data buffer with decrypted buffer
+            rtpBufferPtr = _decryptionRTPBufferPtr;
+            rtpBufferLength = decryptedBufferLength;
+        }
+    }
+
+    // Dump the RTP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpIn.DumpPacket(rtpBufferPtr,
+                              (WebRtc_UWord16)rtpBufferLength) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::IncomingRTPPacket() RTP dump to input file"
+                     " failed");
+    }
+
+    // Deliver RTP packet to RTP/RTCP module for parsing
+    // The packet will be pushed back to the channel thru the
+    // OnReceivedPayloadData callback so we don't push it to the ACM here
+    if (_rtpRtcpModule->IncomingPacket((const WebRtc_UWord8*)rtpBufferPtr,
+                                       (WebRtc_UWord16)rtpBufferLength) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "Channel::IncomingRTPPacket() RTP packet is invalid");
+        return;
+    }
+}
+
+// Delivers a received RTCP packet to this channel: records the current
+// playout timestamp (read back via GetRemoteRTCPData), optionally decrypts
+// (SRTP/external), dumps to file (if enabled) and hands the packet to the
+// RTP/RTCP module for parsing.
+// Fixes vs. previous revision: two copy-paste trace messages named the wrong
+// function ("Channel::SendPacket" on dump failure, and
+// "Channel::IncomingRTPPacket" on the invalid-packet error).
+void
+Channel::IncomingRTCPPacket(const WebRtc_Word8* incomingRtcpPacket,
+                            const WebRtc_Word32 rtcpPacketLength,
+                            const char* fromIP,
+                            const WebRtc_UWord16 fromPort)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::IncomingRTCPPacket(rtcpPacketLength=%d, fromIP=%s,"
+                 " fromPort=%u)",
+                 rtcpPacketLength, fromIP, fromPort);
+
+    // Temporary buffer pointer and size for decryption
+    WebRtc_UWord8* rtcpBufferPtr = (WebRtc_UWord8*)incomingRtcpPacket;
+    WebRtc_Word32 rtcpBufferLength = rtcpPacketLength;
+
+    // Store playout timestamp for the received RTCP packet
+    // which will be read by the GetRemoteRTCPData API
+    WebRtc_UWord32 playoutTimestamp(0);
+    if (GetPlayoutTimeStamp(playoutTimestamp) == 0)
+    {
+        _playoutTimeStampRTCP = playoutTimestamp;
+    }
+
+    // SRTP or External decryption
+    if (_decrypting)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_encryptionPtr)
+        {
+            if (!_decryptionRTCPBufferPtr)
+            {
+                // Allocate memory for decryption buffer one time only
+                _decryptionRTCPBufferPtr =
+                    new WebRtc_UWord8[kVoiceEngineMaxIpPacketSizeBytes];
+            }
+
+            // Perform decryption (SRTP or external).
+            WebRtc_Word32 decryptedBufferLength = 0;
+            _encryptionPtr->decrypt_rtcp(_channelId,
+                                         rtcpBufferPtr,
+                                         _decryptionRTCPBufferPtr,
+                                         rtcpBufferLength,
+                                         (int*)&decryptedBufferLength);
+            if (decryptedBufferLength <= 0)
+            {
+                _engineStatisticsPtr->SetLastError(
+                    VE_DECRYPTION_FAILED, kTraceError,
+                    "Channel::IncomingRTCPPacket() decryption failed");
+                return;
+            }
+
+            // Replace default data buffer with decrypted buffer
+            rtcpBufferPtr = _decryptionRTCPBufferPtr;
+            rtcpBufferLength = decryptedBufferLength;
+        }
+    }
+
+    // Dump the RTCP packet to a file (if RTP dump is enabled).
+    if (_rtpDumpIn.DumpPacket(rtcpBufferPtr,
+                              (WebRtc_UWord16)rtcpBufferLength) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::IncomingRTCPPacket() RTCP dump to input file"
+                     " failed");
+    }
+
+    // Deliver RTCP packet to RTP/RTCP module for parsing
+    if (_rtpRtcpModule->IncomingPacket((const WebRtc_UWord8*)rtcpBufferPtr,
+                                       (WebRtc_UWord16)rtcpBufferLength) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "Channel::IncomingRTCPPacket() RTCP packet is invalid");
+        return;
+    }
+}
+
+// RTP/RTCP module callback for a received RTP telephone event
+// (out-of-band DTMF). Forwards the event to the registered telephone-event
+// observer when out-of-band detection is enabled.
+void
+Channel::OnReceivedTelephoneEvent(const WebRtc_Word32 id,
+                                  const WebRtc_UWord8 event,
+                                  const bool endOfEvent)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedTelephoneEvent(id=%d, event=%u,"
+                 " endOfEvent=%d)", id, event, endOfEvent);
+
+#ifdef WEBRTC_DTMF_DETECTION
+    // NOTE(review): the member name is misspelled ("Detecion"); it is
+    // declared elsewhere, so it cannot be renamed from this function alone.
+    if (_outOfBandTelephoneEventDetecion)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_telephoneEventDetectionPtr)
+        {
+            _telephoneEventDetectionPtr->OnReceivedTelephoneEventOutOfBand(
+                _channelId, event, endOfEvent);
+        }
+    }
+#endif
+}
+
+// RTP/RTCP module callback requesting local feedback playout of an outgoing
+// telephone (DTMF) event. Ignored when feedback is disabled or the event is
+// not a DTMF digit (> 15). Otherwise plays the tone through the output
+// mixer, shortened by 80 ms to reduce the risk of echo.
+// Fix vs. previous revision: lengthMs - 80 produced a negative duration for
+// events shorter than 80 ms; the reduced length is now clamped to 0.
+void
+Channel::OnPlayTelephoneEvent(const WebRtc_Word32 id,
+                              const WebRtc_UWord8 event,
+                              const WebRtc_UWord16 lengthMs,
+                              const WebRtc_UWord8 volume)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPlayTelephoneEvent(id=%d, event=%u, lengthMs=%u,"
+                 " volume=%u)", id, event, lengthMs, volume);
+
+    if (!_playOutbandDtmfEvent || (event > 15))
+    {
+        // Ignore callback since feedback is disabled or event is not a
+        // Dtmf tone event.
+        return;
+    }
+
+    assert(_outputMixerPtr != NULL);
+
+    // Start playing out the Dtmf tone (if playout is enabled).
+    // Reduce length of tone with 80ms to reduce the risk of echo, but never
+    // pass a negative duration (lengthMs may be smaller than 80).
+    const WebRtc_UWord16 reducedLengthMs =
+        (lengthMs > 80) ? static_cast<WebRtc_UWord16>(lengthMs - 80) : 0;
+    _outputMixerPtr->PlayDtmfTone(event, reducedLengthMs, volume);
+}
+
+// RTP/RTCP module callback: a new incoming RTP stream (new SSRC) was
+// detected. Resets the module's receive statistics and notifies the
+// registered RTP observer.
+void
+Channel::OnIncomingSSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 SSRC)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnIncomingSSRCChanged(id=%d, SSRC=%d)",
+                 id, SSRC);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    // Reset RTP-module counters since a new incoming RTP stream is detected
+    _rtpRtcpModule->ResetReceiveDataCountersRTP();
+    _rtpRtcpModule->ResetStatisticsRTP();
+
+    if (_rtpObserver)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_rtpObserverPtr)
+        {
+            // Send new SSRC to registered observer using callback
+            _rtpObserverPtr->OnIncomingSSRCChanged(channel, SSRC);
+        }
+    }
+}
+
+// RTP/RTCP module callback: a contributing source (CSRC) was added to or
+// removed from the incoming stream. Forwards the change to the registered
+// RTP observer.
+void Channel::OnIncomingCSRCChanged(const WebRtc_Word32 id,
+                                    const WebRtc_UWord32 CSRC,
+                                    const bool added)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnIncomingCSRCChanged(id=%d, CSRC=%d, added=%d)",
+                 id, CSRC, added);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    if (_rtpObserver)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_rtpObserverPtr)
+        {
+            _rtpObserverPtr->OnIncomingCSRCChanged(channel, CSRC, added);
+        }
+    }
+}
+
+// RTP/RTCP module callback: an RTCP APP packet was received. Forwards the
+// application-defined data to the registered RTCP observer.
+void
+Channel::OnApplicationDataReceived(const WebRtc_Word32 id,
+                                   const WebRtc_UWord8 subType,
+                                   const WebRtc_UWord32 name,
+                                   const WebRtc_UWord16 length,
+                                   const WebRtc_UWord8* data)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnApplicationDataReceived(id=%d, subType=%u,"
+                 " name=%u, length=%u)",
+                 id, subType, name, length);
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    if (_rtcpObserver)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_rtcpObserverPtr)
+        {
+            _rtcpObserverPtr->OnApplicationDataReceived(channel,
+                                                        subType,
+                                                        name,
+                                                        data,
+                                                        length);
+        }
+    }
+}
+
+// RTP/RTCP module callback: an unknown payload type was received and the
+// matching decoder must be registered with the ACM before the payload can
+// be decoded. Returns 0 on success, -1 if the ACM rejects the codec.
+WebRtc_Word32
+Channel::OnInitializeDecoder(
+    const WebRtc_Word32 id,
+    const WebRtc_Word8 payloadType,
+    const char payloadName[RTP_PAYLOAD_NAME_SIZE],
+    const int frequency,
+    const WebRtc_UWord8 channels,
+    const WebRtc_UWord32 rate)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnInitializeDecoder(id=%d, payloadType=%d, "
+                 "payloadName=%s, frequency=%u, channels=%u, rate=%u)",
+                 id, payloadType, payloadName, frequency, channels, rate);
+
+    assert(VoEChannelId(id) == _channelId);
+
+    CodecInst receiveCodec = {0};
+    CodecInst dummyCodec = {0};
+
+    receiveCodec.pltype = payloadType;
+    receiveCodec.plfreq = frequency;
+    receiveCodec.channels = channels;
+    receiveCodec.rate = rate;
+    // plname termination relies on receiveCodec being zero-initialized above:
+    // strncpy copies at most RTP_PAYLOAD_NAME_SIZE - 1 bytes, so the final
+    // byte stays NUL.
+    strncpy(receiveCodec.plname, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
+
+    // Look up the default packet size for this codec.
+    // NOTE(review): the return value is ignored; on failure dummyCodec stays
+    // zero-initialized, so pacsize becomes 0 — confirm that is acceptable to
+    // RegisterReceiveCodec().
+    _audioCodingModule.Codec(payloadName, dummyCodec, frequency, channels);
+    receiveCodec.pacsize = dummyCodec.pacsize;
+
+    // Register the new codec to the ACM
+    if (_audioCodingModule.RegisterReceiveCodec(receiveCodec) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "Channel::OnInitializeDecoder() invalid codec ("
+                     "pt=%d, name=%s) received - 1", payloadType, payloadName);
+        _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR);
+        return -1;
+    }
+
+    return 0;
+}
+
+// RTP/RTCP module callback: no RTP packet has been received within the
+// configured timeout. Arms _rtpPacketTimedOut (so the next received packet
+// triggers VE_PACKET_RECEIPT_RESTARTED) and reports
+// VE_RECEIVE_PACKET_TIMEOUT to the VoiceEngine observer.
+void
+Channel::OnPacketTimeout(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPacketTimeout(id=%d)", id);
+
+    CriticalSectionScoped cs(_callbackCritSectPtr);
+    if (_voiceEngineObserverPtr)
+    {
+        if (_receiving || _externalTransport)
+        {
+            WebRtc_Word32 channel = VoEChannelId(id);
+            assert(channel == _channelId);
+            // Ensure that next OnReceivedPacket() callback will trigger
+            // a VE_PACKET_RECEIPT_RESTARTED callback.
+            _rtpPacketTimedOut = true;
+            // Deliver callback to the observer
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::OnPacketTimeout() => "
+                         "CallbackOnError(VE_RECEIVE_PACKET_TIMEOUT)");
+            _voiceEngineObserverPtr->CallbackOnError(channel,
+                                                     VE_RECEIVE_PACKET_TIMEOUT);
+        }
+    }
+}
+
+// RTP/RTCP module callback: a packet was received. Only acts when an RTP
+// packet arrives after a previously reported timeout, in which case the
+// timeout flag is cleared and VE_PACKET_RECEIPT_RESTARTED is reported to the
+// VoiceEngine observer.
+// Fix vs. previous revision: the trace message incorrectly said
+// "Channel::OnPacketTimeout()" (copy-paste from the function above).
+void
+Channel::OnReceivedPacket(const WebRtc_Word32 id,
+                          const RtpRtcpPacketType packetType)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedPacket(id=%d, packetType=%d)",
+                 id, packetType);
+
+    assert(VoEChannelId(id) == _channelId);
+
+    // Notify only for the case when we have restarted an RTP session.
+    if (_rtpPacketTimedOut && (kPacketRtp == packetType))
+    {
+        CriticalSectionScoped cs(_callbackCritSectPtr);
+        if (_voiceEngineObserverPtr)
+        {
+            WebRtc_Word32 channel = VoEChannelId(id);
+            assert(channel == _channelId);
+            // Reset timeout mechanism
+            _rtpPacketTimedOut = false;
+            // Deliver callback to the observer
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::OnReceivedPacket() =>"
+                         " CallbackOnError(VE_PACKET_RECEIPT_RESTARTED)");
+            _voiceEngineObserverPtr->CallbackOnError(
+                channel,
+                VE_PACKET_RECEIPT_RESTARTED);
+        }
+    }
+}
+
+// RTP/RTCP module callback with the periodic dead-or-alive verdict for the
+// incoming stream. Refines the module's verdict (treats kRtpNoRtp as alive
+// while playout is active and NetEQ is not in PLC_CNG), updates the
+// dead-or-alive counters, and notifies the registered connection observer.
+void
+Channel::OnPeriodicDeadOrAlive(const WebRtc_Word32 id,
+                               const RTPAliveType alive)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnPeriodicDeadOrAlive(id=%d, alive=%d)", id, alive);
+
+    if (!_connectionObserver)
+        return;
+
+    WebRtc_Word32 channel = VoEChannelId(id);
+    assert(channel == _channelId);
+
+    // Use Alive as default to limit risk of false Dead detections
+    bool isAlive(true);
+
+    // Always mark the connection as Dead when the module reports kRtpDead
+    if (kRtpDead == alive)
+    {
+        isAlive = false;
+    }
+
+    // It is possible that the connection is alive even if no RTP packet has
+    // been received for a long time since the other side might use VAD/DTX
+    // and a low SID-packet update rate.
+    if ((kRtpNoRtp == alive) && _playing)
+    {
+        // Detect Alive for all NetEQ states except for the case when we are
+        // in PLC_CNG state.
+        // PLC_CNG <=> background noise only due to long expand or error.
+        // Note that, the case where the other side stops sending during CNG
+        // state will be detected as Alive. Dead is not set until after
+        // missing RTCP packets for at least twelve seconds (handled
+        // internally by the RTP/RTCP module).
+        isAlive = (_outputSpeechType != AudioFrame::kPLCCNG);
+    }
+
+    UpdateDeadOrAliveCounters(isAlive);
+
+    // Send callback to the registered observer
+    if (_connectionObserver)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_connectionObserverPtr)
+        {
+            _connectionObserverPtr->OnPeriodicDeadOrAlive(channel, isAlive);
+        }
+    }
+}
+
+// RTP/RTCP module callback delivering a parsed RTP payload. Discards the
+// packet (counting it) when playout is not active; otherwise pushes the
+// payload into the ACM for decoding and updates the packet-delay estimate.
+// Returns 0 on success (including the discard case), -1 on ACM failure.
+WebRtc_Word32
+Channel::OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
+                               const WebRtc_UWord16 payloadSize,
+                               const WebRtcRTPHeader* rtpHeader)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::OnReceivedPayloadData(payloadSize=%d,"
+                 " payloadType=%u, audioChannel=%u)",
+                 payloadSize,
+                 rtpHeader->header.payloadType,
+                 rtpHeader->type.Audio.channel);
+
+    if (!_playing)
+    {
+        // Avoid inserting into NetEQ when we are not playing. Count the
+        // packet as discarded.
+        WEBRTC_TRACE(kTraceStream, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "received packet is discarded since playing is not"
+                     " activated");
+        _numberOfDiscardedPackets++;
+        return 0;
+    }
+
+    // Push the incoming payload (parsed and ready for decoding) into the ACM
+    if (_audioCodingModule.IncomingPacket(payloadData,
+                                          payloadSize,
+                                          *rtpHeader) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
+            "Channel::OnReceivedPayloadData() unable to push data to the ACM");
+        return -1;
+    }
+
+    // Update the packet delay
+    UpdatePacketDelay(rtpHeader->header.timestamp,
+                      rtpHeader->header.sequenceNumber);
+
+    return 0;
+}
+
+// Mixer callback: produces this channel's next 10 ms of playout audio.
+// Pulls decoded PCM from the ACM, then applies the playout pipeline in
+// order: RX VAD, far-end APM, output gain, panning, file mixing, on-hold
+// mute, external-media processing, playout recording and level metering.
+// The processing order is significant; do not reorder the stages.
+// Returns 0 on success, -1 if the ACM produced no usable audio (the mixer
+// then skips this frame).
+WebRtc_Word32 Channel::GetAudioFrame(const WebRtc_Word32 id,
+                                     AudioFrame& audioFrame)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetAudioFrame(id=%d)", id);
+
+    // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
+    if (_audioCodingModule.PlayoutData10Ms(audioFrame.sample_rate_hz_,
+                                           audioFrame) == -1)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
+        // In all likelihood, the audio in this frame is garbage. We return an
+        // error so that the audio mixer module doesn't add it to the mix. As
+        // a result, it won't be played out and the actions skipped here are
+        // irrelevant.
+        return -1;
+    }
+
+    if (_RxVadDetection)
+    {
+        UpdateRxVadDetection(audioFrame);
+    }
+
+    // Convert module ID to internal VoE channel ID
+    audioFrame.id_ = VoEChannelId(audioFrame.id_);
+    // Store speech type for dead-or-alive detection
+    _outputSpeechType = audioFrame.speech_type_;
+
+    // Perform far-end AudioProcessing module processing on the received signal
+    if (_rxApmIsEnabled)
+    {
+        ApmProcessRx(audioFrame);
+    }
+
+    // Output volume scaling
+    // The tolerance window around 1.0 avoids scaling work when the gain is
+    // effectively unity.
+    if (_outputGain < 0.99f || _outputGain > 1.01f)
+    {
+        AudioFrameOperations::ScaleWithSat(_outputGain, audioFrame);
+    }
+
+    // Scale left and/or right channel(s) if stereo and master balance is
+    // active
+
+    if (_panLeft != 1.0f || _panRight != 1.0f)
+    {
+        if (audioFrame.num_channels_ == 1)
+        {
+            // Emulate stereo mode since panning is active.
+            // The mono signal is copied to both left and right channels here.
+            AudioFrameOperations::MonoToStereo(&audioFrame);
+        }
+        // For true stereo mode (when we are receiving a stereo signal), no
+        // action is needed.
+
+        // Do the panning operation (the audio frame contains stereo at this
+        // stage)
+        AudioFrameOperations::Scale(_panLeft, _panRight, audioFrame);
+    }
+
+    // Mix decoded PCM output with file if file mixing is enabled
+    if (_outputFilePlaying)
+    {
+        MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_);
+    }
+
+    // Place channel in on-hold state (~muted) if on-hold is activated
+    if (_outputIsOnHold)
+    {
+        AudioFrameOperations::Mute(audioFrame);
+    }
+
+    // External media
+    if (_outputExternalMedia)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        const bool isStereo = (audioFrame.num_channels_ == 2);
+        if (_outputExternalMediaCallbackPtr)
+        {
+            _outputExternalMediaCallbackPtr->Process(
+                _channelId,
+                kPlaybackPerChannel,
+                (WebRtc_Word16*)audioFrame.data_,
+                audioFrame.samples_per_channel_,
+                audioFrame.sample_rate_hz_,
+                isStereo);
+        }
+    }
+
+    // Record playout if enabled
+    {
+        CriticalSectionScoped cs(&_fileCritSect);
+
+        if (_outputFileRecording && _outputFileRecorderPtr)
+        {
+            _outputFileRecorderPtr->RecordAudioToFile(audioFrame);
+        }
+    }
+
+    // Measure audio level (0-9)
+    _outputAudioLevel.ComputeLevel(audioFrame);
+
+    return 0;
+}
+
+// Mixer callback: reports the highest sample rate (Hz) this channel needs
+// for playout — the maximum of the ACM receive frequency, the ACM playout
+// frequency and, when a file is being played out, the file's frequency.
+WebRtc_Word32
+Channel::NeededFrequency(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::NeededFrequency(id=%d)", id);
+
+    int highestNeeded = 0;
+
+    // Determine highest needed receive frequency
+    WebRtc_Word32 receiveFrequency = _audioCodingModule.ReceiveFrequency();
+
+    // Return the bigger of playout and receive frequency in the ACM.
+    if (_audioCodingModule.PlayoutFrequency() > receiveFrequency)
+    {
+        highestNeeded = _audioCodingModule.PlayoutFrequency();
+    }
+    else
+    {
+        highestNeeded = receiveFrequency;
+    }
+
+    // Special case, if we're playing a file on the playout side
+    // we take that frequency into consideration as well
+    // This is not needed on sending side, since the codec will
+    // limit the spectrum anyway.
+    if (_outputFilePlaying)
+    {
+        CriticalSectionScoped cs(&_fileCritSect);
+        // Re-check the flag under the lock; it may have been cleared since
+        // the unlocked test above.
+        if (_outputFilePlayerPtr && _outputFilePlaying)
+        {
+            if(_outputFilePlayerPtr->Frequency()>highestNeeded)
+            {
+                highestNeeded=_outputFilePlayerPtr->Frequency();
+            }
+        }
+    }
+
+    return(highestNeeded);
+}
+
+// Factory: allocates a new Channel and returns it through the reference
+// parameter. Returns 0 on success, -1 on allocation failure.
+WebRtc_Word32
+Channel::CreateChannel(Channel*& channel,
+                       const WebRtc_Word32 channelId,
+                       const WebRtc_UWord32 instanceId)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId,channelId),
+                 "Channel::CreateChannel(channelId=%d, instanceId=%d)",
+                 channelId, instanceId);
+
+    channel = new Channel(channelId, instanceId);
+    // NOTE(review): with a standard (throwing) operator new this NULL check
+    // is dead code; kept for legacy behavior — confirm whether the build
+    // uses non-throwing allocation.
+    if (channel == NULL)
+    {
+        WEBRTC_TRACE(kTraceMemory, kTraceVoice,
+                     VoEId(instanceId,channelId),
+                     "Channel::CreateChannel() unable to allocate memory for"
+                     " channel");
+        return -1;
+    }
+    return 0;
+}
+
+// FileCallback stub: would report playout-notification progress.
+void
+Channel::PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::PlayNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet
+}
+
+// FileCallback stub: would report recording-notification progress.
+void
+Channel::RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RecordNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet
+}
+
+// FileCallback: a file player finished. The id distinguishes the input
+// (microphone-mixing) player from the output (playout-mixing) player; the
+// corresponding "playing" flag is cleared under the file lock.
+void
+Channel::PlayFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::PlayFileEnded(id=%d)", id);
+
+    if (id == _inputFilePlayerId)
+    {
+        CriticalSectionScoped cs(&_fileCritSect);
+
+        _inputFilePlaying = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::PlayFileEnded() => input file player module is"
+                     " shutdown");
+    }
+    else if (id == _outputFilePlayerId)
+    {
+        CriticalSectionScoped cs(&_fileCritSect);
+
+        _outputFilePlaying = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::PlayFileEnded() => output file player module is"
+                     " shutdown");
+    }
+}
+
+// FileCallback: invoked by the FileRecorder module when recording stops.
+// Clears the recording flag under the file lock.
+void
+Channel::RecordFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RecordFileEnded(id=%d)", id);
+
+    // Only one recorder is ever created per channel (see constructor ids).
+    assert(id == _outputFileRecorderId);
+
+    CriticalSectionScoped cs(&_fileCritSect);
+
+    _outputFileRecording = false;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "Channel::RecordFileEnded() => output file recorder module is"
+                 " shutdown");
+}
+
+// Constructor: creates and wires up all per-channel modules (ACM, RTP/RTCP,
+// UDP transport, RTP dumps, far-end APM) and initializes every member to a
+// safe default. Initializer order follows member declaration order and must
+// not be rearranged.
+Channel::Channel(const WebRtc_Word32 channelId,
+                 const WebRtc_UWord32 instanceId) :
+    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _instanceId(instanceId),
+    _channelId(channelId),
+    _audioCodingModule(*AudioCodingModule::Create(
+        VoEModuleId(instanceId, channelId))),
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    _numSocketThreads(KNumSocketThreads),
+    _socketTransportModule(*UdpTransport::Create(
+        VoEModuleId(instanceId, channelId), _numSocketThreads)),
+#endif
+#ifdef WEBRTC_SRTP
+    _srtpModule(*SrtpModule::CreateSrtpModule(VoEModuleId(instanceId,
+                                                          channelId))),
+#endif
+    _rtpDumpIn(*RtpDump::CreateRtpDump()),
+    _rtpDumpOut(*RtpDump::CreateRtpDump()),
+    _outputAudioLevel(),
+    _externalTransport(false),
+    _inputFilePlayerPtr(NULL),
+    _outputFilePlayerPtr(NULL),
+    _outputFileRecorderPtr(NULL),
+    // Avoid conflict with other channels by adding 1024 - 1026,
+    // won't use as much as 1024 channels.
+    _inputFilePlayerId(VoEModuleId(instanceId, channelId) + 1024),
+    _outputFilePlayerId(VoEModuleId(instanceId, channelId) + 1025),
+    _outputFileRecorderId(VoEModuleId(instanceId, channelId) + 1026),
+    _inputFilePlaying(false),
+    _outputFilePlaying(false),
+    _outputFileRecording(false),
+    _inbandDtmfQueue(VoEModuleId(instanceId, channelId)),
+    _inbandDtmfGenerator(VoEModuleId(instanceId, channelId)),
+    _inputExternalMedia(false),
+    _outputExternalMedia(false),
+    _inputExternalMediaCallbackPtr(NULL),
+    _outputExternalMediaCallbackPtr(NULL),
+    _encryptionRTPBufferPtr(NULL),
+    _decryptionRTPBufferPtr(NULL),
+    _encryptionRTCPBufferPtr(NULL),
+    _decryptionRTCPBufferPtr(NULL),
+    _timeStamp(0), // This is just an offset, RTP module will add it's own random offset
+    _sendTelephoneEventPayloadType(106),
+    _playoutTimeStampRTP(0),
+    _playoutTimeStampRTCP(0),
+    _numberOfDiscardedPackets(0),
+    // Pointers below are injected later via SetEngineInformation().
+    _engineStatisticsPtr(NULL),
+    _outputMixerPtr(NULL),
+    _transmitMixerPtr(NULL),
+    _moduleProcessThreadPtr(NULL),
+    _audioDeviceModulePtr(NULL),
+    _voiceEngineObserverPtr(NULL),
+    _callbackCritSectPtr(NULL),
+    _transportPtr(NULL),
+    _encryptionPtr(NULL),
+    _rtpAudioProc(NULL),
+    _rxAudioProcessingModulePtr(NULL),
+#ifdef WEBRTC_DTMF_DETECTION
+    _telephoneEventDetectionPtr(NULL),
+#endif
+    _rxVadObserverPtr(NULL),
+    _oldVadDecision(-1),
+    _sendFrameType(0),
+    _rtpObserverPtr(NULL),
+    _rtcpObserverPtr(NULL),
+    _outputIsOnHold(false),
+    _externalPlayout(false),
+    _inputIsOnHold(false),
+    _playing(false),
+    _sending(false),
+    _receiving(false),
+    _mixFileWithMicrophone(false),
+    _rtpObserver(false),
+    _rtcpObserver(false),
+    _mute(false),
+    _panLeft(1.0f),
+    _panRight(1.0f),
+    _outputGain(1.0f),
+    _encrypting(false),
+    _decrypting(false),
+    _playOutbandDtmfEvent(false),
+    _playInbandDtmfEvent(false),
+    _inbandTelephoneEventDetection(false),
+    _outOfBandTelephoneEventDetecion(false),
+    _extraPayloadType(0),
+    _insertExtraRTPPacket(false),
+    _extraMarkerBit(false),
+    _lastLocalTimeStamp(0),
+    _lastPayloadType(0),
+    _includeAudioLevelIndication(false),
+    _rtpPacketTimedOut(false),
+    _rtpPacketTimeOutIsEnabled(false),
+    _rtpTimeOutSeconds(0),
+    _connectionObserver(false),
+    _connectionObserverPtr(NULL),
+    _countAliveDetections(0),
+    _countDeadDetections(0),
+    _outputSpeechType(AudioFrame::kNormalSpeech),
+    _averageDelayMs(0),
+    _previousSequenceNumber(0),
+    _previousTimestamp(0),
+    _recPacketDelayMs(20),
+    _RxVadDetection(false),
+    _rxApmIsEnabled(false),
+    _rxAgcIsEnabled(false),
+    _rxNsIsEnabled(false)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::Channel() - ctor");
+    _inbandDtmfQueue.ResetDtmf();
+    _inbandDtmfGenerator.Init();
+    _outputAudioLevel.Clear();
+
+    // The channel acts as sink/observer for all RTP/RTCP module callbacks.
+    RtpRtcp::Configuration configuration;
+    configuration.id = VoEModuleId(instanceId, channelId);
+    configuration.audio = true;
+    configuration.incoming_data = this;
+    configuration.incoming_messages = this;
+    configuration.outgoing_transport = this;
+    configuration.rtcp_feedback = this;
+    configuration.audio_messages = this;
+
+    _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
+
+    // Create far end AudioProcessing Module
+    _rxAudioProcessingModulePtr = AudioProcessing::Create(
+        VoEModuleId(instanceId, channelId));
+}
+
+// Destructor: tears the channel down in a strict order — stop activity,
+// de-register callbacks, de-register modules from the process thread, then
+// destroy modules. Reordering these steps risks callbacks into freed state.
+Channel::~Channel()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::~Channel() - dtor");
+
+    if (_outputExternalMedia)
+    {
+        DeRegisterExternalMediaProcessing(kPlaybackPerChannel);
+    }
+    if (_inputExternalMedia)
+    {
+        DeRegisterExternalMediaProcessing(kRecordingPerChannel);
+    }
+    StopSend();
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    StopReceiving();
+    // De-register packet callback to ensure we're not in a callback when
+    // deleting channel state, avoids race condition and deadlock.
+    if (_socketTransportModule.InitializeReceiveSockets(NULL, 0, NULL, NULL, 0)
+        != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId, _channelId),
+                     "~Channel() failed to de-register receive callback");
+    }
+#endif
+    StopPlayout();
+
+    {
+        // Shut down any active file players/recorder under the file lock.
+        CriticalSectionScoped cs(&_fileCritSect);
+        if (_inputFilePlayerPtr)
+        {
+            _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+            _inputFilePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+            _inputFilePlayerPtr = NULL;
+        }
+        if (_outputFilePlayerPtr)
+        {
+            _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+            _outputFilePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
+            _outputFilePlayerPtr = NULL;
+        }
+        if (_outputFileRecorderPtr)
+        {
+            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+            _outputFileRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+            _outputFileRecorderPtr = NULL;
+        }
+    }
+
+    // The order to safely shutdown modules in a channel is:
+    // 1. De-register callbacks in modules
+    // 2. De-register modules in process thread
+    // 3. Destroy modules
+    if (_audioCodingModule.RegisterTransportCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register transport callback"
+                     " (Audio coding module)");
+    }
+    if (_audioCodingModule.RegisterVADCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register VAD callback"
+                     " (Audio coding module)");
+    }
+#ifdef WEBRTC_DTMF_DETECTION
+    if (_audioCodingModule.RegisterIncomingMessagesCallback(NULL) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to de-register incoming messages "
+                     "callback (Audio coding module)");
+    }
+#endif
+    // De-register modules in process thread
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (_moduleProcessThreadPtr->DeRegisterModule(&_socketTransportModule)
+        == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to deregister socket module");
+    }
+#endif
+    if (_moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()) == -1)
+    {
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "~Channel() failed to deregister RTP/RTCP module");
+    }
+
+    // Destroy modules
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    UdpTransport::Destroy(
+        &_socketTransportModule);
+#endif
+    AudioCodingModule::Destroy(&_audioCodingModule);
+#ifdef WEBRTC_SRTP
+    SrtpModule::DestroySrtpModule(&_srtpModule);
+#endif
+    if (_rxAudioProcessingModulePtr != NULL)
+    {
+        AudioProcessing::Destroy(_rxAudioProcessingModulePtr); // far end APM
+        _rxAudioProcessingModulePtr = NULL;
+    }
+
+    // End of modules shutdown
+
+    // Delete other objects
+    RtpDump::DestroyRtpDump(&_rtpDumpIn);
+    RtpDump::DestroyRtpDump(&_rtpDumpOut);
+    delete [] _encryptionRTPBufferPtr;
+    delete [] _decryptionRTPBufferPtr;
+    delete [] _encryptionRTCPBufferPtr;
+    delete [] _decryptionRTCPBufferPtr;
+    // Critical sections were created in the ctor initializer list and are
+    // held by reference, hence the delete-by-address form.
+    delete &_callbackCritSect;
+    delete &_fileCritSect;
+}
+
+// Second-stage initialization. Must run after SetEngineInformation().
+// Registers the channel's modules with the process thread, initializes the
+// ACM and RTP/RTCP modules, registers all supported receive codecs, selects
+// PCMU as default send codec, and configures the far-end (receive-side)
+// AudioProcessing module. Returns 0 on success, -1 on a fatal error.
+WebRtc_Word32
+Channel::Init()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::Init()");
+
+    // --- Initial sanity
+
+    if ((_engineStatisticsPtr == NULL) ||
+        (_moduleProcessThreadPtr == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+                     VoEId(_instanceId,_channelId),
+                     "Channel::Init() must call SetEngineInformation() first");
+        return -1;
+    }
+
+    // --- Add modules to process thread (for periodic schedulation)
+
+    const bool processThreadFail =
+        ((_moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get()) != 0) ||
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+        (_moduleProcessThreadPtr->RegisterModule(
+                &_socketTransportModule) != 0));
+#else
+        false);
+#endif
+    if (processThreadFail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_INIT_CHANNEL, kTraceError,
+            "Channel::Init() modules not registered");
+        return -1;
+    }
+    // --- ACM initialization
+
+    if ((_audioCodingModule.InitializeReceiver() == -1) ||
+#ifdef WEBRTC_CODEC_AVT
+        // out-of-band Dtmf tones are played out by default
+        (_audioCodingModule.SetDtmfPlayoutStatus(true) == -1) ||
+#endif
+        (_audioCodingModule.InitializeSender() == -1))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "Channel::Init() unable to initialize the ACM - 1");
+        return -1;
+    }
+
+    // --- RTP/RTCP module initialization
+
+    // Ensure that RTCP is enabled by default for the created channel.
+    // Note that, the module will keep generating RTCP until it is explicitly
+    // disabled by the user.
+    // After StopListen (when no sockets exists), RTCP packets will no longer
+    // be transmitted since the Transport object will then be invalid.
+
+    const bool rtpRtcpFail =
+        ((_rtpRtcpModule->SetTelephoneEventStatus(false, true, true) == -1) ||
+        // RTCP is enabled by default
+        (_rtpRtcpModule->SetRTCPStatus(kRtcpCompound) == -1));
+    if (rtpRtcpFail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "Channel::Init() RTP/RTCP module not initialized");
+        return -1;
+    }
+
+    // --- Register all permanent callbacks
+    const bool fail =
+        (_audioCodingModule.RegisterTransportCallback(this) == -1) ||
+        (_audioCodingModule.RegisterVADCallback(this) == -1);
+
+    if (fail)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_INIT_CHANNEL, kTraceError,
+            "Channel::Init() callbacks not registered");
+        return -1;
+    }
+
+    // --- Register all supported codecs to the receiving side of the
+    //     RTP/RTCP module
+
+    CodecInst codec;
+    const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+
+    for (int idx = 0; idx < nSupportedCodecs; idx++)
+    {
+        // Open up the RTP/RTCP receiver for all supported codecs
+        // (per-codec failures are logged as warnings, not fatal errors).
+        if ((_audioCodingModule.Codec(idx, codec) == -1) ||
+            (_rtpRtcpModule->RegisterReceivePayload(codec) == -1))
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::Init() unable to register %s (%d/%d/%d/%d) "
+                         "to RTP/RTCP receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "Channel::Init() %s (%d/%d/%d/%d) has been added to "
+                         "the RTP/RTCP receiver",
+                         codec.plname, codec.pltype, codec.plfreq,
+                         codec.channels, codec.rate);
+        }
+
+        // Ensure that PCMU is used as default codec on the sending side
+        if (!STR_CASE_CMP(codec.plname, "PCMU") && (codec.channels == 1))
+        {
+            SetSendCodec(codec);
+        }
+
+        // Register default PT for outband 'telephone-event'
+        if (!STR_CASE_CMP(codec.plname, "telephone-event"))
+        {
+            if ((_rtpRtcpModule->RegisterSendPayload(codec) == -1) ||
+                (_audioCodingModule.RegisterReceiveCodec(codec) == -1))
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register outband "
+                             "'telephone-event' (%d/%d) correctly",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+
+        // Comfort noise must be known to both ACM directions and the sender.
+        if (!STR_CASE_CMP(codec.plname, "CN"))
+        {
+            if ((_audioCodingModule.RegisterSendCodec(codec) == -1) ||
+                (_audioCodingModule.RegisterReceiveCodec(codec) == -1) ||
+                (_rtpRtcpModule->RegisterSendPayload(codec) == -1))
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register CN (%d/%d) "
+                             "correctly - 1",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+#ifdef WEBRTC_CODEC_RED
+        // Register RED to the receiving side of the ACM.
+        // We will not receive an OnInitializeDecoder() callback for RED.
+        if (!STR_CASE_CMP(codec.plname, "RED"))
+        {
+            if (_audioCodingModule.RegisterReceiveCodec(codec) == -1)
+            {
+                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                             VoEId(_instanceId,_channelId),
+                             "Channel::Init() failed to register RED (%d/%d) "
+                             "correctly",
+                             codec.pltype, codec.plfreq);
+            }
+        }
+#endif
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    // Ensure that the WebRtcSocketTransport implementation is used as
+    // Transport on the sending side
+    {
+        // A lock is needed here since users can call
+        // RegisterExternalTransport() at the same time.
+        CriticalSectionScoped cs(&_callbackCritSect);
+        _transportPtr = &_socketTransportModule;
+    }
+#endif
+
+    // Initialize the far end AP module
+    // Using 8 kHz as initial Fs, the same as in transmission. Might be
+    // changed at the first receiving audio.
+    if (_rxAudioProcessingModulePtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NO_MEMORY, kTraceCritical,
+            "Channel::Init() failed to create the far-end AudioProcessing"
+            " module");
+        return -1;
+    }
+
+    // APM configuration failures below are warnings only, not fatal.
+    if (_rxAudioProcessingModulePtr->set_sample_rate_hz(8000))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Channel::Init() failed to set the sample rate to 8K for"
+            " far-end AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->set_num_channels(1, 1) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOUNDCARD_ERROR, kTraceWarning,
+            "Init() failed to set channels for the primary audio stream");
+    }
+
+    if (_rxAudioProcessingModulePtr->high_pass_filter()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Channel::Init() failed to set the high-pass filter for"
+            " far-end AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->noise_suppression()->set_level(
+        (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set noise reduction level for far-end"
+            " AP module");
+    }
+    if (_rxAudioProcessingModulePtr->noise_suppression()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set noise reduction state for far-end"
+            " AP module");
+    }
+
+    if (_rxAudioProcessingModulePtr->gain_control()->set_mode(
+        (GainControl::Mode)WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set AGC mode for far-end AP module");
+    }
+    if (_rxAudioProcessingModulePtr->gain_control()->Enable(
+        WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_APM_ERROR, kTraceWarning,
+            "Init() failed to set AGC state for far-end AP module");
+    }
+
+    return 0;
+}
+
+// Injects engine-wide collaborators (statistics, mixers, process thread,
+// ADM, observer, callback lock) into this channel. Must be called before
+// Init(). Pointers are borrowed, not owned. Always returns 0.
+WebRtc_Word32
+Channel::SetEngineInformation(Statistics& engineStatistics,
+                              OutputMixer& outputMixer,
+                              voe::TransmitMixer& transmitMixer,
+                              ProcessThread& moduleProcessThread,
+                              AudioDeviceModule& audioDeviceModule,
+                              VoiceEngineObserver* voiceEngineObserver,
+                              CriticalSectionWrapper* callbackCritSect)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetEngineInformation()");
+    _engineStatisticsPtr = &engineStatistics;
+    _outputMixerPtr = &outputMixer;
+    // Bug fix: this statement previously ended with a comma (comma
+    // operator) instead of a semicolon.
+    _transmitMixerPtr = &transmitMixer;
+    _moduleProcessThreadPtr = &moduleProcessThread;
+    _audioDeviceModulePtr = &audioDeviceModule;
+    _voiceEngineObserverPtr = voiceEngineObserver;
+    _callbackCritSectPtr = callbackCritSect;
+    return 0;
+}
+
+// Advances the local RTP timestamp by one frame's worth of samples
+// (the RTP module later adds its own random offset). Always returns 0.
+WebRtc_Word32
+Channel::UpdateLocalTimeStamp()
+{
+
+    _timeStamp += _audioFrame.samples_per_channel_;
+    return 0;
+}
+
+// Starts playout for this channel by adding it as a mixing participant.
+// Idempotent: returns 0 immediately if already playing; -1 on mixer failure.
+WebRtc_Word32
+Channel::StartPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartPlayout()");
+    if (_playing)
+    {
+        return 0;
+    }
+    // Add participant as candidates for mixing.
+    if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
+            "StartPlayout() failed to add participant to mixer");
+        return -1;
+    }
+
+    _playing = true;
+
+    // Re-attach any active file playout to the mixer for this channel.
+    if (RegisterFilePlayingToMixer() != 0)
+        return -1;
+
+    return 0;
+}
+
+// Stops playout for this channel by removing it from the output mixer and
+// resetting the output level meter. Idempotent: returns 0 if not playing;
+// -1 on mixer failure.
+WebRtc_Word32
+Channel::StopPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopPlayout()");
+    if (!_playing)
+    {
+        return 0;
+    }
+    // Remove participant as candidates for mixing
+    if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
+            // Bug fix: error text previously said "StartPlayout()".
+            "StopPlayout() failed to remove participant from mixer");
+        return -1;
+    }
+
+    _playing = false;
+    _outputAudioLevel.Clear();
+
+    return 0;
+}
+
+// Starts RTP sending on this channel. Idempotent: returns 0 if already
+// sending; -1 if the RTP/RTCP module refuses to start (the flag is then
+// rolled back). Returns 0 on success.
+WebRtc_Word32
+Channel::StartSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartSend()");
+    {
+        // A lock is needed because |_sending| can be accessed or modified by
+        // another thread at the same time.
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (_sending)
+        {
+            return 0;
+        }
+        _sending = true;
+    }
+
+    // Note: the lock is deliberately released before calling into the
+    // RTP/RTCP module and re-acquired only on the failure path below.
+    if (_rtpRtcpModule->SetSendingStatus(true) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+            "StartSend() RTP/RTCP failed to start sending");
+        CriticalSectionScoped cs(&_callbackCritSect);
+        _sending = false;
+        return -1;
+    }
+
+    return 0;
+}
+
+// Stops RTP sending on this channel. Idempotent: returns 0 if not sending.
+// Resets the sending SSRC/sequence number, which triggers a direct RTCP
+// BYE transmission; module failures are logged as warnings only.
+WebRtc_Word32
+Channel::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopSend()");
+    {
+        // A lock is needed because |_sending| can be accessed or modified by
+        // another thread at the same time.
+        CriticalSectionScoped cs(&_callbackCritSect);
+
+        if (!_sending)
+        {
+            return 0;
+        }
+        _sending = false;
+    }
+
+    // Reset sending SSRC and sequence number and triggers direct transmission
+    // of RTCP BYE
+    if (_rtpRtcpModule->SetSendingStatus(false) == -1 ||
+        _rtpRtcpModule->ResetSendDataCountersRTP() == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
+            // Bug fix: warning text previously said "StartSend()".
+            "StopSend() RTP/RTCP failed to stop sending");
+    }
+
+    return 0;
+}
+
+// Starts packet reception. Idempotent: returns 0 if already receiving.
+// With the built-in UDP transport, the receive sockets must have been
+// initialized (SetLocalReceiver) first; returns -1 on socket errors.
+WebRtc_Word32
+Channel::StartReceiving()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StartReceiving()");
+    if (_receiving)
+    {
+        return 0;
+    }
+    // If external transport is used, we will only initialize/set the variables
+    // after this section, since we are not using the WebRtc transport but
+    // still need to keep track of e.g. if we are receiving.
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_externalTransport)
+    {
+        if (!_socketTransportModule.ReceiveSocketsInitialized())
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKETS_NOT_INITED, kTraceError,
+                "StartReceive() must set local receiver first");
+            return -1;
+        }
+        if (_socketTransportModule.StartReceiving(KNumberOfSocketBuffers) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+                "StartReceiving() failed to start receiving");
+            return -1;
+        }
+    }
+#endif
+    _receiving = true;
+    _numberOfDiscardedPackets = 0;
+    return 0;
+}
+
+// Stops packet reception. Idempotent: returns 0 if not receiving. Also
+// restores the telephone-event detection status and re-registers receive
+// codecs so a subsequent StartReceiving() starts from a clean RTP state.
+WebRtc_Word32
+Channel::StopReceiving()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::StopReceiving()");
+    if (!_receiving)
+    {
+        return 0;
+    }
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_externalTransport &&
+        _socketTransportModule.ReceiveSocketsInitialized())
+    {
+        if (_socketTransportModule.StopReceiving() != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+                "StopReceiving() failed to stop receiving.");
+            return -1;
+        }
+    }
+#endif
+    bool dtmfDetection = _rtpRtcpModule->TelephoneEvent();
+    // Recover DTMF detection status.
+    WebRtc_Word32 ret = _rtpRtcpModule->SetTelephoneEventStatus(dtmfDetection,
+                                                                true, true);
+    if (ret != 0) {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "StopReceiving() failed to restore telephone-event status.");
+    }
+    RegisterReceiveCodecsToRTPModule();
+    _receiving = false;
+    return 0;
+}
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+// Binds the channel's receive sockets to the given local RTP/RTCP ports
+// (and optional IP / multicast address). Fails if external transport is
+// active or the channel is already sending/receiving. Maps low-level
+// UdpTransport error codes to VoE error codes. Returns 0 on success.
+WebRtc_Word32
+Channel::SetLocalReceiver(const WebRtc_UWord16 rtpPort,
+                          const WebRtc_UWord16 rtcpPort,
+                          const char ipAddr[64],
+                          const char multicastIpAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetLocalReceiver()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetLocalReceiver() conflict with external transport");
+        return -1;
+    }
+
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_SENDING, kTraceError,
+            "SetLocalReceiver() already sending");
+        return -1;
+    }
+    if (_receiving)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_LISTENING, kTraceError,
+            "SetLocalReceiver() already receiving");
+        return -1;
+    }
+
+    // `this` is registered as the packet callback for the sockets.
+    if (_socketTransportModule.InitializeReceiveSockets(this,
+                                                        rtpPort,
+                                                        ipAddr,
+                                                        multicastIpAddr,
+                                                        rtcpPort) != 0)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kIpAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "SetLocalReceiver() invalid IP address");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetLocalReceiver() invalid socket");
+            break;
+        case UdpTransport::kPortInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_PORT_NMBR, kTraceError,
+                "SetLocalReceiver() invalid port");
+            break;
+        case UdpTransport::kFailedToBindPort:
+            _engineStatisticsPtr->SetLastError(
+                VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED, kTraceError,
+                "SetLocalReceiver() binding failed");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetLocalReceiver() undefined socket error");
+            break;
+        }
+        return -1;
+    }
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+// Retrieves the locally bound RTP/RTCP ports and (optionally) the local IP
+// address from the socket module. Fails when external transport is active.
+// Returns 0 on success, -1 on error.
+WebRtc_Word32
+Channel::GetLocalReceiver(int& port, int& RTCPport, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetLocalReceiver()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            // Bug fix: error text previously said "SetLocalReceiver()".
+            "GetLocalReceiver() conflict with external transport");
+        return -1;
+    }
+
+    char ipAddrTmp[UdpTransport::kIpAddressVersion6Length] = {0};
+    WebRtc_UWord16 rtpPort(0);
+    WebRtc_UWord16 rtcpPort(0);
+    char multicastIpAddr[UdpTransport::kIpAddressVersion6Length] = {0};
+
+    // Acquire socket information from the socket module
+    if (_socketTransportModule.ReceiveSocketInformation(ipAddrTmp,
+                                                        rtpPort,
+                                                        rtcpPort,
+                                                        multicastIpAddr) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_GET_SOCKET_INFO, kTraceError,
+            "GetLocalReceiver() unable to retrieve socket information");
+        return -1;
+    }
+
+    // Deliver valid results to the user
+    port = static_cast<int> (rtpPort);
+    RTCPport = static_cast<int> (rtcpPort);
+    if (ipAddr != NULL)
+    {
+        // NOTE(review): caller supplies a 64-byte buffer while ipAddrTmp is
+        // kIpAddressVersion6Length — confirm the latter never exceeds 64.
+        strcpy(ipAddr, ipAddrTmp);
+    }
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+// Configures the remote destination (IP + RTP/RTCP ports) for outgoing
+// packets and, when a non-default sourcePort differing from the local
+// receive port is given, creates an extra local socket so outgoing packets
+// carry that source port. Maps UdpTransport errors to VoE error codes.
+// Returns 0 on success, -1 on error.
+WebRtc_Word32
+Channel::SetSendDestination(const WebRtc_UWord16 rtpPort,
+                            const char ipAddr[64],
+                            const int sourcePort,
+                            const WebRtc_UWord16 rtcpPort)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendDestination()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendDestination() conflict with external transport");
+        return -1;
+    }
+
+    // Initialize ports and IP address for the remote (destination) side.
+    // By default, the sockets used for receiving are used for transmission as
+    // well, hence the source ports for outgoing packets are the same as the
+    // receiving ports specified in SetLocalReceiver.
+    // If an extra send socket has been created, it will be utilized until a
+    // new source port is specified or until the channel has been deleted and
+    // recreated. If no socket exists, sockets will be created when the first
+    // RTP and RTCP packets shall be transmitted (see e.g.
+    // UdpTransportImpl::SendPacket()).
+    //
+    // NOTE: this function does not require that sockets exists; all it does is
+    // to build send structures to be used with the sockets when they exist.
+    // It is therefore possible to call this method before SetLocalReceiver.
+    // However, sockets must exist if a multi-cast address is given as input.
+
+    // Build send structures and enable QoS (if enabled and supported)
+    if (_socketTransportModule.InitializeSendSockets(
+        ipAddr, rtpPort, rtcpPort) != UdpTransport::kNoSocketError)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kIpAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_IP_ADDRESS, kTraceError,
+                "SetSendDestination() invalid IP address 1");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetSendDestination() invalid socket 1");
+            break;
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(
+                VE_GQOS_ERROR, kTraceError,
+                "SetSendDestination() failed to set QoS");
+            break;
+        case UdpTransport::kMulticastAddressInvalid:
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_MULTICAST_ADDRESS, kTraceError,
+                "SetSendDestination() invalid multicast address");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(
+                VE_SOCKET_ERROR, kTraceError,
+                "SetSendDestination() undefined socket error 1");
+            break;
+        }
+        return -1;
+    }
+
+    // Check if the user has specified a non-default source port different from
+    // the local receive port.
+    // If so, an extra local socket will be created unless the source port is
+    // not unique.
+    if (sourcePort != kVoEDefault)
+    {
+        WebRtc_UWord16 receiverRtpPort(0);
+        WebRtc_UWord16 rtcpNA(0);
+        if (_socketTransportModule.ReceiveSocketInformation(NULL,
+                                                            receiverRtpPort,
+                                                            rtcpNA,
+                                                            NULL) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_CANNOT_GET_SOCKET_INFO, kTraceError,
+                "SetSendDestination() failed to retrieve socket information");
+            return -1;
+        }
+
+        WebRtc_UWord16 sourcePortUW16 =
+                static_cast<WebRtc_UWord16> (sourcePort);
+
+        // An extra socket will only be created if the specified source port
+        // differs from the local receive port.
+        if (sourcePortUW16 != receiverRtpPort)
+        {
+            // Initialize extra local socket to get a different source port
+            // than the local
+            // receiver port. Always use default source for RTCP.
+            // Note that, this calls UdpTransport::CloseSendSockets().
+            // RTCP uses sourcePort+1 by convention here.
+            if (_socketTransportModule.InitializeSourcePorts(
+                sourcePortUW16,
+                sourcePortUW16+1) != 0)
+            {
+                UdpTransport::ErrorCode lastSockError(
+                    _socketTransportModule.LastError());
+                switch (lastSockError)
+                {
+                case UdpTransport::kIpAddressInvalid:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_INVALID_IP_ADDRESS, kTraceError,
+                        "SetSendDestination() invalid IP address 2");
+                    break;
+                case UdpTransport::kSocketInvalid:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendDestination() invalid socket 2");
+                    break;
+                default:
+                    _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendDestination() undefined socket error 2");
+                    break;
+                }
+                return -1;
+            }
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "SetSendDestination() extra local socket is created"
+                         " to facilitate unique source port");
+        }
+        else
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "SetSendDestination() sourcePort equals the local"
+                         " receive port => no extra socket is created");
+        }
+    }
+
+    return 0;
+}
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+// Retrieves the configured remote destination (ports + IP) and the RTP
+// source port from the socket module. Fails when external transport is
+// active. Returns 0 on success.
+WebRtc_Word32
+Channel::GetSendDestination(int& port,
+                            char ipAddr[64],
+                            int& sourcePort,
+                            int& RTCPport)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendDestination()");
+
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendDestination() conflict with external transport");
+        return -1;
+    }
+
+    char ipAddrTmp[UdpTransport::kIpAddressVersion6Length] = {0};
+    WebRtc_UWord16 rtpPort(0);
+    WebRtc_UWord16 rtcpPort(0);
+    WebRtc_UWord16 rtpSourcePort(0);
+    WebRtc_UWord16 rtcpSourcePort(0);
+
+    // Acquire sending socket information from the socket module
+    _socketTransportModule.SendSocketInformation(ipAddrTmp, rtpPort, rtcpPort);
+    _socketTransportModule.SourcePorts(rtpSourcePort, rtcpSourcePort);
+
+    // Deliver valid results to the user
+    // (the RTCP source port is read but intentionally not reported).
+    port = static_cast<int> (rtpPort);
+    RTCPport = static_cast<int> (rtcpPort);
+    sourcePort = static_cast<int> (rtpSourcePort);
+    if (ipAddr != NULL)
+    {
+        strcpy(ipAddr, ipAddrTmp);
+    }
+
+    return 0;
+}
+#endif
+
+
+// Maps the public NetEQ playout mode onto the ACM's AudioPlayoutMode and
+// applies it. Returns 0 on success, -1 if the ACM rejects the mode.
+WebRtc_Word32
+Channel::SetNetEQPlayoutMode(NetEqModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetNetEQPlayoutMode()");
+    // kNetEqDefault (and anything else) maps to the `voice` default.
+    AudioPlayoutMode playoutMode = voice;
+    if (mode == kNetEqStreaming)
+    {
+        playoutMode = streaming;
+    }
+    else if (mode == kNetEqFax)
+    {
+        playoutMode = fax;
+    }
+    if (_audioCodingModule.SetPlayoutMode(playoutMode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetNetEQPlayoutMode() failed to set playout mode");
+        return -1;
+    }
+    return 0;
+}
+
+// Reads the ACM's current playout mode and maps it back to the public
+// NetEqModes enum. Always returns 0.
+WebRtc_Word32
+Channel::GetNetEQPlayoutMode(NetEqModes& mode)
+{
+    const AudioPlayoutMode playoutMode = _audioCodingModule.PlayoutMode();
+    switch (playoutMode)
+    {
+        case voice:
+            mode = kNetEqDefault;
+            break;
+        case streaming:
+            mode = kNetEqStreaming;
+            break;
+        case fax:
+            mode = kNetEqFax;
+            break;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_instanceId,_channelId),
+                 "Channel::GetNetEQPlayoutMode() => mode=%u", mode);
+    return 0;
+}
+
+// Maps the public NetEQ background-noise mode onto the ACM's
+// ACMBackgroundNoiseMode and applies it. Returns 0 on success, -1 on
+// ACM failure.
+WebRtc_Word32
+Channel::SetNetEQBGNMode(NetEqBgnModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 // Bug fix: trace previously said "Channel::SetNetEQPlayoutMode()".
+                 "Channel::SetNetEQBGNMode()");
+    ACMBackgroundNoiseMode noiseMode(On);
+    switch (mode)
+    {
+        case kBgnOn:
+            noiseMode = On;
+            break;
+        case kBgnFade:
+            noiseMode = Fade;
+            break;
+        case kBgnOff:
+            noiseMode = Off;
+            break;
+    }
+    if (_audioCodingModule.SetBackgroundNoiseMode(noiseMode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetBackgroundNoiseMode() failed to set noise mode");
+        return -1;
+    }
+    return 0;
+}
+
+// Puts the channel on (or takes it off) hold. kHoldSendAndPlay affects both
+// directions, kHoldPlayOnly only playout, kHoldSendOnly only the send side.
+// Always returns 0.
+WebRtc_Word32
+Channel::SetOnHoldStatus(bool enable, OnHoldModes mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetOnHoldStatus()");
+    const bool touchOutput =
+        (mode == kHoldSendAndPlay) || (mode == kHoldPlayOnly);
+    const bool touchInput =
+        (mode == kHoldSendAndPlay) || (mode == kHoldSendOnly);
+    if (touchOutput)
+    {
+        _outputIsOnHold = enable;
+    }
+    if (touchInput)
+    {
+        _inputIsOnHold = enable;
+    }
+    return 0;
+}
+
+// Reports whether the channel is on hold and in which direction(s).
+// Note: when nothing is on hold, `mode` is left unmodified. Always returns 0.
+WebRtc_Word32
+Channel::GetOnHoldStatus(bool& enabled, OnHoldModes& mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetOnHoldStatus()");
+    enabled = (_outputIsOnHold || _inputIsOnHold);
+    if (_outputIsOnHold && _inputIsOnHold)
+    {
+        mode = kHoldSendAndPlay;
+    }
+    else if (_outputIsOnHold && !_inputIsOnHold)
+    {
+        mode = kHoldPlayOnly;
+    }
+    else if (!_outputIsOnHold && _inputIsOnHold)
+    {
+        mode = kHoldSendOnly;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetOnHoldStatus() => enabled=%d, mode=%d",
+                 enabled, mode);
+    return 0;
+}
+
+// Installs |observer| as the callback target for engine events on this
+// channel. Only one observer may be registered at a time; registering a
+// second one fails with VE_INVALID_OPERATION. The pointer store is guarded
+// by _callbackCritSect since callbacks may read it from other threads.
+WebRtc_Word32
+Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterVoiceEngineObserver() observer already enabled");
+        return -1;
+    }
+    _voiceEngineObserverPtr = &observer;
+    return 0;
+}
+
+// Removes a previously registered voice-engine observer. De-registering
+// when no observer is set is treated as a warning, not an error (returns 0).
+WebRtc_Word32
+Channel::DeRegisterVoiceEngineObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (!_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterVoiceEngineObserver() observer already disabled");
+        return 0;
+    }
+    _voiceEngineObserverPtr = NULL;
+    return 0;
+}
+
+// Reads back the ACM's background-noise mode and maps it onto the public
+// NetEqBgnModes enum. The return value of BackgroundNoiseMode() is ignored;
+// |noiseMode| keeps its default (On) if the query fails. Always returns 0.
+WebRtc_Word32
+Channel::GetNetEQBGNMode(NetEqBgnModes& mode)
+{
+    ACMBackgroundNoiseMode noiseMode(On);
+    _audioCodingModule.BackgroundNoiseMode(noiseMode);
+    switch (noiseMode)
+    {
+        case On:
+            mode = kBgnOn;
+            break;
+        case Fade:
+            mode = kBgnFade;
+            break;
+        case Off:
+            mode = kBgnOff;
+            break;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetNetEQBGNMode() => mode=%u", mode);
+    return 0;
+}
+
+// Retrieves the currently registered send codec from the ACM.
+// Forwards the ACM return value (0 on success, -1 on failure).
+WebRtc_Word32
+Channel::GetSendCodec(CodecInst& codec)
+{
+    return (_audioCodingModule.SendCodec(codec));
+}
+
+// Retrieves the codec of the last received audio frame from the ACM.
+// Forwards the ACM return value (0 on success, -1 on failure).
+WebRtc_Word32
+Channel::GetRecCodec(CodecInst& codec)
+{
+    return (_audioCodingModule.ReceiveCodec(codec));
+}
+
+// Registers |codec| as the send codec in both the ACM and the RTP/RTCP
+// module, then updates the RTP audio packet size.
+// Returns 0 on success, -1 on any registration failure.
+WebRtc_Word32
+Channel::SetSendCodec(const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendCodec()");
+
+    if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetSendCodec() failed to register codec to ACM");
+        return -1;
+    }
+
+    if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
+    {
+        // Registration can fail when the payload type is already taken;
+        // de-register the old mapping and retry once before giving up.
+        _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
+        if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
+        {
+            WEBRTC_TRACE(
+                    kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                    "SetSendCodec() failed to register codec to"
+                    " RTP/RTCP module");
+            return -1;
+        }
+    }
+
+    if (_rtpRtcpModule->SetAudioPacketSize(codec.pacsize) != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetSendCodec() failed to set audio packet size");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Enables/disables VAD and DTX in the ACM. Note the argument order of
+// ACM::SetVAD: first DTX enable, then VAD enable, then aggressiveness mode.
+// Returns 0 on success, -1 (with last error set) on ACM failure.
+WebRtc_Word32
+Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetVADStatus(mode=%d)", mode);
+    // To disable VAD, DTX must be disabled too
+    disableDTX = ((enableVAD == false) ? true : disableDTX);
+    if (_audioCodingModule.SetVAD(!disableDTX, enableVAD, mode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetVADStatus() failed to set VAD");
+        return -1;
+    }
+    return 0;
+}
+
+// Retrieves the current VAD/DTX state from the ACM.
+// The ACM reports "DTX enabled" while this API exposes "DTX disabled",
+// hence the final negation of |disabledDTX|.
+// Returns 0 on success, -1 (with last error set) on ACM failure.
+WebRtc_Word32
+Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetVADStatus");
+    if (_audioCodingModule.VAD(disabledDTX, enabledVAD, mode) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "GetVADStatus() failed to get VAD status");
+        return -1;
+    }
+    disabledDTX = !disabledDTX;
+    return 0;
+}
+
+// Sets (or, when codec.pltype == -1, removes) the receive payload-type
+// mapping for |codec| in both the RTP/RTCP module and the ACM.
+// Must not be called while playing or listening.
+// Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetRecPayloadType(const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetRecPayloadType()");
+
+    if (_playing)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceError,
+            "SetRecPayloadType() unable to set PT while playing");
+        return -1;
+    }
+    if (_receiving)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_LISTENING, kTraceError,
+            "SetRecPayloadType() unable to set PT while listening");
+        return -1;
+    }
+
+    if (codec.pltype == -1)
+    {
+        // De-register the selected codec (RTP/RTCP module and ACM)
+
+        WebRtc_Word8 pltype(-1);
+        CodecInst rxCodec = codec;
+
+        // Get payload type for the given codec
+        _rtpRtcpModule->ReceivePayloadType(rxCodec, &pltype);
+        rxCodec.pltype = pltype;
+
+        if (_rtpRtcpModule->DeRegisterReceivePayload(pltype) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                    VE_RTP_RTCP_MODULE_ERROR,
+                    kTraceError,
+                    "SetRecPayloadType() RTP/RTCP-module deregistration "
+                    "failed");
+            return -1;
+        }
+        if (_audioCodingModule.UnregisterReceiveCodec(rxCodec.pltype) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() ACM deregistration failed - 1");
+            return -1;
+        }
+        return 0;
+    }
+
+    if (_rtpRtcpModule->RegisterReceivePayload(codec) != 0)
+    {
+        // First attempt to register failed => de-register and try again
+        _rtpRtcpModule->DeRegisterReceivePayload(codec.pltype);
+        if (_rtpRtcpModule->RegisterReceivePayload(codec) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() RTP/RTCP-module registration failed");
+            return -1;
+        }
+    }
+    if (_audioCodingModule.RegisterReceiveCodec(codec) != 0)
+    {
+        // Same retry strategy for the ACM: unregister the stale payload
+        // type, then register once more.
+        _audioCodingModule.UnregisterReceiveCodec(codec.pltype);
+        if (_audioCodingModule.RegisterReceiveCodec(codec) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+                "SetRecPayloadType() ACM registration failed - 1");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Looks up the receive payload type registered for |codec| in the RTP/RTCP
+// module and writes it into codec.pltype.
+// Returns 0 on success, -1 (with last error set) if no mapping exists.
+WebRtc_Word32
+Channel::GetRecPayloadType(CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRecPayloadType()");
+    WebRtc_Word8 payloadType(-1);
+    if (_rtpRtcpModule->ReceivePayloadType(codec, &payloadType) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
+            "GetRecPayloadType() failed to retrieve RX payload type");
+        return -1;
+    }
+    codec.pltype = payloadType;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetRecPayloadType() => pltype=%u", codec.pltype);
+    return 0;
+}
+
+// Stub: AMR encoding is not supported by the ACM; always fails with -1.
+WebRtc_Word32
+Channel::SetAMREncFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMREncFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+// Stub: AMR decoding is not supported by the ACM; always fails with -1.
+WebRtc_Word32
+Channel::SetAMRDecFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRDecFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+// Stub: AMR-WB encoding is not supported by the ACM; always fails with -1.
+WebRtc_Word32
+Channel::SetAMRWbEncFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRWbEncFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+
+}
+
+// Stub: AMR-WB decoding is not supported by the ACM; always fails with -1.
+WebRtc_Word32
+Channel::SetAMRWbDecFormat(AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetAMRWbDecFormat()");
+
+    // ACM doesn't support AMR
+    return -1;
+}
+
+// Registers a Comfort Noise (CN) send payload type for the given sampling
+// frequency in both the ACM and the RTP/RTCP module.
+// NOTE(review): for frequencies other than 16/32 kHz, samplingFreqHz stays
+// -1 — presumably the ACM treats -1 as "default frequency"; confirm against
+// AudioCodingModule::Codec().
+// Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendCNPayloadType()");
+
+    CodecInst codec;
+    WebRtc_Word32 samplingFreqHz(-1);
+    const int kMono = 1;
+    if (frequency == kFreq32000Hz)
+        samplingFreqHz = 32000;
+    else if (frequency == kFreq16000Hz)
+        samplingFreqHz = 16000;
+
+    if (_audioCodingModule.Codec("CN", codec, samplingFreqHz, kMono) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetSendCNPayloadType() failed to retrieve default CN codec "
+            "settings");
+        return -1;
+    }
+
+    // Modify the payload type (must be set to dynamic range)
+    codec.pltype = type;
+
+    if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetSendCNPayloadType() failed to register CN to ACM");
+        return -1;
+    }
+
+    if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
+    {
+        // Payload type may be taken; de-register it and retry once.
+        _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
+        if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+                "SetSendCNPayloadType() failed to register CN to RTP/RTCP "
+                "module");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Configures the initial target rate of the iSAC bandwidth estimator.
+// Only valid when iSAC is the current send codec; rateBps == 0 means
+// "use codec default". Validates the rate against per-band limits
+// (WB at 16 kHz, SWB at 32 kHz) before forwarding to the ACM.
+// Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetISACInitTargetRate(int rateBps, bool useFixedFrameSize)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACInitTargetRate()");
+
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACInitTargetRate() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        // This API is only valid if iSAC is setup to run in channel-adaptive
+        // mode.
+        // We do not validate the adaptive mode here. It is done later in the
+        // ConfigISACBandwidthEstimator() API.
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACInitTargetRate() send codec is not iSAC");
+        return -1;
+    }
+
+    WebRtc_UWord8 initFrameSizeMsec(0);
+    if (16000 == sendCodec.plfreq)
+    {
+        // Note that 0 is a valid value and corresponds to "use default".
+        if ((rateBps != 0 &&
+            rateBps < kVoiceEngineMinIsacInitTargetRateBpsWb) ||
+            (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsWb))
+        {
+             _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACInitTargetRate() invalid target rate - 1");
+            return -1;
+        }
+        // 30 or 60ms
+        initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 16);
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((rateBps != 0 &&
+            rateBps < kVoiceEngineMinIsacInitTargetRateBpsSwb) ||
+            (rateBps > kVoiceEngineMaxIsacInitTargetRateBpsSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACInitTargetRate() invalid target rate - 2");
+            return -1;
+        }
+        initFrameSizeMsec = (WebRtc_UWord8)(sendCodec.pacsize / 32); // 30ms
+    }
+
+    if (_audioCodingModule.ConfigISACBandwidthEstimator(
+        initFrameSizeMsec, rateBps, useFixedFrameSize) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACInitTargetRate() iSAC BWE config failed");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Sets the maximum instantaneous payload rate for iSAC. Only valid when
+// iSAC is the current send codec and the channel is not sending; the rate
+// is validated against per-band limits (WB at 16 kHz, SWB at 32 kHz).
+// Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetISACMaxRate(int rateBps)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACMaxRate()");
+
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxRate() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        // This API is only valid if iSAC is selected as sending codec.
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxRate() send codec is not iSAC");
+        return -1;
+    }
+    if (16000 == sendCodec.plfreq)
+    {
+        if ((rateBps < kVoiceEngineMinIsacMaxRateBpsWb) ||
+            (rateBps > kVoiceEngineMaxIsacMaxRateBpsWb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxRate() invalid max rate - 1");
+            return -1;
+        }
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((rateBps < kVoiceEngineMinIsacMaxRateBpsSwb) ||
+            (rateBps > kVoiceEngineMaxIsacMaxRateBpsSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxRate() invalid max rate - 2");
+            return -1;
+        }
+    }
+    if (_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError,
+            "SetISACMaxRate() unable to set max rate while sending");
+        return -1;
+    }
+
+    // Set the maximum instantaneous rate of iSAC (works for both adaptive
+    // and non-adaptive mode)
+    if (_audioCodingModule.SetISACMaxRate(rateBps) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACMaxRate() failed to set max rate");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Sets the maximum payload size (in bytes) for iSAC. Only valid when iSAC
+// is the current send codec and the channel is not sending; the size is
+// validated against per-band limits (WB at 16 kHz, SWB at 32 kHz).
+// Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetISACMaxPayloadSize(int sizeBytes)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetISACMaxPayloadSize()");
+    CodecInst sendCodec;
+    if (_audioCodingModule.SendCodec(sendCodec) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() failed to retrieve send codec");
+        return -1;
+    }
+    if (STR_CASE_CMP(sendCodec.plname, "ISAC") != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CODEC_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() send codec is not iSAC");
+        return -1;
+    }
+    if (16000 == sendCodec.plfreq)
+    {
+        if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesWb) ||
+            (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesWb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxPayloadSize() invalid max payload - 1");
+            return -1;
+        }
+    }
+    else if (32000 == sendCodec.plfreq)
+    {
+        if ((sizeBytes < kVoiceEngineMinIsacMaxPayloadSizeBytesSwb) ||
+            (sizeBytes > kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb))
+        {
+            _engineStatisticsPtr->SetLastError(
+                VE_INVALID_ARGUMENT, kTraceError,
+                "SetISACMaxPayloadSize() invalid max payload - 2");
+            return -1;
+        }
+    }
+    if (_sending)
+    {
+        // Fixed error text: previously said "max rate", which belongs to
+        // SetISACMaxRate() and made the two failures indistinguishable.
+        _engineStatisticsPtr->SetLastError(
+            VE_SENDING, kTraceError,
+            "SetISACMaxPayloadSize() unable to set max payload size while "
+            "sending");
+        return -1;
+    }
+
+    if (_audioCodingModule.SetISACMaxPayloadSize(sizeBytes) == -1)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+            "SetISACMaxPayloadSize() failed to set max payload size");
+        return -1;
+    }
+    return 0;
+}
+
+// Switches the channel to an application-supplied transport. Fails if the
+// built-in UDP sockets are already initialized (conflict) or if external
+// transport is already enabled. Guarded by _callbackCritSect because
+// _transportPtr is read from packet-delivery callbacks.
+WebRtc_Word32 Channel::RegisterExternalTransport(Transport& transport)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::RegisterExternalTransport()");
+
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    // Sanity checks for default (non external transport) to avoid conflict with
+    // WebRtc sockets.
+    if (_socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(VE_SEND_SOCKETS_CONFLICT,
+                                           kTraceError,
+                "RegisterExternalTransport() send sockets already initialized");
+        return -1;
+    }
+    if (_socketTransportModule.ReceiveSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(VE_RECEIVE_SOCKETS_CONFLICT,
+                                           kTraceError,
+             "RegisterExternalTransport() receive sockets already initialized");
+        return -1;
+    }
+#endif
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(VE_INVALID_OPERATION,
+                                           kTraceError,
+              "RegisterExternalTransport() external transport already enabled");
+       return -1;
+    }
+    _externalTransport = true;
+    _transportPtr = &transport;
+    return 0;
+}
+
+// Disables external transport. When the build includes the internal socket
+// transport, falls back to it; otherwise leaves the channel without any
+// transport. De-registering when already disabled is a warning, not an
+// error (returns 0).
+WebRtc_Word32
+Channel::DeRegisterExternalTransport()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterExternalTransport()");
+
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (!_transportPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterExternalTransport() external transport already "
+            "disabled");
+        return 0;
+    }
+    _externalTransport = false;
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    _transportPtr = NULL;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "DeRegisterExternalTransport() all transport is disabled");
+#else
+    _transportPtr = &_socketTransportModule;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "DeRegisterExternalTransport() internal Transport is enabled");
+#endif
+    return 0;
+}
+
+// Entry point for externally delivered RTP packets. Forwards to the common
+// incoming-packet path with a dummy source address (the real peer address
+// is unknown when transport is external). Always returns 0.
+WebRtc_Word32
+Channel::ReceivedRTPPacket(const WebRtc_Word8* data, WebRtc_Word32 length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ReceivedRTPPacket()");
+    const char dummyIP[] = "127.0.0.1";
+    IncomingRTPPacket(data, length, dummyIP, 0);
+    return 0;
+}
+
+// Entry point for externally delivered RTCP packets; see ReceivedRTPPacket()
+// for the rationale behind the dummy source address. Always returns 0.
+WebRtc_Word32
+Channel::ReceivedRTCPPacket(const WebRtc_Word8* data, WebRtc_Word32 length)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::ReceivedRTCPPacket()");
+    const char dummyIP[] = "127.0.0.1";
+    IncomingRTCPPacket(data, length, dummyIP, 0);
+    return 0;
+}
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+// Retrieves the remote peer's RTP/RTCP ports and IP address from the
+// internal socket transport. |ipAddr| must hold at least 64 bytes, which
+// accommodates kIpAddressVersion6Length. Returns 0 on success, -1 on error.
+WebRtc_Word32
+Channel::GetSourceInfo(int& rtpPort, int& rtcpPort, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSourceInfo()");
+
+    WebRtc_UWord16 rtpPortModule;
+    WebRtc_UWord16 rtcpPortModule;
+    char ipaddr[UdpTransport::kIpAddressVersion6Length] = {0};
+
+    if (_socketTransportModule.RemoteSocketInformation(ipaddr,
+                                                       rtpPortModule,
+                                                       rtcpPortModule) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSourceInfo() failed to retrieve remote socket information");
+        return -1;
+    }
+    strcpy(ipAddr, ipaddr);
+    rtpPort = rtpPortModule;
+    rtcpPort = rtcpPortModule;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "GetSourceInfo() => rtpPort=%d, rtcpPort=%d, ipAddr=%s",
+                 rtpPort, rtcpPort, ipAddr);
+    return 0;
+}
+
+// Switches the internal socket transport to IPv6. Must be called before any
+// sockets are initialized. Returns 0 on success, -1 (with last error set)
+// on failure; on socket errors the transport's own error code is traced.
+WebRtc_Word32
+Channel::EnableIPv6()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::EnableIPv6()");
+    if (_socketTransportModule.ReceiveSocketsInitialized() ||
+        _socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "EnableIPv6() socket layer is already initialized");
+        return -1;
+    }
+    if (_socketTransportModule.EnableIpV6() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_ERROR, kTraceError,
+            "EnableIPv6() failed to enable IPv6");
+        const UdpTransport::ErrorCode lastError =
+            _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d", lastError);
+        return -1;
+    }
+    return 0;
+}
+
+// Returns true if the internal socket transport is operating in IPv6 mode.
+bool
+Channel::IPv6IsEnabled() const
+{
+    bool isEnabled = _socketTransportModule.IpV6Enabled();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "IPv6IsEnabled() => %d", isEnabled);
+    return isEnabled;
+}
+
+// Installs a source filter on the internal socket transport so that only
+// packets from the given ports/IP are accepted. Ports are set first, then
+// the IP filter. Returns 0 on success, -1 (with last error set) on failure.
+WebRtc_Word32
+Channel::SetSourceFilter(int rtpPort, int rtcpPort, const char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSourceFilter()");
+    if (_socketTransportModule.SetFilterPorts(
+        static_cast<WebRtc_UWord16>(rtpPort),
+        static_cast<WebRtc_UWord16>(rtcpPort)) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "SetSourceFilter() failed to set filter ports");
+        const UdpTransport::ErrorCode lastError =
+            _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d",
+                     lastError);
+        return -1;
+    }
+    const char* filterIpAddress = ipAddr;
+    if (_socketTransportModule.SetFilterIP(filterIpAddress) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_IP_ADDRESS, kTraceError,
+            "SetSourceFilter() failed to set filter IP address");
+        const UdpTransport::ErrorCode lastError =
+           _socketTransportModule.LastError();
+        WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport::LastError() => %d", lastError);
+        return -1;
+    }
+    return 0;
+}
+
+// Reads back the currently configured source filter. A missing IP filter is
+// not treated as an error: the address is returned as an empty string.
+// Always returns 0 (a port-query failure only sets a warning).
+WebRtc_Word32
+Channel::GetSourceFilter(int& rtpPort, int& rtcpPort, char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSourceFilter()");
+    WebRtc_UWord16 rtpFilterPort(0);
+    WebRtc_UWord16 rtcpFilterPort(0);
+    if (_socketTransportModule.FilterPorts(rtpFilterPort, rtcpFilterPort) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
+            "GetSourceFilter() failed to retrieve filter ports");
+    }
+    char ipAddrTmp[UdpTransport::kIpAddressVersion6Length] = {0};
+    if (_socketTransportModule.FilterIP(ipAddrTmp) != 0)
+    {
+        // no filter has been configured (not seen as an error)
+        memset(ipAddrTmp,
+               0, UdpTransport::kIpAddressVersion6Length);
+    }
+    rtpPort = static_cast<int> (rtpFilterPort);
+    rtcpPort = static_cast<int> (rtcpFilterPort);
+    strcpy(ipAddr, ipAddrTmp);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "GetSourceFilter() => rtpPort=%d, rtcpPort=%d, ipAddr=%s",
+                 rtpPort, rtcpPort, ipAddr);
+    return 0;
+}
+
+// Sets the DSCP (TOS) value and, optionally, the 802.1p priority (PCP) on
+// the internal socket transport. priority == -1 leaves PCP unchanged.
+// Socket-level failures are mapped from the transport's error codes onto
+// VoE error codes. Returns 0 on success, -1 on failure.
+WebRtc_Word32
+Channel::SetSendTOS(int DSCP, int priority, bool useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendTOS(DSCP=%d, useSetSockopt=%d)",
+                 DSCP, (int)useSetSockopt);
+
+    // Set TOS value and possibly try to force usage of setsockopt()
+    if (_socketTransportModule.SetToS(DSCP, useSetSockopt) != 0)
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kTosError:
+            _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                               "SetSendTOS() TOS error");
+            break;
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(
+                    VE_TOS_GQOS_CONFLICT, kTraceError,
+                    "SetSendTOS() GQOS error");
+            break;
+        case UdpTransport::kTosInvalid:
+            // can't switch SetSockOpt method without disabling TOS first, or
+            // SetSockopt() call failed
+            _engineStatisticsPtr->SetLastError(VE_TOS_INVALID, kTraceError,
+                                               "SetSendTOS() invalid TOS");
+            break;
+        case UdpTransport::kSocketInvalid:
+            _engineStatisticsPtr->SetLastError(VE_SOCKET_ERROR, kTraceError,
+                                               "SetSendTOS() invalid Socket");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                               "SetSendTOS() TOS error");
+            break;
+        }
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport =>  lastError = %d",
+                     lastSockError);
+        return -1;
+    }
+
+    // Set priority (PCP) value, -1 means don't change
+    if (-1 != priority)
+    {
+        if (_socketTransportModule.SetPCP(priority) != 0)
+        {
+            UdpTransport::ErrorCode lastSockError(
+                _socketTransportModule.LastError());
+            switch (lastSockError)
+            {
+            case UdpTransport::kPcpError:
+                _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                                   "SetSendTOS() PCP error");
+                break;
+            case UdpTransport::kQosError:
+                _engineStatisticsPtr->SetLastError(
+                        VE_TOS_GQOS_CONFLICT, kTraceError,
+                        "SetSendTOS() GQOS conflict");
+                break;
+            case UdpTransport::kSocketInvalid:
+                _engineStatisticsPtr->SetLastError(
+                        VE_SOCKET_ERROR, kTraceError,
+                        "SetSendTOS() invalid Socket");
+                break;
+            default:
+                _engineStatisticsPtr->SetLastError(VE_TOS_ERROR, kTraceError,
+                                                   "SetSendTOS() PCP error");
+                break;
+            }
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                         VoEId(_instanceId,_channelId),
+                         "UdpTransport =>  lastError = %d",
+                         lastSockError);
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+// Reads back the DSCP, PCP priority and setsockopt-usage flag from the
+// internal socket transport. Returns 0 on success, -1 on failure.
+WebRtc_Word32
+Channel::GetSendTOS(int &DSCP, int& priority, bool &useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendTOS(DSCP=?, useSetSockopt=?)");
+    WebRtc_Word32 dscp(0), prio(0);
+    bool setSockopt(false);
+    if (_socketTransportModule.ToS(dscp, setSockopt) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSendTOS() failed to get TOS info");
+        return -1;
+    }
+    if (_socketTransportModule.PCP(prio) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "GetSendTOS() failed to get PCP info");
+        return -1;
+    }
+    DSCP = static_cast<int> (dscp);
+    priority = static_cast<int> (prio);
+    useSetSockopt = setSockopt;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetSendTOS() => DSCP=%d, priority=%d, useSetSockopt=%d",
+                 DSCP, priority, (int)useSetSockopt);
+    return 0;
+}
+
+#if defined(_WIN32)
+// Windows-only: enables/disables GQoS on the internal socket transport.
+// Requires receive and send sockets to be initialized first; validates
+// |serviceType| against the Winsock SERVICETYPE_* constants and
+// |overrideDSCP| against the 6-bit DSCP range, and rejects conflicting
+// QoS/DSCP-override combinations. Returns 0 on success, -1 on failure.
+WebRtc_Word32
+Channel::SetSendGQoS(bool enable, int serviceType, int overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetSendGQoS(enable=%d, serviceType=%d, "
+                 "overrideDSCP=%d)",
+                 (int)enable, serviceType, overrideDSCP);
+    if(!_socketTransportModule.ReceiveSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKETS_NOT_INITED, kTraceError,
+            "SetSendGQoS() GQoS state must be set after sockets are created");
+        return -1;
+    }
+    if(!_socketTransportModule.SendSocketsInitialized())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_DESTINATION_NOT_INITED, kTraceError,
+            "SetSendGQoS() GQoS state must be set after sending side is "
+            "initialized");
+        return -1;
+    }
+    if (enable &&
+        (serviceType != SERVICETYPE_BESTEFFORT) &&
+        (serviceType != SERVICETYPE_CONTROLLEDLOAD) &&
+        (serviceType != SERVICETYPE_GUARANTEED) &&
+        (serviceType != SERVICETYPE_QUALITATIVE))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendGQoS() Invalid service type");
+        return -1;
+    }
+    if (enable && ((overrideDSCP <  0) || (overrideDSCP > 63)))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendGQoS() Invalid overrideDSCP value");
+        return -1;
+    }
+
+    // Avoid GQoS/ToS conflict when user wants to override the default DSCP
+    // mapping
+    bool QoS(false);
+    WebRtc_Word32 sType(0);
+    WebRtc_Word32 ovrDSCP(0);
+    if (_socketTransportModule.QoS(QoS, sType, ovrDSCP))
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+            "SetSendGQoS() failed to get QOS info");
+        return -1;
+    }
+    if (QoS && ovrDSCP == 0 && overrideDSCP != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_TOS_GQOS_CONFLICT, kTraceError,
+            "SetSendGQoS() QOS is already enabled and overrideDSCP differs,"
+            " not allowed");
+        return -1;
+    }
+    const WebRtc_Word32 maxBitrate(0);
+    if (_socketTransportModule.SetQoS(enable,
+                                      static_cast<WebRtc_Word32>(serviceType),
+                                      maxBitrate,
+                                      static_cast<WebRtc_Word32>(overrideDSCP),
+                                      true))
+    {
+        UdpTransport::ErrorCode lastSockError(
+            _socketTransportModule.LastError());
+        switch (lastSockError)
+        {
+        case UdpTransport::kQosError:
+            _engineStatisticsPtr->SetLastError(VE_GQOS_ERROR, kTraceError,
+                                               "SetSendGQoS() QOS error");
+            break;
+        default:
+            _engineStatisticsPtr->SetLastError(VE_SOCKET_ERROR, kTraceError,
+                                               "SetSendGQoS() Socket error");
+            break;
+        }
+        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "UdpTransport() => lastError = %d",
+                     lastSockError);
+        return -1;
+    }
+    return 0;
+}
+#endif
+
+#if defined(_WIN32)
+// Windows-only: reads back the GQoS state from the internal socket
+// transport. The return value of QoS() is ignored; the defaults (false/0)
+// are reported on failure. Always returns 0.
+WebRtc_Word32
+Channel::GetSendGQoS(bool &enabled, int &serviceType, int &overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetSendGQoS(enable=?, serviceType=?, "
+                 "overrideDSCP=?)");
+
+    bool QoS(false);
+    WebRtc_Word32 serviceTypeModule(0);
+    WebRtc_Word32 overrideDSCPModule(0);
+    _socketTransportModule.QoS(QoS, serviceTypeModule, overrideDSCPModule);
+
+    enabled = QoS;
+    serviceType = static_cast<int> (serviceTypeModule);
+    overrideDSCP = static_cast<int> (overrideDSCPModule);
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "GetSendGQoS() => enabled=%d, serviceType=%d, overrideDSCP=%d",
+                 (int)enabled, serviceType, overrideDSCP);
+    return 0;
+}
+#endif
+#endif
+
+// Enables/disables RTP packet-timeout notification in the RTP/RTCP module.
+// When enabled, only the RTP timeout is armed (RTCP timeout stays 0);
+// disabling clears both. Always returns 0.
+WebRtc_Word32
+Channel::SetPacketTimeoutNotification(bool enable, int timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetPacketTimeoutNotification()");
+    if (enable)
+    {
+        const WebRtc_UWord32 RTPtimeoutMS = 1000*timeoutSeconds;
+        const WebRtc_UWord32 RTCPtimeoutMS = 0;
+        _rtpRtcpModule->SetPacketTimeout(RTPtimeoutMS, RTCPtimeoutMS);
+        _rtpPacketTimeOutIsEnabled = true;
+        _rtpTimeOutSeconds = timeoutSeconds;
+    }
+    else
+    {
+        _rtpRtcpModule->SetPacketTimeout(0, 0);
+        _rtpPacketTimeOutIsEnabled = false;
+        _rtpTimeOutSeconds = 0;
+    }
+    return 0;
+}
+
+// Reports whether packet-timeout notification is enabled and, if so, the
+// configured timeout. NOTE: |timeoutSeconds| is deliberately left untouched
+// when the feature is disabled. Always returns 0.
+WebRtc_Word32
+Channel::GetPacketTimeoutNotification(bool& enabled, int& timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::GetPacketTimeoutNotification()");
+    enabled = _rtpPacketTimeOutIsEnabled;
+    if (enabled)
+    {
+        timeoutSeconds = _rtpTimeOutSeconds;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPacketTimeoutNotification() => enabled=%d,"
+                 " timeoutSeconds=%d",
+                 enabled, timeoutSeconds);
+    return 0;
+}
+
+// Installs |observer| as the dead-or-alive connection callback target.
+// Only one observer may be registered at a time. The pointer and flag are
+// guarded by _callbackCritSect.
+WebRtc_Word32
+Channel::RegisterDeadOrAliveObserver(VoEConnectionObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::RegisterDeadOrAliveObserver()");
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (_connectionObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(VE_INVALID_OPERATION, kTraceError,
+            "RegisterDeadOrAliveObserver() observer already enabled");
+        return -1;
+    }
+
+    _connectionObserverPtr = &observer;
+    _connectionObserver = true;
+
+    return 0;
+}
+
+// Removes the dead-or-alive observer. De-registering when none is set is a
+// warning, not an error (returns 0).
+WebRtc_Word32
+Channel::DeRegisterDeadOrAliveObserver()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::DeRegisterDeadOrAliveObserver()");
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (!_connectionObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "DeRegisterDeadOrAliveObserver() observer already disabled");
+        return 0;
+    }
+
+    _connectionObserver = false;
+    _connectionObserverPtr = NULL;
+
+    return 0;
+}
+
+// Enables/disables periodic dead-or-alive monitoring in the RTP/RTCP
+// module. A missing observer only triggers a warning trace. When disabling,
+// the previously configured sample time is restored so it survives a
+// disable/enable cycle. Returns 0 on success, -1 on module failure.
+WebRtc_Word32
+Channel::SetPeriodicDeadOrAliveStatus(bool enable, int sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SetPeriodicDeadOrAliveStatus()");
+    if (!_connectionObserverPtr)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+                     "SetPeriodicDeadOrAliveStatus() connection observer has"
+                     " not been registered");
+    }
+    if (enable)
+    {
+        ResetDeadOrAliveCounters();
+    }
+    bool enabled(false);
+    WebRtc_UWord8 currentSampleTimeSec(0);
+    // Store last state (will be used later if dead-or-alive is disabled).
+    _rtpRtcpModule->PeriodicDeadOrAliveStatus(enabled, currentSampleTimeSec);
+    // Update the dead-or-alive state.
+    if (_rtpRtcpModule->SetPeriodicDeadOrAliveStatus(
+        enable, (WebRtc_UWord8)sampleTimeSeconds) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+                VE_RTP_RTCP_MODULE_ERROR,
+                kTraceError,
+                "SetPeriodicDeadOrAliveStatus() failed to set dead-or-alive "
+                "status");
+        return -1;
+    }
+    if (!enable)
+    {
+        // Restore last utilized sample time.
+        // Without this, the sample time would always be reset to default
+        // (2 sec), each time dead-or-alived was disabled without sample-time
+        // parameter.
+        _rtpRtcpModule->SetPeriodicDeadOrAliveStatus(enable,
+                                                     currentSampleTimeSec);
+    }
+    return 0;
+}
+
+// Reads back the periodic dead-or-alive monitoring state from the RTP/RTCP
+// module. Always returns 0.
+WebRtc_Word32
+Channel::GetPeriodicDeadOrAliveStatus(bool& enabled, int& sampleTimeSeconds)
+{
+    // Use a proper WebRtc_UWord8 temporary instead of the former
+    // (WebRtc_UWord8&) cast of the int reference: that cast made the module
+    // write only a single byte of the caller's int, leaving the remaining
+    // bytes unchanged/uninitialized (and the result endianness-dependent).
+    WebRtc_UWord8 sampleTimeSec(0);
+    _rtpRtcpModule->PeriodicDeadOrAliveStatus(enabled, sampleTimeSec);
+    sampleTimeSeconds = static_cast<int>(sampleTimeSec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "GetPeriodicDeadOrAliveStatus() => enabled=%d,"
+                 " sampleTimeSeconds=%d",
+                 enabled, sampleTimeSeconds);
+    return 0;
+}
+
+// Sends a raw, caller-supplied packet over the channel's RTP or RTCP
+// socket. Only valid when the internal (non-external) transport is in use,
+// RTCP is enabled if the RTCP socket is requested, and the channel is
+// sending. On success |transmittedBytes| holds the number of bytes handed
+// to the socket; on transmission failure it is set to 0 and -1 is returned.
+WebRtc_Word32
+Channel::SendUDPPacket(const void* data,
+                       unsigned int length,
+                       int& transmittedBytes,
+                       bool useRtcpSocket)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "Channel::SendUDPPacket()");
+    if (_externalTransport)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SendUDPPacket() external transport is enabled");
+        return -1;
+    }
+    if (useRtcpSocket && !_rtpRtcpModule->RTCP())
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_RTCP_ERROR, kTraceError,
+            "SendUDPPacket() RTCP is disabled");
+        return -1;
+    }
+    if (!_sending)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_NOT_SENDING, kTraceError,
+            "SendUDPPacket() not sending");
+        return -1;
+    }
+
+    // Copy the payload since SendPacketRaw() takes a mutable buffer.
+    // Note: operator new throws std::bad_alloc on failure and never returns
+    // NULL, so the former "if (NULL == dataC)" check was dead code and has
+    // been removed.
+    char* dataC = new char[length];
+    memcpy(dataC, data, length);
+
+    transmittedBytes = SendPacketRaw(dataC, length, useRtcpSocket);
+
+    delete [] dataC;
+
+    if (transmittedBytes <= 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+                VE_SEND_ERROR, kTraceError,
+                "SendUDPPacket() transmission failed");
+        transmittedBytes = 0;
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+                 "SendUDPPacket() => transmittedBytes=%d", transmittedBytes);
+    return 0;
+}
+
+
int Channel::StartPlayingFileLocally(const char* fileName,
                                     const bool loop,
                                     const FileFormats format,
                                     const int startPosition,
                                     const float volumeScaling,
                                     const int stopPosition,
                                     const CodecInst* codecInst)
{
    // Starts playout of a file into this channel's output signal.
    // Fails with VE_ALREADY_PLAYING if a local file is already playing.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StartPlayingFileLocally(fileNameUTF8[]=%s, loop=%d,"
                 " format=%d, volumeScaling=%5.3f, startPosition=%d, "
                 "stopPosition=%d)", fileName, loop, format, volumeScaling,
                 startPosition, stopPosition);

    if (_outputFilePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceError,
            "StartPlayingFileLocally() is already playing");
        return -1;
    }

    // Scope block for |_fileCritSect|: the lock must be released before
    // RegisterFilePlayingToMixer() runs, because adding the participant can
    // make the mixer pull file frames, which takes the same lock -- see the
    // deadlock note in RegisterFilePlayingToMixer().
    {
        CriticalSectionScoped cs(&_fileCritSect);

        // Replace any leftover player instance from a previous session.
        if (_outputFilePlayerPtr)
        {
            _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
            _outputFilePlayerPtr = NULL;
        }

        _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
            _outputFilePlayerId, (const FileFormats)format);

        if (_outputFilePlayerPtr == NULL)
        {
            _engineStatisticsPtr->SetLastError(
                VE_INVALID_ARGUMENT, kTraceError,
                "StartPlayingFileLocally() filePlayer format is not correct");
            return -1;
        }

        // Progress notifications are not used by VoE.
        const WebRtc_UWord32 notificationTime(0);

        if (_outputFilePlayerPtr->StartPlayingFile(
            fileName,
            loop,
            startPosition,
            volumeScaling,
            notificationTime,
            stopPosition,
            (const CodecInst*)codecInst) != 0)
        {
            _engineStatisticsPtr->SetLastError(
                VE_BAD_FILE, kTraceError,
                "StartPlayingFile() failed to start file playout");
            _outputFilePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
            _outputFilePlayerPtr = NULL;
            return -1;
        }
        _outputFilePlayerPtr->RegisterModuleFileCallback(this);
        _outputFilePlaying = true;
    }

    // Performed outside the file lock (see comment above).
    if (RegisterFilePlayingToMixer() != 0)
        return -1;

    return 0;
}
+
int Channel::StartPlayingFileLocally(InStream* stream,
                                     const FileFormats format,
                                     const int startPosition,
                                     const float volumeScaling,
                                     const int stopPosition,
                                     const CodecInst* codecInst)
{
    // Stream-based variant of StartPlayingFileLocally(): plays out audio
    // read from a caller-owned InStream instead of a file on disk.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StartPlayingFileLocally(format=%d,"
                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if(stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileLocally() NULL as input stream");
        return -1;
    }


    if (_outputFilePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceError,
            "StartPlayingFileLocally() is already playing");
        return -1;
    }

    // Scope block for |_fileCritSect|: released before the mixer call below
    // to avoid the deadlock described in RegisterFilePlayingToMixer().
    {
        CriticalSectionScoped cs(&_fileCritSect);

        // Destroy the old instance
        if (_outputFilePlayerPtr)
        {
            _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
            _outputFilePlayerPtr = NULL;
        }

        // Create the instance
        _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
            _outputFilePlayerId,
            (const FileFormats)format);

        if (_outputFilePlayerPtr == NULL)
        {
            _engineStatisticsPtr->SetLastError(
                VE_INVALID_ARGUMENT, kTraceError,
                "StartPlayingFileLocally() filePlayer format isnot correct");
            return -1;
        }

        // Progress notifications are not used by VoE.
        const WebRtc_UWord32 notificationTime(0);

        if (_outputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
                                                   volumeScaling,
                                                   notificationTime,
                                                   stopPosition, codecInst) != 0)
        {
            _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
                                               "StartPlayingFile() failed to "
                                               "start file playout");
            _outputFilePlayerPtr->StopPlayingFile();
            FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
            _outputFilePlayerPtr = NULL;
            return -1;
        }
        _outputFilePlayerPtr->RegisterModuleFileCallback(this);
        _outputFilePlaying = true;
    }

    // Performed outside the file lock (see comment above).
    if (RegisterFilePlayingToMixer() != 0)
        return -1;

    return 0;
}
+
int Channel::StopPlayingFileLocally()
{
    // Stops local file playout, destroys the player instance and removes
    // this channel as an anonymous mixer participant. Calling it when no
    // file is playing is only a warning (returns 0).
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StopPlayingFileLocally()");

    if (!_outputFilePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceWarning,
            "StopPlayingFileLocally() isnot playing");
        return 0;
    }

    // Scope block for |_fileCritSect|: must be released before the mixer
    // call below (see comment further down).
    {
        CriticalSectionScoped cs(&_fileCritSect);

        if (_outputFilePlayerPtr->StopPlayingFile() != 0)
        {
            // NOTE(review): VE_STOP_RECORDING_FAILED is reported for a
            // playout-stop failure -- looks like a copy/paste of the
            // recording path; confirm the intended error code.
            _engineStatisticsPtr->SetLastError(
                VE_STOP_RECORDING_FAILED, kTraceError,
                "StopPlayingFile() could not stop playing");
            return -1;
        }
        _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
        _outputFilePlayerPtr = NULL;
        _outputFilePlaying = false;
    }
    // _fileCritSect cannot be taken while calling
    // SetAnonymousMixibilityStatus. Refer to comments in
    // StartPlayingFileLocally(const char* ...) for more details.
    if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, false) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
            "StopPlayingFile() failed to stop participant from playing as"
            "file in the mixer");
        return -1;
    }

    return 0;
}
+
+int Channel::IsPlayingFileLocally() const
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::IsPlayingFileLocally()");
+
+ return (WebRtc_Word32)_outputFilePlaying;
+}
+
int Channel::RegisterFilePlayingToMixer()
{
    // Adds this channel to the output mixer as an anonymous participant so
    // the file being played locally is heard. On mixer failure, the file
    // playout started by the caller is rolled back and -1 is returned.
    //
    // Return success for not registering for file playing to mixer if:
    // 1. playing file before playout is started on that channel.
    // 2. starting playout without file playing on that channel.
    if (!_playing || !_outputFilePlaying)
    {
        return 0;
    }

    // |_fileCritSect| cannot be taken while calling
    // SetAnonymousMixabilityStatus() since as soon as the participant is added
    // frames can be pulled by the mixer. Since the frames are generated from
    // the file, _fileCritSect will be taken. This would result in a deadlock.
    if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
    {
        // Roll back: tear down the player under the file lock.
        CriticalSectionScoped cs(&_fileCritSect);
        _outputFilePlaying = false;
        _engineStatisticsPtr->SetLastError(
            VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
            "StartPlayingFile() failed to add participant as file to mixer");
        _outputFilePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
        _outputFilePlayerPtr = NULL;
        return -1;
    }

    return 0;
}
+
+int Channel::ScaleLocalFilePlayout(const float scale)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::ScaleLocalFilePlayout(scale=%5.3f)", scale);
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ if (!_outputFilePlaying)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "ScaleLocalFilePlayout() isnot playing");
+ return -1;
+ }
+ if ((_outputFilePlayerPtr == NULL) ||
+ (_outputFilePlayerPtr->SetAudioScaling(scale) != 0))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "SetAudioScaling() failed to scale the playout");
+ return -1;
+ }
+
+ return 0;
+}
+
+int Channel::GetLocalPlayoutPosition(int& positionMs)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetLocalPlayoutPosition(position=?)");
+
+ WebRtc_UWord32 position;
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ if (_outputFilePlayerPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "GetLocalPlayoutPosition() filePlayer instance doesnot exist");
+ return -1;
+ }
+
+ if (_outputFilePlayerPtr->GetPlayoutPosition(position) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_FILE, kTraceError,
+ "GetLocalPlayoutPosition() failed");
+ return -1;
+ }
+ positionMs = position;
+
+ return 0;
+}
+
int Channel::StartPlayingFileAsMicrophone(const char* fileName,
                                          const bool loop,
                                          const FileFormats format,
                                          const int startPosition,
                                          const float volumeScaling,
                                          const int stopPosition,
                                          const CodecInst* codecInst)
{
    // Starts playing a file as (or mixed with, see SetMixWithMicStatus())
    // the channel's microphone input. Already playing is only a warning
    // (returns 0). Unlike the "Locally" variants there is no mixer
    // registration step, so the file lock is held for the whole function.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
                 "loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
                 "stopPosition=%d)", fileName, loop, format, volumeScaling,
                 startPosition, stopPosition);

    if (_inputFilePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() filePlayer is playing");
        return 0;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance
    if (_inputFilePlayerPtr)
    {
        _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
        _inputFilePlayerPtr = NULL;
    }

    // Create the instance
    _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
        _inputFilePlayerId, (const FileFormats)format);

    if (_inputFilePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
        return -1;
    }

    // Progress notifications are not used by VoE.
    const WebRtc_UWord32 notificationTime(0);

    if (_inputFilePlayerPtr->StartPlayingFile(
        fileName,
        loop,
        startPosition,
        volumeScaling,
        notificationTime,
        stopPosition,
        (const CodecInst*)codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFile() failed to start file playout");
        _inputFilePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
        _inputFilePlayerPtr = NULL;
        return -1;
    }
    _inputFilePlayerPtr->RegisterModuleFileCallback(this);
    _inputFilePlaying = true;

    return 0;
}
+
int Channel::StartPlayingFileAsMicrophone(InStream* stream,
                                          const FileFormats format,
                                          const int startPosition,
                                          const float volumeScaling,
                                          const int stopPosition,
                                          const CodecInst* codecInst)
{
    // Stream-based variant of StartPlayingFileAsMicrophone(): feeds audio
    // read from a caller-owned InStream into the microphone path.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StartPlayingFileAsMicrophone(format=%d, "
                 "volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
                 format, volumeScaling, startPosition, stopPosition);

    if(stream == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartPlayingFileAsMicrophone NULL as input stream");
        return -1;
    }

    // Already playing is only a warning (returns 0).
    if (_inputFilePlaying)
    {
        _engineStatisticsPtr->SetLastError(
            VE_ALREADY_PLAYING, kTraceWarning,
            "StartPlayingFileAsMicrophone() is playing");
        return 0;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance
    if (_inputFilePlayerPtr)
    {
        _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
        _inputFilePlayerPtr = NULL;
    }

    // Create the instance
    _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
        _inputFilePlayerId, (const FileFormats)format);

    if (_inputFilePlayerPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartPlayingInputFile() filePlayer format isnot correct");
        return -1;
    }

    // Progress notifications are not used by VoE.
    const WebRtc_UWord32 notificationTime(0);

    if (_inputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
                                              volumeScaling, notificationTime,
                                              stopPosition, codecInst) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
                                           "StartPlayingFile() failed to start "
                                           "file playout");
        _inputFilePlayerPtr->StopPlayingFile();
        FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
        _inputFilePlayerPtr = NULL;
        return -1;
    }

    _inputFilePlayerPtr->RegisterModuleFileCallback(this);
    _inputFilePlaying = true;

    return 0;
}
+
+int Channel::StopPlayingFileAsMicrophone()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::StopPlayingFileAsMicrophone()");
+
+ if (!_inputFilePlaying)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "StopPlayingFileAsMicrophone() isnot playing");
+ return 0;
+ }
+
+ CriticalSectionScoped cs(&_fileCritSect);
+ if (_inputFilePlayerPtr->StopPlayingFile() != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STOP_RECORDING_FAILED, kTraceError,
+ "StopPlayingFile() could not stop playing");
+ return -1;
+ }
+ _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
+ FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
+ _inputFilePlayerPtr = NULL;
+ _inputFilePlaying = false;
+
+ return 0;
+}
+
+int Channel::IsPlayingFileAsMicrophone() const
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::IsPlayingFileAsMicrophone()");
+
+ return _inputFilePlaying;
+}
+
+int Channel::ScaleFileAsMicrophonePlayout(const float scale)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::ScaleFileAsMicrophonePlayout(scale=%5.3f)", scale);
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ if (!_inputFilePlaying)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "ScaleFileAsMicrophonePlayout() isnot playing");
+ return -1;
+ }
+
+ if ((_inputFilePlayerPtr == NULL) ||
+ (_inputFilePlayerPtr->SetAudioScaling(scale) != 0))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "SetAudioScaling() failed to scale playout");
+ return -1;
+ }
+
+ return 0;
+}
+
int Channel::StartRecordingPlayout(const char* fileName,
                                   const CodecInst* codecInst)
{
    // Starts recording this channel's playout signal to |fileName|.
    // A NULL |codecInst| selects 16 kHz linear PCM; L16/PCMU/PCMA produce a
    // WAV file, any other codec a compressed file. Already recording is
    // only a warning (returns 0).
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::StartRecordingPlayout(fileName=%s)", fileName);

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
    // Fallback codec used when the caller passes NULL.
    CodecInst dummyCodec={100,"L16",16000,320,1,320000};

    // Only mono and stereo are supported.
    if ((codecInst != NULL) &&
      ((codecInst->channels < 1) || (codecInst->channels > 2)))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return(-1);
    }
    if(codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst=&dummyCodec;
    }
    else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _outputFileRecorderId, (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format isnot correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(
        fileName, (const CodecInst&)*codecInst, notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}
+
+int Channel::StartRecordingPlayout(OutStream* stream,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::StartRecordingPlayout()");
+
+ if (_outputFileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "StartRecordingPlayout() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+ CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+ if (codecInst != NULL && codecInst->channels != 1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() invalid compression");
+ return(-1);
+ }
+ if(codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst=&dummyCodec;
+ }
+ else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ }
+ else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ // Destroy the old instance
+ if (_outputFileRecorderPtr)
+ {
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ }
+
+ _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+ _outputFileRecorderId, (const FileFormats)format);
+ if (_outputFileRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+ "StartRecordingPlayout() failed to "
+ "start file recording");
+ _outputFileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ return -1;
+ }
+
+ _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+ _outputFileRecording = true;
+
+ return 0;
+}
+
+int Channel::StopRecordingPlayout()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "Channel::StopRecordingPlayout()");
+
+ if (!_outputFileRecording)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+ "StopRecordingPlayout() isnot recording");
+ return -1;
+ }
+
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ if (_outputFileRecorderPtr->StopRecording() != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STOP_RECORDING_FAILED, kTraceError,
+ "StopRecording() could not stop recording");
+ return(-1);
+ }
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ _outputFileRecording = false;
+
+ return 0;
+}
+
void
Channel::SetMixWithMicStatus(bool mix)
{
    // When true, the file-as-microphone signal is mixed with the live
    // microphone signal instead of replacing it. Plain flag write with no
    // locking -- presumably benign; confirm callers' threading.
    _mixFileWithMicrophone=mix;
}
+
+int
+Channel::GetSpeechOutputLevel(WebRtc_UWord32& level) const
+{
+ WebRtc_Word8 currentLevel = _outputAudioLevel.Level();
+ level = static_cast<WebRtc_Word32> (currentLevel);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetSpeechOutputLevel() => level=%u", level);
+ return 0;
+}
+
+int
+Channel::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const
+{
+ WebRtc_Word16 currentLevel = _outputAudioLevel.LevelFullRange();
+ level = static_cast<WebRtc_Word32> (currentLevel);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetSpeechOutputLevelFullRange() => level=%u", level);
+ return 0;
+}
+
int
Channel::SetMute(bool enable)
{
    // Mutes (true) or unmutes (false) this channel's outgoing speech.
    // Plain flag write; the flag is consumed elsewhere in the send path.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::SetMute(enable=%d)", enable);
    _mute = enable;
    return 0;
}
+
bool
Channel::Mute() const
{
    // Returns whether the channel's outgoing speech is currently muted.
    return _mute;
}
+
int
Channel::SetOutputVolumePan(float left, float right)
{
    // Stores the left/right panning gains applied to this channel's
    // playout signal. The values are consumed in the output path.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::SetOutputVolumePan()");
    _panLeft = left;
    _panRight = right;
    return 0;
}
+
int
Channel::GetOutputVolumePan(float& left, float& right) const
{
    // Returns the currently configured left/right panning gains.
    left = _panLeft;
    right = _panRight;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId,_channelId),
                 "GetOutputVolumePan() => left=%3.2f, right=%3.2f", left, right);
    return 0;
}
+
int
Channel::SetChannelOutputVolumeScaling(float scaling)
{
    // Stores the overall gain applied to this channel's playout signal.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::SetChannelOutputVolumeScaling()");
    _outputGain = scaling;
    return 0;
}
+
int
Channel::GetChannelOutputVolumeScaling(float& scaling) const
{
    // Returns the currently configured playout gain for this channel.
    scaling = _outputGain;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId,_channelId),
                 "GetChannelOutputVolumeScaling() => scaling=%3.2f", scaling);
    return 0;
}
+
+#ifdef WEBRTC_SRTP
+
int
Channel::EnableSRTPSend(
    CipherTypes cipherType,
    int cipherKeyLength,
    AuthenticationTypes authType,
    int authKeyLength,
    int authTagLength,
    SecurityLevels level,
    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
    bool useForRTCP)
{
    // Enables SRTP protection of outgoing packets using the internal SRTP
    // module and installs it as the channel's Encryption interface.
    // Fails if encryption is already enabled or the key/tag lengths are out
    // of range for the selected security level and authentication type.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::EnableSRTPSend()");

    CriticalSectionScoped cs(&_callbackCritSect);

    if (_encrypting)
    {
        // NOTE(review): reported with kTraceWarning severity but still
        // returns -1 -- confirm the intended severity/result pairing.
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceWarning,
            "EnableSRTPSend() encryption already enabled");
        return -1;
    }

    if (key == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "EnableSRTPSend() invalid key string");
        return -1;
    }

    // Validate key/tag lengths per security level:
    //  - encryption levels: cipher key must be within the engine limits;
    //  - HMAC-SHA1 auth: key and tag capped at the SHA1 limit;
    //  - null auth: key and tag capped at the null-auth limits.
    if (((kEncryption == level ||
            kEncryptionAndAuthentication == level) &&
            (cipherKeyLength < kVoiceEngineMinSrtpEncryptLength ||
            cipherKeyLength > kVoiceEngineMaxSrtpEncryptLength)) ||
        ((kAuthentication == level ||
            kEncryptionAndAuthentication == level) &&
            kAuthHmacSha1 == authType &&
            (authKeyLength > kVoiceEngineMaxSrtpAuthSha1Length ||
            authTagLength > kVoiceEngineMaxSrtpAuthSha1Length)) ||
        ((kAuthentication == level ||
            kEncryptionAndAuthentication == level) &&
            kAuthNull == authType &&
            (authKeyLength > kVoiceEngineMaxSrtpKeyAuthNullLength ||
            authTagLength > kVoiceEngineMaxSrtpTagAuthNullLength)))
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "EnableSRTPSend() invalid key length(s)");
        return -1;
    }


    if (_srtpModule.EnableSRTPEncrypt(
        !useForRTCP,
        (SrtpModule::CipherTypes)cipherType,
        cipherKeyLength,
        (SrtpModule::AuthenticationTypes)authType,
        authKeyLength, authTagLength,
        (SrtpModule::SecurityLevels)level,
        key) == -1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_SRTP_ERROR, kTraceError,
            "EnableSRTPSend() failed to enable SRTP encryption");
        return -1;
    }

    // Install the SRTP module as the Encryption interface unless an
    // interface is already in place (e.g. set by EnableSRTPReceive()).
    if (_encryptionPtr == NULL)
    {
        _encryptionPtr = &_srtpModule;
    }
    _encrypting = true;

    return 0;
}
+
+int
+Channel::DisableSRTPSend()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DisableSRTPSend()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_encrypting)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DisableSRTPSend() SRTP encryption already disabled");
+ return 0;
+ }
+
+ _encrypting = false;
+
+ if (_srtpModule.DisableSRTPEncrypt() == -1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SRTP_ERROR, kTraceError,
+ "DisableSRTPSend() failed to disable SRTP encryption");
+ return -1;
+ }
+
+ if (!_srtpModule.SRTPDecrypt() && !_srtpModule.SRTPEncrypt())
+ {
+ // Both directions are disabled
+ _encryptionPtr = NULL;
+ }
+
+ return 0;
+}
+
int
Channel::EnableSRTPReceive(
    CipherTypes cipherType,
    int cipherKeyLength,
    AuthenticationTypes authType,
    int authKeyLength,
    int authTagLength,
    SecurityLevels level,
    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
    bool useForRTCP)
{
    // Enables SRTP processing of incoming packets using the internal SRTP
    // module and installs it as the channel's Encryption interface.
    // Fails if decryption is already enabled or the key/tag lengths are out
    // of range for the selected security level and authentication type.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::EnableSRTPReceive()");

    CriticalSectionScoped cs(&_callbackCritSect);

    if (_decrypting)
    {
        // NOTE(review): reported with kTraceWarning severity but still
        // returns -1 -- confirm the intended severity/result pairing.
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceWarning,
            "EnableSRTPReceive() SRTP decryption already enabled");
        return -1;
    }

    if (key == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceWarning,
            "EnableSRTPReceive() invalid key string");
        return -1;
    }

    // Same key/tag length validation matrix as EnableSRTPSend().
    if ((((kEncryption == level) ||
            (kEncryptionAndAuthentication == level)) &&
            ((cipherKeyLength < kVoiceEngineMinSrtpEncryptLength) ||
            (cipherKeyLength > kVoiceEngineMaxSrtpEncryptLength))) ||
        (((kAuthentication == level) ||
            (kEncryptionAndAuthentication == level)) &&
            (kAuthHmacSha1 == authType) &&
            ((authKeyLength > kVoiceEngineMaxSrtpAuthSha1Length) ||
            (authTagLength > kVoiceEngineMaxSrtpAuthSha1Length))) ||
        (((kAuthentication == level) ||
            (kEncryptionAndAuthentication == level)) &&
            (kAuthNull == authType) &&
            ((authKeyLength > kVoiceEngineMaxSrtpKeyAuthNullLength) ||
            (authTagLength > kVoiceEngineMaxSrtpTagAuthNullLength))))
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "EnableSRTPReceive() invalid key length(s)");
        return -1;
    }

    if (_srtpModule.EnableSRTPDecrypt(
        !useForRTCP,
        (SrtpModule::CipherTypes)cipherType,
        cipherKeyLength,
        (SrtpModule::AuthenticationTypes)authType,
        authKeyLength,
        authTagLength,
        (SrtpModule::SecurityLevels)level,
        key) == -1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_SRTP_ERROR, kTraceError,
            "EnableSRTPReceive() failed to enable SRTP decryption");
        return -1;
    }

    // Install the SRTP module as the Encryption interface unless an
    // interface is already in place (e.g. set by EnableSRTPSend()).
    if (_encryptionPtr == NULL)
    {
        _encryptionPtr = &_srtpModule;
    }

    _decrypting = true;

    return 0;
}
+
+int
+Channel::DisableSRTPReceive()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DisableSRTPReceive()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_decrypting)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DisableSRTPReceive() SRTP decryption already disabled");
+ return 0;
+ }
+
+ _decrypting = false;
+
+ if (_srtpModule.DisableSRTPDecrypt() == -1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SRTP_ERROR, kTraceError,
+ "DisableSRTPReceive() failed to disable SRTP decryption");
+ return -1;
+ }
+
+ if (!_srtpModule.SRTPDecrypt() && !_srtpModule.SRTPEncrypt())
+ {
+ _encryptionPtr = NULL;
+ }
+
+ return 0;
+}
+
+#endif
+
+int
+Channel::RegisterExternalEncryption(Encryption& encryption)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::RegisterExternalEncryption()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (_encryptionPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "RegisterExternalEncryption() encryption already enabled");
+ return -1;
+ }
+
+ _encryptionPtr = &encryption;
+
+ _decrypting = true;
+ _encrypting = true;
+
+ return 0;
+}
+
+int
+Channel::DeRegisterExternalEncryption()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DeRegisterExternalEncryption()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_encryptionPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DeRegisterExternalEncryption() encryption already disabled");
+ return 0;
+ }
+
+ _decrypting = false;
+ _encrypting = false;
+
+ _encryptionPtr = NULL;
+
+ return 0;
+}
+
+int Channel::SendTelephoneEventOutband(unsigned char eventCode,
+ int lengthMs, int attenuationDb,
+ bool playDtmfEvent)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SendTelephoneEventOutband(..., playDtmfEvent=%d)",
+ playDtmfEvent);
+
+ _playOutbandDtmfEvent = playDtmfEvent;
+
+ if (_rtpRtcpModule->SendTelephoneEventOutband(eventCode, lengthMs,
+ attenuationDb) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SEND_DTMF_FAILED,
+ kTraceWarning,
+ "SendTelephoneEventOutband() failed to send event");
+ return -1;
+ }
+ return 0;
+}
+
int Channel::SendTelephoneEventInband(unsigned char eventCode,
                                      int lengthMs,
                                      int attenuationDb,
                                      bool playDtmfEvent)
{
    // Queues an in-band telephone event (DTMF); the queued event is
    // consumed elsewhere in the transmit path. |playDtmfEvent| is latched
    // for local rendering of the tone.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::SendTelephoneEventInband(..., playDtmfEvent=%d)",
                 playDtmfEvent);

    _playInbandDtmfEvent = playDtmfEvent;
    _inbandDtmfQueue.AddDtmf(eventCode, lengthMs, attenuationDb);

    return 0;
}
+
int
Channel::SetDtmfPlayoutStatus(bool enable)
{
    // Enables/disables playout of received in-band DTMF tones via the
    // audio coding module.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::SetDtmfPlayoutStatus()");
    if (_audioCodingModule.SetDtmfPlayoutStatus(enable) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
            "SetDtmfPlayoutStatus() failed to set Dtmf playout");
        return -1;
    }
    return 0;
}
+
bool
Channel::DtmfPlayoutStatus() const
{
    // Returns the ACM's current DTMF playout setting.
    return _audioCodingModule.DtmfPlayoutStatus();
}
+
+int
+Channel::SetSendTelephoneEventPayloadType(unsigned char type)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetSendTelephoneEventPayloadType()");
+ if (type > 127)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SetSendTelephoneEventPayloadType() invalid type");
+ return -1;
+ }
+ CodecInst codec;
+ codec.plfreq = 8000;
+ codec.pltype = type;
+ memcpy(codec.plname, "telephone-event", 16);
+ if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetSendTelephoneEventPayloadType() failed to register send"
+ "payload type");
+ return -1;
+ }
+ _sendTelephoneEventPayloadType = type;
+ return 0;
+}
+
int
Channel::GetSendTelephoneEventPayloadType(unsigned char& type)
{
    // Returns the payload type currently configured for outgoing
    // telephone events.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::GetSendTelephoneEventPayloadType()");
    type = _sendTelephoneEventPayloadType;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId,_channelId),
                 "GetSendTelephoneEventPayloadType() => type=%u", type);
    return 0;
}
+
+#ifdef WEBRTC_DTMF_DETECTION
+
WebRtc_Word32
Channel::RegisterTelephoneEventDetection(
    TelephoneEventDetectionMethods detectionMethod,
    VoETelephoneEventObserver& observer)
{
    // Registers |observer| for telephone-event (DTMF) detection and
    // configures in-band (ACM) and/or out-of-band (RTP) detection
    // according to |detectionMethod|. Fails if an observer is already set.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::RegisterTelephoneEventDetection()");
    CriticalSectionScoped cs(&_callbackCritSect);

    if (_telephoneEventDetectionPtr)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_OPERATION, kTraceError,
            "RegisterTelephoneEventDetection() detection already enabled");
        return -1;
    }

    _telephoneEventDetectionPtr = &observer;

    // Map the requested method onto the two direction flags.
    switch (detectionMethod)
    {
        case kInBand:
            _inbandTelephoneEventDetection = true;
            _outOfBandTelephoneEventDetecion = false;
            break;
        case kOutOfBand:
            _inbandTelephoneEventDetection = false;
            _outOfBandTelephoneEventDetecion = true;
            break;
        case kInAndOutOfBand:
            _inbandTelephoneEventDetection = true;
            _outOfBandTelephoneEventDetecion = true;
            break;
        default:
            _engineStatisticsPtr->SetLastError(
                VE_INVALID_ARGUMENT, kTraceError,
                "RegisterTelephoneEventDetection() invalid detection method");
            return -1;
    }

    if (_inbandTelephoneEventDetection)
    {
        // Enable in-band Dtmf detectin in the ACM.
        // NOTE(review): on ACM failure the last error is set but the
        // function continues and still returns 0 -- confirm this
        // best-effort behavior is intended.
        if (_audioCodingModule.RegisterIncomingMessagesCallback(this) != 0)
        {
            _engineStatisticsPtr->SetLastError(
                VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
                "RegisterTelephoneEventDetection() failed to enable Dtmf "
                "detection");
        }
    }

    // Enable/disable out-of-band detection of received telephone-events.
    // When enabled, RtpAudioFeedback::OnReceivedTelephoneEvent() will be
    // called two times by the RTP/RTCP module (start & end).
    const bool forwardToDecoder =
        _rtpRtcpModule->TelephoneEventForwardToDecoder();
    const bool detectEndOfTone = true;
    _rtpRtcpModule->SetTelephoneEventStatus(_outOfBandTelephoneEventDetecion,
                                            forwardToDecoder,
                                            detectEndOfTone);

    return 0;
}
+
+int
+Channel::DeRegisterTelephoneEventDetection()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::DeRegisterTelephoneEventDetection()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_telephoneEventDetectionPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION,
+ kTraceWarning,
+ "DeRegisterTelephoneEventDetection() detection already disabled");
+ return 0;
+ }
+
+ // Disable out-of-band event detection
+ const bool forwardToDecoder =
+ _rtpRtcpModule->TelephoneEventForwardToDecoder();
+ _rtpRtcpModule->SetTelephoneEventStatus(false, forwardToDecoder);
+
+ // Disable in-band Dtmf detection
+ _audioCodingModule.RegisterIncomingMessagesCallback(NULL);
+
+ _inbandTelephoneEventDetection = false;
+ _outOfBandTelephoneEventDetecion = false;
+ _telephoneEventDetectionPtr = NULL;
+
+ return 0;
+}
+
+int
+Channel::GetTelephoneEventDetectionStatus(
+ bool& enabled,
+ TelephoneEventDetectionMethods& detectionMethod)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::GetTelephoneEventDetectionStatus()");
+
+ {
+ CriticalSectionScoped cs(&_callbackCritSect);
+ enabled = (_telephoneEventDetectionPtr != NULL);
+ }
+
+ if (enabled)
+ {
+ if (_inbandTelephoneEventDetection && !_outOfBandTelephoneEventDetecion)
+ detectionMethod = kInBand;
+ else if (!_inbandTelephoneEventDetection
+ && _outOfBandTelephoneEventDetecion)
+ detectionMethod = kOutOfBand;
+ else if (_inbandTelephoneEventDetection
+ && _outOfBandTelephoneEventDetecion)
+ detectionMethod = kInAndOutOfBand;
+ else
+ {
+ assert(false);
+ return -1;
+ }
+ }
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetTelephoneEventDetectionStatus() => enabled=%d,"
+ "detectionMethod=%d", enabled, detectionMethod);
+ return 0;
+}
+
+#endif // #ifdef WEBRTC_DTMF_DETECTION
+
+int
+Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::UpdateRxVadDetection()");
+
+ int vadDecision = 1;
+
+ vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive)? 1 : 0;
+
+ if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
+ {
+ OnRxVadDetected(vadDecision);
+ _oldVadDecision = vadDecision;
+ }
+
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::UpdateRxVadDetection() => vadDecision=%d",
+ vadDecision);
+ return 0;
+}
+
+int
+Channel::RegisterRxVadObserver(VoERxVadCallback &observer)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::RegisterRxVadObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (_rxVadObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "RegisterRxVadObserver() observer already enabled");
+ return -1;
+ }
+ _rxVadObserverPtr = &observer;
+ _RxVadDetection = true;
+ return 0;
+}
+
+int
+Channel::DeRegisterRxVadObserver()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DeRegisterRxVadObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_rxVadObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DeRegisterRxVadObserver() observer already disabled");
+ return 0;
+ }
+ _rxVadObserverPtr = NULL;
+ _RxVadDetection = false;
+ return 0;
+}
+
+int
+Channel::VoiceActivityIndicator(int &activity)
+{
+ activity = _sendFrameType;
+
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::VoiceActivityIndicator(indicator=%d)", activity);
+ return 0;
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+
+int
+Channel::SetRxAgcStatus(const bool enable, const AgcModes mode)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetRxAgcStatus(enable=%d, mode=%d)",
+ (int)enable, (int)mode);
+
+ GainControl::Mode agcMode(GainControl::kFixedDigital);
+ switch (mode)
+ {
+ case kAgcDefault:
+ agcMode = GainControl::kAdaptiveDigital;
+ break;
+ case kAgcUnchanged:
+ agcMode = _rxAudioProcessingModulePtr->gain_control()->mode();
+ break;
+ case kAgcFixedDigital:
+ agcMode = GainControl::kFixedDigital;
+ break;
+ case kAgcAdaptiveDigital:
+ agcMode =GainControl::kAdaptiveDigital;
+ break;
+ default:
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SetRxAgcStatus() invalid Agc mode");
+ return -1;
+ }
+
+ if (_rxAudioProcessingModulePtr->gain_control()->set_mode(agcMode) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcStatus() failed to set Agc mode");
+ return -1;
+ }
+ if (_rxAudioProcessingModulePtr->gain_control()->Enable(enable) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcStatus() failed to set Agc state");
+ return -1;
+ }
+
+ _rxAgcIsEnabled = enable;
+
+ _rxApmIsEnabled = ((_rxAgcIsEnabled == true) || (_rxNsIsEnabled == true));
+
+ return 0;
+}
+
+int
+Channel::GetRxAgcStatus(bool& enabled, AgcModes& mode)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetRxAgcStatus(enable=?, mode=?)");
+
+ bool enable = _rxAudioProcessingModulePtr->gain_control()->is_enabled();
+ GainControl::Mode agcMode =
+ _rxAudioProcessingModulePtr->gain_control()->mode();
+
+ enabled = enable;
+
+ switch (agcMode)
+ {
+ case GainControl::kFixedDigital:
+ mode = kAgcFixedDigital;
+ break;
+ case GainControl::kAdaptiveDigital:
+ mode = kAgcAdaptiveDigital;
+ break;
+ default:
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "GetRxAgcStatus() invalid Agc mode");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+Channel::SetRxAgcConfig(const AgcConfig config)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetRxAgcConfig()");
+
+ if (_rxAudioProcessingModulePtr->gain_control()->set_target_level_dbfs(
+ config.targetLeveldBOv) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcConfig() failed to set target peak |level|"
+ "(or envelope) of the Agc");
+ return -1;
+ }
+ if (_rxAudioProcessingModulePtr->gain_control()->set_compression_gain_db(
+ config.digitalCompressionGaindB) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcConfig() failed to set the range in |gain| the"
+ " digital compression stage may apply");
+ return -1;
+ }
+ if (_rxAudioProcessingModulePtr->gain_control()->enable_limiter(
+ config.limiterEnable) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcConfig() failed to set hard limiter to the signal");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+Channel::GetRxAgcConfig(AgcConfig& config)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetRxAgcConfig(config=%?)");
+
+ config.targetLeveldBOv =
+ _rxAudioProcessingModulePtr->gain_control()->target_level_dbfs();
+ config.digitalCompressionGaindB =
+ _rxAudioProcessingModulePtr->gain_control()->compression_gain_db();
+ config.limiterEnable =
+ _rxAudioProcessingModulePtr->gain_control()->is_limiter_enabled();
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId), "GetRxAgcConfig() => "
+ "targetLeveldBOv=%u, digitalCompressionGaindB=%u,"
+ " limiterEnable=%d",
+ config.targetLeveldBOv,
+ config.digitalCompressionGaindB,
+ config.limiterEnable);
+
+ return 0;
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_AGC
+
+#ifdef WEBRTC_VOICE_ENGINE_NR
+
+int
+Channel::SetRxNsStatus(const bool enable, const NsModes mode)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetRxNsStatus(enable=%d, mode=%d)",
+ (int)enable, (int)mode);
+
+ NoiseSuppression::Level nsLevel(
+ (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE);
+ switch (mode)
+ {
+
+ case kNsDefault:
+ nsLevel = (NoiseSuppression::Level)
+ WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE;
+ break;
+ case kNsUnchanged:
+ nsLevel = _rxAudioProcessingModulePtr->noise_suppression()->level();
+ break;
+ case kNsConference:
+ nsLevel = NoiseSuppression::kHigh;
+ break;
+ case kNsLowSuppression:
+ nsLevel = NoiseSuppression::kLow;
+ break;
+ case kNsModerateSuppression:
+ nsLevel = NoiseSuppression::kModerate;
+ break;
+ case kNsHighSuppression:
+ nsLevel = NoiseSuppression::kHigh;
+ break;
+ case kNsVeryHighSuppression:
+ nsLevel = NoiseSuppression::kVeryHigh;
+ break;
+ }
+
+ if (_rxAudioProcessingModulePtr->noise_suppression()->set_level(nsLevel)
+ != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcStatus() failed to set Ns level");
+ return -1;
+ }
+ if (_rxAudioProcessingModulePtr->noise_suppression()->Enable(enable) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_APM_ERROR, kTraceError,
+ "SetRxAgcStatus() failed to set Agc state");
+ return -1;
+ }
+
+ _rxNsIsEnabled = enable;
+ _rxApmIsEnabled = ((_rxAgcIsEnabled == true) || (_rxNsIsEnabled == true));
+
+ return 0;
+}
+
+int
+Channel::GetRxNsStatus(bool& enabled, NsModes& mode)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetRxNsStatus(enable=?, mode=?)");
+
+ bool enable =
+ _rxAudioProcessingModulePtr->noise_suppression()->is_enabled();
+ NoiseSuppression::Level ncLevel =
+ _rxAudioProcessingModulePtr->noise_suppression()->level();
+
+ enabled = enable;
+
+ switch (ncLevel)
+ {
+ case NoiseSuppression::kLow:
+ mode = kNsLowSuppression;
+ break;
+ case NoiseSuppression::kModerate:
+ mode = kNsModerateSuppression;
+ break;
+ case NoiseSuppression::kHigh:
+ mode = kNsHighSuppression;
+ break;
+ case NoiseSuppression::kVeryHigh:
+ mode = kNsVeryHighSuppression;
+ break;
+ }
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetRxNsStatus() => enabled=%d, mode=%d", enabled, mode);
+ return 0;
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_NR
+
+int
+Channel::RegisterRTPObserver(VoERTPObserver& observer)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::RegisterRTPObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (_rtpObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "RegisterRTPObserver() observer already enabled");
+ return -1;
+ }
+
+ _rtpObserverPtr = &observer;
+ _rtpObserver = true;
+
+ return 0;
+}
+
+int
+Channel::DeRegisterRTPObserver()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DeRegisterRTPObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_rtpObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DeRegisterRTPObserver() observer already disabled");
+ return 0;
+ }
+
+ _rtpObserver = false;
+ _rtpObserverPtr = NULL;
+
+ return 0;
+}
+
+int
+Channel::RegisterRTCPObserver(VoERTCPObserver& observer)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::RegisterRTCPObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (_rtcpObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "RegisterRTCPObserver() observer already enabled");
+ return -1;
+ }
+
+ _rtcpObserverPtr = &observer;
+ _rtcpObserver = true;
+
+ return 0;
+}
+
+int
+Channel::DeRegisterRTCPObserver()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::DeRegisterRTCPObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (!_rtcpObserverPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "DeRegisterRTCPObserver() observer already disabled");
+ return 0;
+ }
+
+ _rtcpObserver = false;
+ _rtcpObserverPtr = NULL;
+
+ return 0;
+}
+
+int
+Channel::SetLocalSSRC(unsigned int ssrc)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetLocalSSRC()");
+ if (_sending)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_ALREADY_SENDING, kTraceError,
+ "SetLocalSSRC() already sending");
+ return -1;
+ }
+ if (_rtpRtcpModule->SetSSRC(ssrc) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetLocalSSRC() failed to set SSRC");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetLocalSSRC(unsigned int& ssrc)
+{
+ ssrc = _rtpRtcpModule->SSRC();
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetLocalSSRC() => ssrc=%lu", ssrc);
+ return 0;
+}
+
+int
+Channel::GetRemoteSSRC(unsigned int& ssrc)
+{
+ ssrc = _rtpRtcpModule->RemoteSSRC();
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetRemoteSSRC() => ssrc=%lu", ssrc);
+ return 0;
+}
+
+int
+Channel::GetRemoteCSRCs(unsigned int arrCSRC[15])
+{
+ if (arrCSRC == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "GetRemoteCSRCs() invalid array argument");
+ return -1;
+ }
+ WebRtc_UWord32 arrOfCSRC[kRtpCsrcSize];
+ WebRtc_Word32 CSRCs(0);
+ CSRCs = _rtpRtcpModule->CSRCs(arrOfCSRC);
+ if (CSRCs > 0)
+ {
+ memcpy(arrCSRC, arrOfCSRC, CSRCs * sizeof(WebRtc_UWord32));
+ for (int i = 0; i < (int) CSRCs; i++)
+ {
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRemoteCSRCs() => arrCSRC[%d]=%lu", i, arrCSRC[i]);
+ }
+ } else
+ {
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRemoteCSRCs() => list is empty!");
+ }
+ return CSRCs;
+}
+
+int
+Channel::SetRTPAudioLevelIndicationStatus(bool enable, unsigned char ID)
+{
+ if (_rtpAudioProc.get() == NULL)
+ {
+ _rtpAudioProc.reset(AudioProcessing::Create(VoEModuleId(_instanceId,
+ _channelId)));
+ if (_rtpAudioProc.get() == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(VE_NO_MEMORY, kTraceCritical,
+ "Failed to create AudioProcessing");
+ return -1;
+ }
+ }
+
+ if (_rtpAudioProc->level_estimator()->Enable(enable) !=
+ AudioProcessing::kNoError)
+ {
+ _engineStatisticsPtr->SetLastError(VE_APM_ERROR, kTraceWarning,
+ "Failed to enable AudioProcessing::level_estimator()");
+ }
+
+ _includeAudioLevelIndication = enable;
+ return _rtpRtcpModule->SetRTPAudioLevelIndicationStatus(enable, ID);
+}
+int
+Channel::GetRTPAudioLevelIndicationStatus(bool& enabled, unsigned char& ID)
+{
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetRTPAudioLevelIndicationStatus() => enabled=%d, ID=%u",
+ enabled, ID);
+ return _rtpRtcpModule->GetRTPAudioLevelIndicationStatus(enabled, ID);
+}
+
+int
+Channel::SetRTCPStatus(bool enable)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetRTCPStatus()");
+ if (_rtpRtcpModule->SetRTCPStatus(enable ?
+ kRtcpCompound : kRtcpOff) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetRTCPStatus() failed to set RTCP status");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetRTCPStatus(bool& enabled)
+{
+ RTCPMethod method = _rtpRtcpModule->RTCP();
+ enabled = (method != kRtcpOff);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetRTCPStatus() => enabled=%d", enabled);
+ return 0;
+}
+
+int
+Channel::SetRTCP_CNAME(const char cName[256])
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetRTCP_CNAME()");
+ if (_rtpRtcpModule->SetCNAME(cName) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetRTCP_CNAME() failed to set RTCP CNAME");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetRTCP_CNAME(char cName[256])
+{
+ if (_rtpRtcpModule->CNAME(cName) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "GetRTCP_CNAME() failed to retrieve RTCP CNAME");
+ return -1;
+ }
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRTCP_CNAME() => cName=%s", cName);
+ return 0;
+}
+
+int
+Channel::GetRemoteRTCP_CNAME(char cName[256])
+{
+ if (cName == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "GetRemoteRTCP_CNAME() invalid CNAME input buffer");
+ return -1;
+ }
+ char cname[RTCP_CNAME_SIZE];
+ const WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC();
+ if (_rtpRtcpModule->RemoteCNAME(remoteSSRC, cname) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_CANNOT_RETRIEVE_CNAME, kTraceError,
+ "GetRemoteRTCP_CNAME() failed to retrieve remote RTCP CNAME");
+ return -1;
+ }
+ strcpy(cName, cname);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRemoteRTCP_CNAME() => cName=%s", cName);
+ return 0;
+}
+
int
Channel::GetRemoteRTCPData(
    unsigned int& NTPHigh,
    unsigned int& NTPLow,
    unsigned int& timestamp,
    unsigned int& playoutTimestamp,
    unsigned int* jitter,
    unsigned short* fractionLost)
{
    // Collects RTCP-derived information about the remote side:
    //  - NTP and RTP timestamps from the last received Sender Report,
    //  - the locally stored playout timestamp from the last RTCP packet,
    //  - optionally, jitter and fraction-lost from received report blocks
    //    (only looked up when the caller passes non-NULL pointers).
    // Returns 0 on success, -1 on failure. Note that the mandatory
    // out-params are written before the optional report-block lookup, so
    // they may already be set even when -1 is returned from that path.

    // --- Information from sender info in received Sender Reports

    RTCPSenderInfo senderInfo;
    if (_rtpRtcpModule->RemoteRTCPStat(&senderInfo) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
            "GetRemoteRTCPData() failed to retrieve sender info for remote "
            "side");
        return -1;
    }

    // We only utilize 12 out of 20 bytes in the sender info (ignores packet
    // and octet count)
    NTPHigh = senderInfo.NTPseconds;
    NTPLow = senderInfo.NTPfraction;
    timestamp = senderInfo.RTPtimeStamp;

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId, _channelId),
                 "GetRemoteRTCPData() => NTPHigh=%lu, NTPLow=%lu, "
                 "timestamp=%lu",
                 NTPHigh, NTPLow, timestamp);

    // --- Locally derived information

    // This value is updated on each incoming RTCP packet (0 when no packet
    // has been received)
    playoutTimestamp = _playoutTimeStampRTCP;

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId, _channelId),
                 "GetRemoteRTCPData() => playoutTimestamp=%lu",
                 _playoutTimeStampRTCP);

    if (NULL != jitter || NULL != fractionLost)
    {
        // Get all RTCP receiver report blocks that have been received on this
        // channel. If we receive RTP packets from a remote source we know the
        // remote SSRC and use the report block from him.
        // Otherwise use the first report block.
        std::vector<RTCPReportBlock> remote_stats;
        if (_rtpRtcpModule->RemoteRTCPStat(&remote_stats) != 0 ||
            remote_stats.empty()) {
          WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                       VoEId(_instanceId, _channelId),
                       "GetRemoteRTCPData() failed to measure statistics due"
                       " to lack of received RTP and/or RTCP packets");
          return -1;
        }

        WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC();
        // Prefer the report block whose reporter matches the known remote
        // SSRC.
        std::vector<RTCPReportBlock>::const_iterator it = remote_stats.begin();
        for (; it != remote_stats.end(); ++it) {
          if (it->remoteSSRC == remoteSSRC)
            break;
        }

        if (it == remote_stats.end()) {
          // If we have not received any RTCP packets from this SSRC it probably
          // means that we have not received any RTP packets.
          // Use the first received report block instead.
          it = remote_stats.begin();
          remoteSSRC = it->remoteSSRC;
        }

        if (jitter) {
          *jitter = it->jitter;
          WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                       VoEId(_instanceId, _channelId),
                       "GetRemoteRTCPData() => jitter = %lu", *jitter);
        }

        if (fractionLost) {
          *fractionLost = it->fractionLost;
          WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                       VoEId(_instanceId, _channelId),
                       "GetRemoteRTCPData() => fractionLost = %lu",
                       *fractionLost);
        }
    }
    return 0;
}
+
+int
+Channel::SendApplicationDefinedRTCPPacket(const unsigned char subType,
+ unsigned int name,
+ const char* data,
+ unsigned short dataLengthInBytes)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SendApplicationDefinedRTCPPacket()");
+ if (!_sending)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_NOT_SENDING, kTraceError,
+ "SendApplicationDefinedRTCPPacket() not sending");
+ return -1;
+ }
+ if (NULL == data)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SendApplicationDefinedRTCPPacket() invalid data value");
+ return -1;
+ }
+ if (dataLengthInBytes % 4 != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SendApplicationDefinedRTCPPacket() invalid length value");
+ return -1;
+ }
+ RTCPMethod status = _rtpRtcpModule->RTCP();
+ if (status == kRtcpOff)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTCP_ERROR, kTraceError,
+ "SendApplicationDefinedRTCPPacket() RTCP is disabled");
+ return -1;
+ }
+
+ // Create and schedule the RTCP APP packet for transmission
+ if (_rtpRtcpModule->SetRTCPApplicationSpecificData(
+ subType,
+ name,
+ (const unsigned char*) data,
+ dataLengthInBytes) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SEND_ERROR, kTraceError,
+ "SendApplicationDefinedRTCPPacket() failed to send RTCP packet");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetRTPStatistics(
+ unsigned int& averageJitterMs,
+ unsigned int& maxJitterMs,
+ unsigned int& discardedPackets)
+{
+ WebRtc_UWord8 fraction_lost(0);
+ WebRtc_UWord32 cum_lost(0);
+ WebRtc_UWord32 ext_max(0);
+ WebRtc_UWord32 jitter(0);
+ WebRtc_UWord32 max_jitter(0);
+
+ // The jitter statistics is updated for each received RTP packet and is
+ // based on received packets.
+ if (_rtpRtcpModule->StatisticsRTP(&fraction_lost,
+ &cum_lost,
+ &ext_max,
+ &jitter,
+ &max_jitter) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
+ "GetRTPStatistics() failed to read RTP statistics from the "
+ "RTP/RTCP module");
+ }
+
+ const WebRtc_Word32 playoutFrequency =
+ _audioCodingModule.PlayoutFrequency();
+ if (playoutFrequency > 0)
+ {
+ // Scale RTP statistics given the current playout frequency
+ maxJitterMs = max_jitter / (playoutFrequency / 1000);
+ averageJitterMs = jitter / (playoutFrequency / 1000);
+ }
+
+ discardedPackets = _numberOfDiscardedPackets;
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetRTPStatistics() => averageJitterMs = %lu, maxJitterMs = %lu,"
+ " discardedPackets = %lu)",
+ averageJitterMs, maxJitterMs, discardedPackets);
+ return 0;
+}
+
+int Channel::GetRemoteRTCPSenderInfo(SenderInfo* sender_info) {
+ if (sender_info == NULL) {
+ _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "GetRemoteRTCPSenderInfo() invalid sender_info.");
+ return -1;
+ }
+
+ // Get the sender info from the latest received RTCP Sender Report.
+ RTCPSenderInfo rtcp_sender_info;
+ if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_sender_info) != 0) {
+ _engineStatisticsPtr->SetLastError(VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "GetRemoteRTCPSenderInfo() failed to read RTCP SR sender info.");
+ return -1;
+ }
+
+ sender_info->NTP_timestamp_high = rtcp_sender_info.NTPseconds;
+ sender_info->NTP_timestamp_low = rtcp_sender_info.NTPfraction;
+ sender_info->RTP_timestamp = rtcp_sender_info.RTPtimeStamp;
+ sender_info->sender_packet_count = rtcp_sender_info.sendPacketCount;
+ sender_info->sender_octet_count = rtcp_sender_info.sendOctetCount;
+ return 0;
+}
+
+int Channel::GetRemoteRTCPReportBlocks(
+ std::vector<ReportBlock>* report_blocks) {
+ if (report_blocks == NULL) {
+ _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "GetRemoteRTCPReportBlock()s invalid report_blocks.");
+ return -1;
+ }
+
+ // Get the report blocks from the latest received RTCP Sender or Receiver
+ // Report. Each element in the vector contains the sender's SSRC and a
+ // report block according to RFC 3550.
+ std::vector<RTCPReportBlock> rtcp_report_blocks;
+ if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
+ _engineStatisticsPtr->SetLastError(VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "GetRemoteRTCPReportBlocks() failed to read RTCP SR/RR report block.");
+ return -1;
+ }
+
+ if (rtcp_report_blocks.empty())
+ return 0;
+
+ std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
+ for (; it != rtcp_report_blocks.end(); ++it) {
+ ReportBlock report_block;
+ report_block.sender_SSRC = it->remoteSSRC;
+ report_block.source_SSRC = it->sourceSSRC;
+ report_block.fraction_lost = it->fractionLost;
+ report_block.cumulative_num_packets_lost = it->cumulativeLost;
+ report_block.extended_highest_sequence_number = it->extendedHighSeqNum;
+ report_block.interarrival_jitter = it->jitter;
+ report_block.last_SR_timestamp = it->lastSR;
+ report_block.delay_since_last_SR = it->delaySinceLastSR;
+ report_blocks->push_back(report_block);
+ }
+ return 0;
+}
+
int
Channel::GetRTPStatistics(CallStatistics& stats)
{
    // Fills |stats| in three stages: (1) loss/jitter counters from received
    // RTP packets, (2) the round-trip time derived from RTCP, and (3) RTP
    // byte/packet data counters. Individual read failures are logged as
    // warnings but never abort the call, so the corresponding fields are
    // simply left at their zero-initialized defaults; always returns 0.
    WebRtc_UWord8 fraction_lost(0);
    WebRtc_UWord32 cum_lost(0);
    WebRtc_UWord32 ext_max(0);
    WebRtc_UWord32 jitter(0);
    WebRtc_UWord32 max_jitter(0);

    // --- Part one of the final structure (four values)

    // The jitter statistics is updated for each received RTP packet and is
    // based on received packets.
    if (_rtpRtcpModule->StatisticsRTP(&fraction_lost,
                                      &cum_lost,
                                      &ext_max,
                                      &jitter,
                                      &max_jitter) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
            "GetRTPStatistics() failed to read RTP statistics from the "
            "RTP/RTCP module");
    }

    stats.fractionLost = fraction_lost;
    stats.cumulativeLost = cum_lost;
    stats.extendedMax = ext_max;
    stats.jitterSamples = jitter;

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId, _channelId),
                 "GetRTPStatistics() => fractionLost=%lu, cumulativeLost=%lu,"
                 " extendedMax=%lu, jitterSamples=%li)",
                 stats.fractionLost, stats.cumulativeLost, stats.extendedMax,
                 stats.jitterSamples);

    // --- Part two of the final structure (one value)

    WebRtc_UWord16 RTT(0);
    RTCPMethod method = _rtpRtcpModule->RTCP();
    if (method == kRtcpOff)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                     VoEId(_instanceId, _channelId),
                     "GetRTPStatistics() RTCP is disabled => valid RTT "
                     "measurements cannot be retrieved");
    } else
    {
        // The remote SSRC will be zero if no RTP packet has been received.
        WebRtc_UWord32 remoteSSRC = _rtpRtcpModule->RemoteSSRC();
        if (remoteSSRC > 0)
        {
            // avg/min/max are required by the RTT() signature but only the
            // most recent RTT value is reported back in |stats|.
            WebRtc_UWord16 avgRTT(0);
            WebRtc_UWord16 maxRTT(0);
            WebRtc_UWord16 minRTT(0);

            if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT, &maxRTT)
                != 0)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                             VoEId(_instanceId, _channelId),
                             "GetRTPStatistics() failed to retrieve RTT from "
                             "the RTP/RTCP module");
            }
        } else
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "GetRTPStatistics() failed to measure RTT since no "
                         "RTP packets have been received yet");
        }
    }

    stats.rttMs = static_cast<int> (RTT);

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId, _channelId),
                 "GetRTPStatistics() => rttMs=%d", stats.rttMs);

    // --- Part three of the final structure (four values)

    WebRtc_UWord32 bytesSent(0);
    WebRtc_UWord32 packetsSent(0);
    WebRtc_UWord32 bytesReceived(0);
    WebRtc_UWord32 packetsReceived(0);

    if (_rtpRtcpModule->DataCountersRTP(&bytesSent,
                                        &packetsSent,
                                        &bytesReceived,
                                        &packetsReceived) != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                     VoEId(_instanceId, _channelId),
                     "GetRTPStatistics() failed to retrieve RTP datacounters =>"
                     " output will not be complete");
    }

    stats.bytesSent = bytesSent;
    stats.packetsSent = packetsSent;
    stats.bytesReceived = bytesReceived;
    stats.packetsReceived = packetsReceived;

    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
                 VoEId(_instanceId, _channelId),
                 "GetRTPStatistics() => bytesSent=%d, packetsSent=%d,"
                 " bytesReceived=%d, packetsReceived=%d)",
                 stats.bytesSent, stats.packetsSent, stats.bytesReceived,
                 stats.packetsReceived);

    return 0;
}
+
+int
+Channel::SetFECStatus(bool enable, int redPayloadtype)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::SetFECStatus()");
+
+ CodecInst codec;
+
+ // Get default RED settings from the ACM database
+ bool foundRED(false);
+ const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+ for (int idx = 0; (!foundRED && idx < nSupportedCodecs); idx++)
+ {
+ _audioCodingModule.Codec(idx, codec);
+ if (!STR_CASE_CMP(codec.plname, "RED"))
+ {
+ foundRED = true;
+ }
+ }
+ if (!foundRED)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_CODEC_ERROR, kTraceError,
+ "SetFECStatus() RED is not supported");
+ return -1;
+ }
+
+ if (redPayloadtype != -1)
+ {
+ codec.pltype = redPayloadtype;
+ }
+
+ if (_audioCodingModule.RegisterSendCodec(codec) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+ "SetFECStatus() RED registration in ACM module failed");
+ return -1;
+ }
+ if (_rtpRtcpModule->SetSendREDPayloadType(codec.pltype) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetFECStatus() RED registration in RTP/RTCP module failed");
+ return -1;
+ }
+ if (_audioCodingModule.SetFECStatus(enable) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+ "SetFECStatus() failed to set FEC state in the ACM");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetFECStatus(bool& enabled, int& redPayloadtype)
+{
+ enabled = _audioCodingModule.FECStatus();
+ if (enabled)
+ {
+ WebRtc_Word8 payloadType(0);
+ if (_rtpRtcpModule->SendREDPayloadType(payloadType) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "GetFECStatus() failed to retrieve RED PT from RTP/RTCP "
+ "module");
+ return -1;
+ }
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetFECStatus() => enabled=%d, redPayloadtype=%d",
+ enabled, redPayloadtype);
+ return 0;
+ }
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "GetFECStatus() => enabled=%d", enabled);
+ return 0;
+}
+
+int
+Channel::StartRTPDump(const char fileNameUTF8[1024],
+ RTPDirections direction)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::StartRTPDump()");
+ if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRTPDump() invalid RTP direction");
+ return -1;
+ }
+ RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+ &_rtpDumpIn : &_rtpDumpOut;
+ if (rtpDumpPtr == NULL)
+ {
+ assert(false);
+ return -1;
+ }
+ if (rtpDumpPtr->IsActive())
+ {
+ rtpDumpPtr->Stop();
+ }
+ if (rtpDumpPtr->Start(fileNameUTF8) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_FILE, kTraceError,
+ "StartRTPDump() failed to create file");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::StopRTPDump(RTPDirections direction)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::StopRTPDump()");
+ if ((direction != kRtpIncoming) && (direction != kRtpOutgoing))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StopRTPDump() invalid RTP direction");
+ return -1;
+ }
+ RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+ &_rtpDumpIn : &_rtpDumpOut;
+ if (rtpDumpPtr == NULL)
+ {
+ assert(false);
+ return -1;
+ }
+ if (!rtpDumpPtr->IsActive())
+ {
+ return 0;
+ }
+ return rtpDumpPtr->Stop();
+}
+
+bool
+Channel::RTPDumpIsActive(RTPDirections direction)
+{
+ if ((direction != kRtpIncoming) &&
+ (direction != kRtpOutgoing))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "RTPDumpIsActive() invalid RTP direction");
+ return false;
+ }
+ RtpDump* rtpDumpPtr = (direction == kRtpIncoming) ?
+ &_rtpDumpIn : &_rtpDumpOut;
+ return rtpDumpPtr->IsActive();
+}
+
int
Channel::InsertExtraRTPPacket(unsigned char payloadType,
                              bool markerBit,
                              const char* payloadData,
                              unsigned short payloadSize)
{
    // Injects one extra RTP packet into the outgoing stream with the given
    // payload type and marker bit. The channel must currently be sending.
    // Returns 0 on success, -1 on failure.
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                 "Channel::InsertExtraRTPPacket()");
    // RTP payload types are 7 bits (0..127).
    if (payloadType > 127)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_PLTYPE, kTraceError,
            "InsertExtraRTPPacket() invalid payload type");
        return -1;
    }
    if (payloadData == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "InsertExtraRTPPacket() invalid payload data");
        return -1;
    }
    // The payload must fit in a single RTP packet.
    if (payloadSize > _rtpRtcpModule->MaxDataPayloadLength())
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "InsertExtraRTPPacket() invalid payload size");
        return -1;
    }
    if (!_sending)
    {
        _engineStatisticsPtr->SetLastError(
            VE_NOT_SENDING, kTraceError,
            "InsertExtraRTPPacket() not sending");
        return -1;
    }

    // Create extra RTP packet by calling RtpRtcp::SendOutgoingData().
    // Transport::SendPacket() will be called by the module when the RTP packet
    // is created.
    // The call to SendOutgoingData() does *not* modify the timestamp and
    // payloadtype to ensure that the RTP module generates a valid RTP packet
    // (user might utilize a non-registered payload type).
    // The marker bit and payload type will be replaced just before the actual
    // transmission, i.e., the actual modification is done *after* the RTP
    // module has delivered its RTP packet back to the VoE.
    // We will use the stored values above when the packet is modified
    // (see Channel::SendPacket()).

    // NOTE: these members must be set *before* SendOutgoingData() below,
    // since the module delivers the packet back synchronously through the
    // send path that reads them.
    _extraPayloadType = payloadType;
    _extraMarkerBit = markerBit;
    _insertExtraRTPPacket = true;

    if (_rtpRtcpModule->SendOutgoingData(kAudioFrameSpeech,
                                        _lastPayloadType,
                                        _lastLocalTimeStamp,
                                        // Leaving the time when this frame was
                                        // received from the capture device as
                                        // undefined for voice for now.
                                        -1,
                                        (const WebRtc_UWord8*) payloadData,
                                        payloadSize) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_RTP_RTCP_MODULE_ERROR, kTraceError,
            "InsertExtraRTPPacket() failed to send extra RTP packet");
        return -1;
    }

    return 0;
}
+
+WebRtc_UWord32
+Channel::Demultiplex(const AudioFrame& audioFrame)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::Demultiplex()");
+ _audioFrame = audioFrame;
+ _audioFrame.id_ = _channelId;
+ return 0;
+}
+
// Prepares the channel's capture frame (_audioFrame) for encoding: mixes or
// replaces it with file audio if file playing is enabled, applies mute,
// invokes the registered external media callback, inserts in-band DTMF, and
// runs level analysis for the audio-level RTP extension.
// Returns 0 on success, -1 on error (note the unsigned return type; callers
// compare against 0).
WebRtc_UWord32
Channel::PrepareEncodeAndSend(int mixingFrequency)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::PrepareEncodeAndSend()");

    if (_audioFrame.samples_per_channel_ == 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::PrepareEncodeAndSend() invalid audio frame");
        return -1;
    }

    if (_inputFilePlaying)
    {
        // Mix or replace the microphone signal with file playout audio.
        // Note: a failure here is deliberately ignored; encoding proceeds.
        MixOrReplaceAudioWithFile(mixingFrequency);
    }

    if (_mute)
    {
        AudioFrameOperations::Mute(_audioFrame);
    }

    if (_inputExternalMedia)
    {
        // Hand the raw capture audio to the registered external processor
        // (may modify the samples in place).
        CriticalSectionScoped cs(&_callbackCritSect);
        const bool isStereo = (_audioFrame.num_channels_ == 2);
        if (_inputExternalMediaCallbackPtr)
        {
            _inputExternalMediaCallbackPtr->Process(
                _channelId,
                kRecordingPerChannel,
                (WebRtc_Word16*)_audioFrame.data_,
                _audioFrame.samples_per_channel_,
                _audioFrame.sample_rate_hz_,
                isStereo);
        }
    }

    InsertInbandDtmfTone();

    if (_includeAudioLevelIndication)
    {
        assert(_rtpAudioProc.get() != NULL);

        // Check if settings need to be updated.
        if (_rtpAudioProc->sample_rate_hz() != _audioFrame.sample_rate_hz_)
        {
            if (_rtpAudioProc->set_sample_rate_hz(_audioFrame.sample_rate_hz_) !=
                AudioProcessing::kNoError)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                             VoEId(_instanceId, _channelId),
                             "Error setting AudioProcessing sample rate");
                return -1;
            }
        }

        if (_rtpAudioProc->num_input_channels() != _audioFrame.num_channels_)
        {
            if (_rtpAudioProc->set_num_channels(_audioFrame.num_channels_,
                                                _audioFrame.num_channels_)
                != AudioProcessing::kNoError)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                             VoEId(_instanceId, _channelId),
                             "Error setting AudioProcessing channels");
                return -1;
            }
        }

        // Performs level analysis only; does not affect the signal.
        _rtpAudioProc->ProcessStream(&_audioFrame);
    }

    return 0;
}
+
// Feeds the prepared 10 ms capture frame to the ACM for encoding and
// advances the channel's RTP timestamp. If the ACM has a complete encoded
// frame ready, ACM Process() triggers the packetization callback which
// sends the packet.
// Returns 0 on success, -1 on error (note the unsigned return type; callers
// compare against 0).
WebRtc_UWord32
Channel::EncodeAndSend()
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::EncodeAndSend()");

    assert(_audioFrame.num_channels_ <= 2);
    if (_audioFrame.samples_per_channel_ == 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::EncodeAndSend() invalid audio frame");
        return -1;
    }

    _audioFrame.id_ = _channelId;

    // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.

    // The ACM resamples internally.
    _audioFrame.timestamp_ = _timeStamp;
    if (_audioCodingModule.Add10MsData((AudioFrame&)_audioFrame) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::EncodeAndSend() ACM encoding failed");
        return -1;
    }

    // Advance the RTP timestamp by one frame's worth of samples.
    _timeStamp += _audioFrame.samples_per_channel_;

    // --- Encode if complete frame is ready

    // This call will trigger AudioPacketizationCallback::SendData if encoding
    // is done and payload is ready for packetization and transmission.
    return _audioCodingModule.Process();
}
+
+int Channel::RegisterExternalMediaProcessing(
+ ProcessingTypes type,
+ VoEMediaProcess& processObject)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::RegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (kPlaybackPerChannel == type)
+ {
+ if (_outputExternalMediaCallbackPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "Channel::RegisterExternalMediaProcessing() "
+ "output external media already enabled");
+ return -1;
+ }
+ _outputExternalMediaCallbackPtr = &processObject;
+ _outputExternalMedia = true;
+ }
+ else if (kRecordingPerChannel == type)
+ {
+ if (_inputExternalMediaCallbackPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "Channel::RegisterExternalMediaProcessing() "
+ "output external media already enabled");
+ return -1;
+ }
+ _inputExternalMediaCallbackPtr = &processObject;
+ _inputExternalMedia = true;
+ }
+ return 0;
+}
+
+int Channel::DeRegisterExternalMediaProcessing(ProcessingTypes type)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::DeRegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+
+ if (kPlaybackPerChannel == type)
+ {
+ if (!_outputExternalMediaCallbackPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "Channel::DeRegisterExternalMediaProcessing() "
+ "output external media already disabled");
+ return 0;
+ }
+ _outputExternalMedia = false;
+ _outputExternalMediaCallbackPtr = NULL;
+ }
+ else if (kRecordingPerChannel == type)
+ {
+ if (!_inputExternalMediaCallbackPtr)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceWarning,
+ "Channel::DeRegisterExternalMediaProcessing() "
+ "input external media already disabled");
+ return 0;
+ }
+ _inputExternalMedia = false;
+ _inputExternalMediaCallbackPtr = NULL;
+ }
+
+ return 0;
+}
+
+int
+Channel::ResetRTCPStatistics()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::ResetRTCPStatistics()");
+ WebRtc_UWord32 remoteSSRC(0);
+ remoteSSRC = _rtpRtcpModule->RemoteSSRC();
+ return _rtpRtcpModule->ResetRTT(remoteSSRC);
+}
+
// Fills |delaysMs| with min/max/average round-trip-time values measured via
// RTCP. If RTCP is disabled, or no RTT values are available yet, all fields
// are set to -1 and 0 is returned (the failure is signalled through the
// values, not the return code).
int
Channel::GetRoundTripTimeSummary(StatVal& delaysMs) const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::GetRoundTripTimeSummary()");
    // Override default module outputs for the case when RTCP is disabled.
    // This is done to ensure that we are backward compatible with the
    // VoiceEngine where we did not use RTP/RTCP module.
    if (!_rtpRtcpModule->RTCP())
    {
        delaysMs.min = -1;
        delaysMs.max = -1;
        delaysMs.average = -1;
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::GetRoundTripTimeSummary() RTCP is disabled =>"
                     " valid RTT measurements cannot be retrieved");
        return 0;
    }

    WebRtc_UWord32 remoteSSRC;
    WebRtc_UWord16 RTT;
    WebRtc_UWord16 avgRTT;
    WebRtc_UWord16 maxRTT;
    WebRtc_UWord16 minRTT;
    // The remote SSRC will be zero if no RTP packet has been received.
    remoteSSRC = _rtpRtcpModule->RemoteSSRC();
    if (remoteSSRC == 0)
    {
        // Warn only; the RTT lookup below is still attempted with SSRC 0.
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::GetRoundTripTimeSummary() unable to measure RTT"
                     " since no RTP packet has been received yet");
    }

    // Retrieve RTT statistics from the RTP/RTCP module for the specified
    // channel and SSRC. The SSRC is required to parse out the correct source
    // in conference scenarios.
    if (_rtpRtcpModule->RTT(remoteSSRC, &RTT, &avgRTT, &minRTT,&maxRTT) != 0)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "GetRoundTripTimeSummary unable to retrieve RTT values"
                     " from the RTCP layer");
        delaysMs.min = -1; delaysMs.max = -1; delaysMs.average = -1;
    }
    else
    {
        delaysMs.min = minRTT;
        delaysMs.max = maxRTT;
        delaysMs.average = avgRTT;
    }
    return 0;
}
+
// Retrieves NetEQ network statistics (jitter buffer state, packet loss
// concealment counters, etc.) from the ACM into |stats|.
// NOTE(review): the reference cast assumes NetworkStatistics and
// ACMNetworkStatistics have identical layout — verify if either struct
// changes.
int
Channel::GetNetworkStatistics(NetworkStatistics& stats)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::GetNetworkStatistics()");
    return _audioCodingModule.NetworkStatistics(
        (ACMNetworkStatistics &)stats);
}
+
// Returns the estimated total receive-side delay in milliseconds.
// _averageDelayMs is stored scaled by 10 (see UpdatePacketDelay()), so it
// is divided by 10 here; adding 5 before the division rounds to nearest.
int
Channel::GetDelayEstimate(int& delayMs) const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::GetDelayEstimate()");
    delayMs = (_averageDelayMs + 5) / 10 + _recPacketDelayMs;
    return 0;
}
+
+int
+Channel::SetMinimumPlayoutDelay(int delayMs)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetMinimumPlayoutDelay()");
+ if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
+ (delayMs > kVoiceEngineMaxMinPlayoutDelayMs))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "SetMinimumPlayoutDelay() invalid min delay");
+ return -1;
+ }
+ if (_audioCodingModule.SetMinimumPlayoutDelay(delayMs) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
+ "SetMinimumPlayoutDelay() failed to set min playout delay");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetPlayoutTimestamp(unsigned int& timestamp)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetPlayoutTimestamp()");
+ WebRtc_UWord32 playoutTimestamp(0);
+ if (GetPlayoutTimeStamp(playoutTimestamp) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_CANNOT_RETRIEVE_VALUE, kTraceError,
+ "GetPlayoutTimestamp() failed to retrieve timestamp");
+ return -1;
+ }
+ timestamp = playoutTimestamp;
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_instanceId,_channelId),
+ "GetPlayoutTimestamp() => timestamp=%u", timestamp);
+ return 0;
+}
+
+int
+Channel::SetInitTimestamp(unsigned int timestamp)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetInitTimestamp()");
+ if (_sending)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SENDING, kTraceError, "SetInitTimestamp() already sending");
+ return -1;
+ }
+ if (_rtpRtcpModule->SetStartTimestamp(timestamp) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetInitTimestamp() failed to set timestamp");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::SetInitSequenceNumber(short sequenceNumber)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::SetInitSequenceNumber()");
+ if (_sending)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_SENDING, kTraceError,
+ "SetInitSequenceNumber() already sending");
+ return -1;
+ }
+ if (_rtpRtcpModule->SetSequenceNumber(sequenceNumber) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_RTP_RTCP_MODULE_ERROR, kTraceError,
+ "SetInitSequenceNumber() failed to set sequence number");
+ return -1;
+ }
+ return 0;
+}
+
+int
+Channel::GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetRtpRtcp()");
+ rtpRtcpModule = _rtpRtcpModule.get();
+ return 0;
+}
+
// TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
// a shared helper.
// Mixes (or, when _mixFileWithMicrophone is false, replaces) the channel's
// capture frame with 10 ms of audio pulled from the input file player.
// The 640-sample scratch buffer covers 10 ms of mono audio up to 64 kHz.
// Returns 0 on success (including end-of-file), -1 on error.
WebRtc_Word32
Channel::MixOrReplaceAudioWithFile(const int mixingFrequency)
{
    scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
    int fileSamples(0);

    {
        // Guard access to the file player while reading from it.
        CriticalSectionScoped cs(&_fileCritSect);

        if (_inputFilePlayerPtr == NULL)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::MixOrReplaceAudioWithFile() fileplayer"
                         " doesnt exist");
            return -1;
        }

        if (_inputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
                                                      fileSamples,
                                                      mixingFrequency) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::MixOrReplaceAudioWithFile() file mixing "
                         "failed");
            return -1;
        }
        if (fileSamples == 0)
        {
            // End of file: leave the capture frame untouched.
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::MixOrReplaceAudioWithFile() file is ended");
            return 0;
        }
    }

    assert(_audioFrame.samples_per_channel_ == fileSamples);

    if (_mixFileWithMicrophone)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        Utility::MixWithSat(_audioFrame.data_,
                            _audioFrame.num_channels_,
                            fileBuffer.get(),
                            1,
                            fileSamples);
    }
    else
    {
        // Replace ACM audio with file.
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        _audioFrame.UpdateFrame(_channelId,
                                -1,
                                fileBuffer.get(),
                                fileSamples,
                                mixingFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadUnknown,
                                1);

    }
    return 0;
}
+
// Mixes 10 ms of audio from the output file player into |audioFrame|
// (playout-side file mixing). The sample counts of the frame and the file
// segment must match, otherwise the call fails.
// Returns 0 on success, -1 on error.
WebRtc_Word32
Channel::MixAudioWithFile(AudioFrame& audioFrame,
                          const int mixingFrequency)
{
    // The 640-sample scratch buffer covers 10 ms of mono audio up to
    // 64 kHz, hence the 32 kHz ceiling asserted here has margin.
    assert(mixingFrequency <= 32000);

    scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
    int fileSamples(0);

    {
        // Guard access to the file player while reading from it.
        CriticalSectionScoped cs(&_fileCritSect);

        if (_outputFilePlayerPtr == NULL)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::MixAudioWithFile() file mixing failed");
            return -1;
        }

        // We should get the frequency we ask for.
        if (_outputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
                                                       fileSamples,
                                                       mixingFrequency) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::MixAudioWithFile() file mixing failed");
            return -1;
        }
    }

    if (audioFrame.samples_per_channel_ == fileSamples)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        Utility::MixWithSat(audioFrame.data_,
                            audioFrame.num_channels_,
                            fileBuffer.get(),
                            1,
                            fileSamples);
    }
    else
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
                     "Channel::MixAudioWithFile() samples_per_channel_(%d) != "
                     "fileSamples(%d)",
                     audioFrame.samples_per_channel_, fileSamples);
        return -1;
    }

    return 0;
}
+
+int
+Channel::InsertInbandDtmfTone()
+{
+ // Check if we should start a new tone.
+ if (_inbandDtmfQueue.PendingDtmf() &&
+ !_inbandDtmfGenerator.IsAddingTone() &&
+ _inbandDtmfGenerator.DelaySinceLastTone() >
+ kMinTelephoneEventSeparationMs)
+ {
+ WebRtc_Word8 eventCode(0);
+ WebRtc_UWord16 lengthMs(0);
+ WebRtc_UWord8 attenuationDb(0);
+
+ eventCode = _inbandDtmfQueue.NextDtmf(&lengthMs, &attenuationDb);
+ _inbandDtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb);
+ if (_playInbandDtmfEvent)
+ {
+ // Add tone to output mixer using a reduced length to minimize
+ // risk of echo.
+ _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs - 80,
+ attenuationDb);
+ }
+ }
+
+ if (_inbandDtmfGenerator.IsAddingTone())
+ {
+ WebRtc_UWord16 frequency(0);
+ _inbandDtmfGenerator.GetSampleRate(frequency);
+
+ if (frequency != _audioFrame.sample_rate_hz_)
+ {
+ // Update sample rate of Dtmf tone since the mixing frequency
+ // has changed.
+ _inbandDtmfGenerator.SetSampleRate(
+ (WebRtc_UWord16) (_audioFrame.sample_rate_hz_));
+ // Reset the tone to be added taking the new sample rate into
+ // account.
+ _inbandDtmfGenerator.ResetTone();
+ }
+
+ WebRtc_Word16 toneBuffer[320];
+ WebRtc_UWord16 toneSamples(0);
+ // Get 10ms tone segment and set time since last tone to zero
+ if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "Channel::EncodeAndSend() inserting Dtmf failed");
+ return -1;
+ }
+
+ // Replace mixed audio with DTMF tone.
+ for (int sample = 0;
+ sample < _audioFrame.samples_per_channel_;
+ sample++)
+ {
+ for (int channel = 0;
+ channel < _audioFrame.num_channels_;
+ channel++)
+ {
+ _audioFrame.data_[sample * _audioFrame.num_channels_ + channel] =
+ toneBuffer[sample];
+ }
+ }
+
+ assert(_audioFrame.samples_per_channel_ == toneSamples);
+ } else
+ {
+ // Add 10ms to "delay-since-last-tone" counter
+ _inbandDtmfGenerator.UpdateDelaySinceLastTone();
+ }
+ return 0;
+}
+
+WebRtc_Word32
+Channel::GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp)
+{
+ WebRtc_UWord32 timestamp(0);
+ CodecInst currRecCodec;
+
+ if (_audioCodingModule.PlayoutTimestamp(timestamp) == -1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetPlayoutTimeStamp() failed to read playout"
+ " timestamp from the ACM");
+ return -1;
+ }
+
+ WebRtc_UWord16 delayMS(0);
+ if (_audioDeviceModulePtr->PlayoutDelay(&delayMS) == -1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetPlayoutTimeStamp() failed to read playout"
+ " delay from the ADM");
+ return -1;
+ }
+
+ WebRtc_Word32 playoutFrequency = _audioCodingModule.PlayoutFrequency();
+ if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0) {
+ if (STR_CASE_CMP("G722", currRecCodec.plname) == 0) {
+ playoutFrequency = 8000;
+ } else if (STR_CASE_CMP("opus", currRecCodec.plname) == 0) {
+ playoutFrequency = 48000;
+ }
+ }
+ timestamp -= (delayMS * (playoutFrequency/1000));
+
+ playoutTimestamp = timestamp;
+
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::GetPlayoutTimeStamp() => playoutTimestamp = %lu",
+ playoutTimestamp);
+ return 0;
+}
+
+void
+Channel::ResetDeadOrAliveCounters()
+{
+ _countDeadDetections = 0;
+ _countAliveDetections = 0;
+}
+
+void
+Channel::UpdateDeadOrAliveCounters(bool alive)
+{
+ if (alive)
+ _countAliveDetections++;
+ else
+ _countDeadDetections++;
+}
+
+int
+Channel::GetDeadOrAliveCounters(int& countDead, int& countAlive) const
+{
+ bool enabled;
+ WebRtc_UWord8 timeSec;
+
+ _rtpRtcpModule->PeriodicDeadOrAliveStatus(enabled, timeSec);
+ if (!enabled)
+ return (-1);
+
+ countDead = static_cast<int> (_countDeadDetections);
+ countAlive = static_cast<int> (_countAliveDetections);
+ return 0;
+}
+
+WebRtc_Word32
+Channel::SendPacketRaw(const void *data, int len, bool RTCP)
+{
+ if (_transportPtr == NULL)
+ {
+ return -1;
+ }
+ if (!RTCP)
+ {
+ return _transportPtr->SendPacket(_channelId, data, len);
+ }
+ else
+ {
+ return _transportPtr->SendRTCPPacket(_channelId, data, len);
+ }
+}
+
// Updates the channel's receive-delay estimates from an incoming RTP
// packet's timestamp and sequence number:
//  - _averageDelayMs: exponentially filtered playout-vs-arrival timestamp
//    difference, stored scaled by 10 (compensated in GetDelayEstimate()).
//  - _recPacketDelayMs: inter-packet spacing for consecutive packets,
//    accepted only in the plausible 10..60 ms range.
// Returns 0 on success, -1 on an unsupported RTP clock rate.
WebRtc_Word32
Channel::UpdatePacketDelay(const WebRtc_UWord32 timestamp,
                           const WebRtc_UWord16 sequenceNumber)
{
    // NOTE(review): "%lu" with a 32-bit unsigned argument is a format
    // mismatch on LP64 platforms — consider "%u" (trace-only).
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
                 "Channel::UpdatePacketDelay(timestamp=%lu, sequenceNumber=%u)",
                 timestamp, sequenceNumber);

    WebRtc_Word32 rtpReceiveFrequency(0);

    // Get frequency of last received payload
    rtpReceiveFrequency = _audioCodingModule.ReceiveFrequency();

    CodecInst currRecCodec;
    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0) {
        if (STR_CASE_CMP("G722", currRecCodec.plname) == 0) {
            // Even though the actual sampling rate for G.722 audio is
            // 16,000 Hz, the RTP clock rate for the G722 payload format is
            // 8,000 Hz because that value was erroneously assigned in
            // RFC 1890 and must remain unchanged for backward compatibility.
            rtpReceiveFrequency = 8000;
        } else if (STR_CASE_CMP("opus", currRecCodec.plname) == 0) {
            // We are resampling Opus internally to 32,000 Hz until all our
            // DSP routines can operate at 48,000 Hz, but the RTP clock
            // rate for the Opus payload format is standardized to 48,000 Hz,
            // because that is the maximum supported decoding sampling rate.
            rtpReceiveFrequency = 48000;
        }
    }

    // Unsigned subtraction: if |timestamp| is behind the playout timestamp
    // the difference wraps to a huge value, which the 5000 ms sanity cap
    // below then zeroes out.
    const WebRtc_UWord32 timeStampDiff = timestamp - _playoutTimeStampRTP;
    WebRtc_UWord32 timeStampDiffMs(0);

    // timeStampDiff is unsigned, so this is effectively a != 0 check.
    if (timeStampDiff > 0)
    {
        // Convert the RTP-tick difference to milliseconds using the
        // codec's RTP clock rate.
        switch (rtpReceiveFrequency) {
        case 8000:
            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 3);
            break;
        case 16000:
            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 4);
            break;
        case 32000:
            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 5);
            break;
        case 48000:
            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff / 48);
            break;
        default:
            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
                         VoEId(_instanceId, _channelId),
                         "Channel::UpdatePacketDelay() invalid sample rate");
            timeStampDiffMs = 0;
            return -1;
        }
        // Discard implausibly large differences (> 5 s), e.g. after wraps
        // or stream discontinuities.
        if (timeStampDiffMs > 5000)
        {
            timeStampDiffMs = 0;
        }

        if (_averageDelayMs == 0)
        {
            // First measurement seeds the filter directly.
            _averageDelayMs = timeStampDiffMs;
        }
        else
        {
            // Filter average delay value using exponential filter (alpha is
            // 7/8). We derive 10*_averageDelayMs here (reduces risk of
            // rounding error) and compensate for it in GetDelayEstimate()
            // later. Adding 4/8 results in correct rounding.
            _averageDelayMs = ((_averageDelayMs*7 + 10*timeStampDiffMs + 4)>>3);
        }

        // Only measure packet spacing across consecutive sequence numbers
        // (no loss or reordering in between).
        if (sequenceNumber - _previousSequenceNumber == 1)
        {
            WebRtc_UWord16 packetDelayMs = 0;
            switch (rtpReceiveFrequency) {
            case 8000:
                packetDelayMs = static_cast<WebRtc_UWord16>(
                    (timestamp - _previousTimestamp) >> 3);
                break;
            case 16000:
                packetDelayMs = static_cast<WebRtc_UWord16>(
                    (timestamp - _previousTimestamp) >> 4);
                break;
            case 32000:
                packetDelayMs = static_cast<WebRtc_UWord16>(
                    (timestamp - _previousTimestamp) >> 5);
                break;
            case 48000:
                packetDelayMs = static_cast<WebRtc_UWord16>(
                    (timestamp - _previousTimestamp) / 48);
                break;
            }

            // Accept only plausible packetization intervals (10..60 ms).
            if (packetDelayMs >= 10 && packetDelayMs <= 60)
                _recPacketDelayMs = packetDelayMs;
        }
    }

    _previousSequenceNumber = sequenceNumber;
    _previousTimestamp = timestamp;

    return 0;
}
+
+void
+Channel::RegisterReceiveCodecsToRTPModule()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::RegisterReceiveCodecsToRTPModule()");
+
+
+ CodecInst codec;
+ const WebRtc_UWord8 nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
+
+ for (int idx = 0; idx < nSupportedCodecs; idx++)
+ {
+ // Open up the RTP/RTCP receiver for all supported codecs
+ if ((_audioCodingModule.Codec(idx, codec) == -1) ||
+ (_rtpRtcpModule->RegisterReceivePayload(codec) == -1))
+ {
+ WEBRTC_TRACE(
+ kTraceWarning,
+ kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "Channel::RegisterReceiveCodecsToRTPModule() unable"
+ " to register %s (%d/%d/%d/%d) to RTP/RTCP receiver",
+ codec.plname, codec.pltype, codec.plfreq,
+ codec.channels, codec.rate);
+ }
+ else
+ {
+ WEBRTC_TRACE(
+ kTraceInfo,
+ kTraceVoice,
+ VoEId(_instanceId, _channelId),
+ "Channel::RegisterReceiveCodecsToRTPModule() %s "
+ "(%d/%d/%d/%d) has been added to the RTP/RTCP "
+ "receiver",
+ codec.plname, codec.pltype, codec.plfreq,
+ codec.channels, codec.rate);
+ }
+ }
+}
+
+int
+Channel::ApmProcessRx(AudioFrame& audioFrame)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
+ "Channel::ApmProcessRx()");
+
+ // Reset the APM frequency if the frequency has changed
+ if (_rxAudioProcessingModulePtr->sample_rate_hz() !=
+ audioFrame.sample_rate_hz_)
+ {
+ if (_rxAudioProcessingModulePtr->set_sample_rate_hz(
+ audioFrame.sample_rate_hz_) != 0)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "AudioProcessingModule::set_sample_rate_hz("
+ "sample_rate_hz_=%u) => error",
+ _audioFrame.sample_rate_hz_);
+ }
+ }
+
+ if (_rxAudioProcessingModulePtr->ProcessStream(&audioFrame) != 0)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "AudioProcessingModule::ProcessStream() => error");
+ }
+
+ return 0;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/channel.h b/voice_engine/channel.h
new file mode 100644
index 0000000..8889bc2
--- /dev/null
+++ b/voice_engine/channel.h
@@ -0,0 +1,659 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_H
+
+#include "audio_coding_module.h"
+#include "audio_conference_mixer_defines.h"
+#include "common_types.h"
+#include "dtmf_inband.h"
+#include "dtmf_inband_queue.h"
+#include "file_player.h"
+#include "file_recorder.h"
+#include "level_indicator.h"
+#include "resampler.h"
+#include "rtp_rtcp.h"
+#include "scoped_ptr.h"
+#include "shared_data.h"
+#include "voe_audio_processing.h"
+#include "voe_network.h"
+#include "voice_engine_defines.h"
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+#include "udp_transport.h"
+#endif
+#ifdef WEBRTC_SRTP
+#include "SrtpModule.h"
+#endif
+#ifdef WEBRTC_DTMF_DETECTION
+#include "voe_dtmf.h" // TelephoneEventDetectionMethods, TelephoneEventObserver
+#endif
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class ProcessThread;
+class AudioDeviceModule;
+class RtpRtcp;
+class FileWrapper;
+class RtpDump;
+class VoiceEngineObserver;
+class VoEMediaProcess;
+class VoERTPObserver;
+class VoERTCPObserver;
+
+struct CallStatistics;
+struct ReportBlock;
+struct SenderInfo;
+
+namespace voe
+{
+class Statistics;
+class TransmitMixer;
+class OutputMixer;
+
+
+// A Channel represents one bidirectional audio stream in VoiceEngine: it
+// owns the RTP/RTCP module (and, unless external transport is used, the
+// UDP socket transport) for that stream, feeds received packets into the
+// AudioCodingModule, and supplies decoded audio to the output mixer as a
+// MixerParticipant. It implements the callback interfaces of the modules
+// it aggregates (RTP/RTCP, ACM, file player/recorder, socket transport) so
+// it can be registered as their data sink and observer.
+class Channel:
+    public RtpData,
+    public RtpFeedback,
+    public RtcpFeedback,
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    public UdpTransportData, // receiving packet from sockets
+#endif
+    public FileCallback, // receiving notification from file player & recorder
+    public Transport,
+    public RtpAudioFeedback,
+    public AudioPacketizationCallback, // receive encoded packets from the ACM
+    public ACMVADCallback, // receive voice activity from the ACM
+#ifdef WEBRTC_DTMF_DETECTION
+    public AudioCodingFeedback, // inband Dtmf detection in the ACM
+#endif
+    public MixerParticipant // supplies output mixer with audio frames
+{
+public:
+    enum {KNumSocketThreads = 1};
+    enum {KNumberOfSocketBuffers = 8};
+public:
+    virtual ~Channel();
+    static WebRtc_Word32 CreateChannel(Channel*& channel,
+                                       const WebRtc_Word32 channelId,
+                                       const WebRtc_UWord32 instanceId);
+    Channel(const WebRtc_Word32 channelId, const WebRtc_UWord32 instanceId);
+    WebRtc_Word32 Init();
+    WebRtc_Word32 SetEngineInformation(
+        Statistics& engineStatistics,
+        OutputMixer& outputMixer,
+        TransmitMixer& transmitMixer,
+        ProcessThread& moduleProcessThread,
+        AudioDeviceModule& audioDeviceModule,
+        VoiceEngineObserver* voiceEngineObserver,
+        CriticalSectionWrapper* callbackCritSect);
+    WebRtc_Word32 UpdateLocalTimeStamp();
+
+public:
+    // API methods
+
+    // VoEBase
+    WebRtc_Word32 StartPlayout();
+    WebRtc_Word32 StopPlayout();
+    WebRtc_Word32 StartSend();
+    WebRtc_Word32 StopSend();
+    WebRtc_Word32 StartReceiving();
+    WebRtc_Word32 StopReceiving();
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 SetLocalReceiver(const WebRtc_UWord16 rtpPort,
+                                   const WebRtc_UWord16 rtcpPort,
+                                   const char ipAddr[64],
+                                   const char multicastIpAddr[64]);
+    WebRtc_Word32 GetLocalReceiver(int& port, int& RTCPport, char ipAddr[]);
+    WebRtc_Word32 SetSendDestination(const WebRtc_UWord16 rtpPort,
+                                     const char ipAddr[64],
+                                     const int sourcePort,
+                                     const WebRtc_UWord16 rtcpPort);
+    WebRtc_Word32 GetSendDestination(int& port, char ipAddr[64],
+                                     int& sourcePort, int& RTCPport);
+#endif
+    WebRtc_Word32 SetNetEQPlayoutMode(NetEqModes mode);
+    WebRtc_Word32 GetNetEQPlayoutMode(NetEqModes& mode);
+    WebRtc_Word32 SetNetEQBGNMode(NetEqBgnModes mode);
+    WebRtc_Word32 GetNetEQBGNMode(NetEqBgnModes& mode);
+    WebRtc_Word32 SetOnHoldStatus(bool enable, OnHoldModes mode);
+    WebRtc_Word32 GetOnHoldStatus(bool& enabled, OnHoldModes& mode);
+    WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
+    WebRtc_Word32 DeRegisterVoiceEngineObserver();
+
+    // VoECodec
+    WebRtc_Word32 GetSendCodec(CodecInst& codec);
+    WebRtc_Word32 GetRecCodec(CodecInst& codec);
+    WebRtc_Word32 SetSendCodec(const CodecInst& codec);
+    WebRtc_Word32 SetVADStatus(bool enableVAD, ACMVADMode mode,
+                               bool disableDTX);
+    WebRtc_Word32 GetVADStatus(bool& enabledVAD, ACMVADMode& mode,
+                               bool& disabledDTX);
+    WebRtc_Word32 SetRecPayloadType(const CodecInst& codec);
+    WebRtc_Word32 GetRecPayloadType(CodecInst& codec);
+    WebRtc_Word32 SetAMREncFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRDecFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRWbEncFormat(AmrMode mode);
+    WebRtc_Word32 SetAMRWbDecFormat(AmrMode mode);
+    WebRtc_Word32 SetSendCNPayloadType(int type, PayloadFrequencies frequency);
+    WebRtc_Word32 SetISACInitTargetRate(int rateBps, bool useFixedFrameSize);
+    WebRtc_Word32 SetISACMaxRate(int rateBps);
+    WebRtc_Word32 SetISACMaxPayloadSize(int sizeBytes);
+
+    // VoENetwork
+    WebRtc_Word32 RegisterExternalTransport(Transport& transport);
+    WebRtc_Word32 DeRegisterExternalTransport();
+    WebRtc_Word32 ReceivedRTPPacket(const WebRtc_Word8* data,
+                                    WebRtc_Word32 length);
+    WebRtc_Word32 ReceivedRTCPPacket(const WebRtc_Word8* data,
+                                     WebRtc_Word32 length);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 GetSourceInfo(int& rtpPort, int& rtcpPort, char ipAddr[64]);
+    WebRtc_Word32 EnableIPv6();
+    bool IPv6IsEnabled() const;
+    WebRtc_Word32 SetSourceFilter(int rtpPort, int rtcpPort,
+                                  const char ipAddr[64]);
+    WebRtc_Word32 GetSourceFilter(int& rtpPort, int& rtcpPort, char ipAddr[64]);
+    WebRtc_Word32 SetSendTOS(int DSCP, int priority, bool useSetSockopt);
+    WebRtc_Word32 GetSendTOS(int &DSCP, int& priority, bool &useSetSockopt);
+#if defined(_WIN32)
+    WebRtc_Word32 SetSendGQoS(bool enable, int serviceType, int overrideDSCP);
+    WebRtc_Word32 GetSendGQoS(bool &enabled, int &serviceType,
+                              int &overrideDSCP);
+#endif
+#endif
+    WebRtc_Word32 SetPacketTimeoutNotification(bool enable, int timeoutSeconds);
+    WebRtc_Word32 GetPacketTimeoutNotification(bool& enabled,
+                                               int& timeoutSeconds);
+    WebRtc_Word32 RegisterDeadOrAliveObserver(VoEConnectionObserver& observer);
+    WebRtc_Word32 DeRegisterDeadOrAliveObserver();
+    WebRtc_Word32 SetPeriodicDeadOrAliveStatus(bool enable,
+                                               int sampleTimeSeconds);
+    WebRtc_Word32 GetPeriodicDeadOrAliveStatus(bool& enabled,
+                                               int& sampleTimeSeconds);
+    WebRtc_Word32 SendUDPPacket(const void* data, unsigned int length,
+                                int& transmittedBytes, bool useRtcpSocket);
+
+    // VoEFile
+    int StartPlayingFileLocally(const char* fileName, const bool loop,
+                                const FileFormats format,
+                                const int startPosition,
+                                const float volumeScaling,
+                                const int stopPosition,
+                                const CodecInst* codecInst);
+    int StartPlayingFileLocally(InStream* stream, const FileFormats format,
+                                const int startPosition,
+                                const float volumeScaling,
+                                const int stopPosition,
+                                const CodecInst* codecInst);
+    int StopPlayingFileLocally();
+    int IsPlayingFileLocally() const;
+    int RegisterFilePlayingToMixer();
+    int ScaleLocalFilePlayout(const float scale);
+    int GetLocalPlayoutPosition(int& positionMs);
+    int StartPlayingFileAsMicrophone(const char* fileName, const bool loop,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+    int StartPlayingFileAsMicrophone(InStream* stream,
+                                     const FileFormats format,
+                                     const int startPosition,
+                                     const float volumeScaling,
+                                     const int stopPosition,
+                                     const CodecInst* codecInst);
+    int StopPlayingFileAsMicrophone();
+    int IsPlayingFileAsMicrophone() const;
+    int ScaleFileAsMicrophonePlayout(const float scale);
+    int StartRecordingPlayout(const char* fileName, const CodecInst* codecInst);
+    int StartRecordingPlayout(OutStream* stream, const CodecInst* codecInst);
+    int StopRecordingPlayout();
+
+    void SetMixWithMicStatus(bool mix);
+
+    // VoEExternalMediaProcessing
+    int RegisterExternalMediaProcessing(ProcessingTypes type,
+                                        VoEMediaProcess& processObject);
+    int DeRegisterExternalMediaProcessing(ProcessingTypes type);
+
+    // VoEVolumeControl
+    int GetSpeechOutputLevel(WebRtc_UWord32& level) const;
+    int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level) const;
+    int SetMute(const bool enable);
+    bool Mute() const;
+    int SetOutputVolumePan(float left, float right);
+    int GetOutputVolumePan(float& left, float& right) const;
+    int SetChannelOutputVolumeScaling(float scaling);
+    int GetChannelOutputVolumeScaling(float& scaling) const;
+
+    // VoECallReport
+    void ResetDeadOrAliveCounters();
+    int ResetRTCPStatistics();
+    int GetRoundTripTimeSummary(StatVal& delaysMs) const;
+    int GetDeadOrAliveCounters(int& countDead, int& countAlive) const;
+
+    // VoENetEqStats
+    int GetNetworkStatistics(NetworkStatistics& stats);
+
+    // VoEVideoSync
+    int GetDelayEstimate(int& delayMs) const;
+    int SetMinimumPlayoutDelay(int delayMs);
+    int GetPlayoutTimestamp(unsigned int& timestamp);
+    int SetInitTimestamp(unsigned int timestamp);
+    int SetInitSequenceNumber(short sequenceNumber);
+
+    // VoEVideoSyncExtended
+    int GetRtpRtcp(RtpRtcp* &rtpRtcpModule) const;
+
+    // VoEEncryption
+#ifdef WEBRTC_SRTP
+    int EnableSRTPSend(
+            CipherTypes cipherType,
+            int cipherKeyLength,
+            AuthenticationTypes authType,
+            int authKeyLength,
+            int authTagLength,
+            SecurityLevels level,
+            const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+            bool useForRTCP);
+    int DisableSRTPSend();
+    int EnableSRTPReceive(
+            CipherTypes cipherType,
+            int cipherKeyLength,
+            AuthenticationTypes authType,
+            int authKeyLength,
+            int authTagLength,
+            SecurityLevels level,
+            const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+            bool useForRTCP);
+    int DisableSRTPReceive();
+#endif
+    int RegisterExternalEncryption(Encryption& encryption);
+    int DeRegisterExternalEncryption();
+
+    // VoEDtmf
+    int SendTelephoneEventOutband(unsigned char eventCode, int lengthMs,
+                                  int attenuationDb, bool playDtmfEvent);
+    int SendTelephoneEventInband(unsigned char eventCode, int lengthMs,
+                                 int attenuationDb, bool playDtmfEvent);
+    int SetDtmfPlayoutStatus(bool enable);
+    bool DtmfPlayoutStatus() const;
+    int SetSendTelephoneEventPayloadType(unsigned char type);
+    int GetSendTelephoneEventPayloadType(unsigned char& type);
+#ifdef WEBRTC_DTMF_DETECTION
+    int RegisterTelephoneEventDetection(
+        TelephoneEventDetectionMethods detectionMethod,
+        VoETelephoneEventObserver& observer);
+    int DeRegisterTelephoneEventDetection();
+    int GetTelephoneEventDetectionStatus(
+        bool& enabled,
+        TelephoneEventDetectionMethods& detectionMethod);
+#endif
+
+    // VoEAudioProcessingImpl
+    int UpdateRxVadDetection(AudioFrame& audioFrame);
+    int RegisterRxVadObserver(VoERxVadCallback &observer);
+    int DeRegisterRxVadObserver();
+    int VoiceActivityIndicator(int &activity);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+    int SetRxAgcStatus(const bool enable, const AgcModes mode);
+    int GetRxAgcStatus(bool& enabled, AgcModes& mode);
+    int SetRxAgcConfig(const AgcConfig config);
+    int GetRxAgcConfig(AgcConfig& config);
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NR
+    int SetRxNsStatus(const bool enable, const NsModes mode);
+    int GetRxNsStatus(bool& enabled, NsModes& mode);
+#endif
+
+    // VoERTP_RTCP
+    int RegisterRTPObserver(VoERTPObserver& observer);
+    int DeRegisterRTPObserver();
+    int RegisterRTCPObserver(VoERTCPObserver& observer);
+    int DeRegisterRTCPObserver();
+    int SetLocalSSRC(unsigned int ssrc);
+    int GetLocalSSRC(unsigned int& ssrc);
+    int GetRemoteSSRC(unsigned int& ssrc);
+    int GetRemoteCSRCs(unsigned int arrCSRC[15]);
+    int SetRTPAudioLevelIndicationStatus(bool enable, unsigned char ID);
+    int GetRTPAudioLevelIndicationStatus(bool& enable, unsigned char& ID);
+    int SetRTCPStatus(bool enable);
+    int GetRTCPStatus(bool& enabled);
+    int SetRTCP_CNAME(const char cName[256]);
+    int GetRTCP_CNAME(char cName[256]);
+    int GetRemoteRTCP_CNAME(char cName[256]);
+    int GetRemoteRTCPData(unsigned int& NTPHigh, unsigned int& NTPLow,
+                          unsigned int& timestamp,
+                          unsigned int& playoutTimestamp, unsigned int* jitter,
+                          unsigned short* fractionLost);
+    int SendApplicationDefinedRTCPPacket(const unsigned char subType,
+                                         unsigned int name, const char* data,
+                                         unsigned short dataLengthInBytes);
+    int GetRTPStatistics(unsigned int& averageJitterMs,
+                         unsigned int& maxJitterMs,
+                         unsigned int& discardedPackets);
+    int GetRemoteRTCPSenderInfo(SenderInfo* sender_info);
+    int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
+    int GetRTPStatistics(CallStatistics& stats);
+    int SetFECStatus(bool enable, int redPayloadtype);
+    int GetFECStatus(bool& enabled, int& redPayloadtype);
+    int StartRTPDump(const char fileNameUTF8[1024], RTPDirections direction);
+    int StopRTPDump(RTPDirections direction);
+    bool RTPDumpIsActive(RTPDirections direction);
+    int InsertExtraRTPPacket(unsigned char payloadType, bool markerBit,
+                             const char* payloadData,
+                             unsigned short payloadSize);
+
+public:
+    // From AudioPacketizationCallback in the ACM
+    WebRtc_Word32 SendData(FrameType frameType,
+                           WebRtc_UWord8 payloadType,
+                           WebRtc_UWord32 timeStamp,
+                           const WebRtc_UWord8* payloadData,
+                           WebRtc_UWord16 payloadSize,
+                           const RTPFragmentationHeader* fragmentation);
+    // From ACMVADCallback in the ACM
+    WebRtc_Word32 InFrameType(WebRtc_Word16 frameType);
+
+#ifdef WEBRTC_DTMF_DETECTION
+public: // From AudioCodingFeedback in the ACM
+    int IncomingDtmf(const WebRtc_UWord8 digitDtmf, const bool end);
+#endif
+
+public:
+    WebRtc_Word32 OnRxVadDetected(const int vadDecision);
+
+public:
+    // From RtpData in the RTP/RTCP module
+    WebRtc_Word32 OnReceivedPayloadData(const WebRtc_UWord8* payloadData,
+                                        const WebRtc_UWord16 payloadSize,
+                                        const WebRtcRTPHeader* rtpHeader);
+
+public:
+    // From RtpFeedback in the RTP/RTCP module
+    WebRtc_Word32 OnInitializeDecoder(
+            const WebRtc_Word32 id,
+            const WebRtc_Word8 payloadType,
+            const char payloadName[RTP_PAYLOAD_NAME_SIZE],
+            const int frequency,
+            const WebRtc_UWord8 channels,
+            const WebRtc_UWord32 rate);
+
+    void OnPacketTimeout(const WebRtc_Word32 id);
+
+    void OnReceivedPacket(const WebRtc_Word32 id,
+                          const RtpRtcpPacketType packetType);
+
+    void OnPeriodicDeadOrAlive(const WebRtc_Word32 id,
+                               const RTPAliveType alive);
+
+    void OnIncomingSSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 SSRC);
+
+    void OnIncomingCSRCChanged(const WebRtc_Word32 id,
+                               const WebRtc_UWord32 CSRC, const bool added);
+
+public:
+    // From RtcpFeedback in the RTP/RTCP module
+    void OnApplicationDataReceived(const WebRtc_Word32 id,
+                                   const WebRtc_UWord8 subType,
+                                   const WebRtc_UWord32 name,
+                                   const WebRtc_UWord16 length,
+                                   const WebRtc_UWord8* data);
+
+public:
+    // From RtpAudioFeedback in the RTP/RTCP module
+    void OnReceivedTelephoneEvent(const WebRtc_Word32 id,
+                                  const WebRtc_UWord8 event,
+                                  const bool endOfEvent);
+
+    void OnPlayTelephoneEvent(const WebRtc_Word32 id,
+                              const WebRtc_UWord8 event,
+                              const WebRtc_UWord16 lengthMs,
+                              const WebRtc_UWord8 volume);
+
+public:
+    // From UdpTransportData in the Socket Transport module
+    void IncomingRTPPacket(const WebRtc_Word8* incomingRtpPacket,
+                           const WebRtc_Word32 rtpPacketLength,
+                           const char* fromIP,
+                           const WebRtc_UWord16 fromPort);
+
+    void IncomingRTCPPacket(const WebRtc_Word8* incomingRtcpPacket,
+                            const WebRtc_Word32 rtcpPacketLength,
+                            const char* fromIP,
+                            const WebRtc_UWord16 fromPort);
+
+public:
+    // From Transport (called by the RTP/RTCP module)
+    int SendPacket(int /*channel*/, const void *data, int len);
+    int SendRTCPPacket(int /*channel*/, const void *data, int len);
+
+public:
+    // From MixerParticipant
+    WebRtc_Word32 GetAudioFrame(const WebRtc_Word32 id,
+                                AudioFrame& audioFrame);
+    WebRtc_Word32 NeededFrequency(const WebRtc_Word32 id);
+
+public:
+    // From MonitorObserver
+    void OnPeriodicProcess();
+
+public:
+    // From FileCallback
+    void PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs);
+    void RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs);
+    void PlayFileEnded(const WebRtc_Word32 id);
+    void RecordFileEnded(const WebRtc_Word32 id);
+
+public:
+    // Inlined accessors for state owned by this channel.
+    WebRtc_UWord32 InstanceId() const
+    {
+        return _instanceId;
+    }
+    WebRtc_Word32 ChannelId() const
+    {
+        return _channelId;
+    }
+    bool Playing() const
+    {
+        return _playing;
+    }
+    bool Sending() const
+    {
+        // A lock is needed because |_sending| is accessed by both
+        // TransmitMixer::PrepareDemux() and StartSend()/StopSend(), which
+        // are called by different threads.
+        CriticalSectionScoped cs(&_callbackCritSect);
+        return _sending;
+    }
+    bool Receiving() const
+    {
+        return _receiving;
+    }
+    bool ExternalTransport() const
+    {
+        return _externalTransport;
+    }
+    bool OutputIsOnHold() const
+    {
+        return _outputIsOnHold;
+    }
+    bool InputIsOnHold() const
+    {
+        return _inputIsOnHold;
+    }
+    RtpRtcp* RtpRtcpModulePtr() const
+    {
+        return _rtpRtcpModule.get();
+    }
+    WebRtc_Word8 OutputEnergyLevel() const
+    {
+        return _outputAudioLevel.Level();
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    bool SendSocketsInitialized() const
+    {
+        return _socketTransportModule.SendSocketsInitialized();
+    }
+    bool ReceiveSocketsInitialized() const
+    {
+        return _socketTransportModule.ReceiveSocketsInitialized();
+    }
+#endif
+    WebRtc_UWord32 Demultiplex(const AudioFrame& audioFrame);
+    WebRtc_UWord32 PrepareEncodeAndSend(int mixingFrequency);
+    WebRtc_UWord32 EncodeAndSend();
+
+private:
+    // Internal helpers used by the public processing methods.
+    int InsertInbandDtmfTone();
+    WebRtc_Word32
+            MixOrReplaceAudioWithFile(const int mixingFrequency);
+    WebRtc_Word32 MixAudioWithFile(AudioFrame& audioFrame,
+                                   const int mixingFrequency);
+    WebRtc_Word32 GetPlayoutTimeStamp(WebRtc_UWord32& playoutTimestamp);
+    void UpdateDeadOrAliveCounters(bool alive);
+    WebRtc_Word32 SendPacketRaw(const void *data, int len, bool RTCP);
+    WebRtc_Word32 UpdatePacketDelay(const WebRtc_UWord32 timestamp,
+                                    const WebRtc_UWord16 sequenceNumber);
+    void RegisterReceiveCodecsToRTPModule();
+    int ApmProcessRx(AudioFrame& audioFrame);
+
+private:
+    // Identification and synchronization.
+    CriticalSectionWrapper& _fileCritSect;
+    CriticalSectionWrapper& _callbackCritSect;
+    WebRtc_UWord32 _instanceId;
+    WebRtc_Word32 _channelId;
+
+private:
+    // Modules aggregated (owned or referenced) by this channel.
+    scoped_ptr<RtpRtcp> _rtpRtcpModule;
+    AudioCodingModule& _audioCodingModule;
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_UWord8 _numSocketThreads;
+    UdpTransport& _socketTransportModule;
+#endif
+#ifdef WEBRTC_SRTP
+    SrtpModule& _srtpModule;
+#endif
+    RtpDump& _rtpDumpIn;
+    RtpDump& _rtpDumpOut;
+private:
+    // Media-path state (file play/record, DTMF, external media, buffers).
+    AudioLevel _outputAudioLevel;
+    bool _externalTransport;
+    AudioFrame _audioFrame;
+    WebRtc_UWord8 _audioLevel_dBov;
+    FilePlayer* _inputFilePlayerPtr;
+    FilePlayer* _outputFilePlayerPtr;
+    FileRecorder* _outputFileRecorderPtr;
+    int _inputFilePlayerId;
+    int _outputFilePlayerId;
+    int _outputFileRecorderId;
+    bool _inputFilePlaying;
+    bool _outputFilePlaying;
+    bool _outputFileRecording;
+    DtmfInbandQueue _inbandDtmfQueue;
+    DtmfInband _inbandDtmfGenerator;
+    bool _inputExternalMedia;
+    bool _outputExternalMedia;
+    VoEMediaProcess* _inputExternalMediaCallbackPtr;
+    VoEMediaProcess* _outputExternalMediaCallbackPtr;
+    WebRtc_UWord8* _encryptionRTPBufferPtr;
+    WebRtc_UWord8* _decryptionRTPBufferPtr;
+    WebRtc_UWord8* _encryptionRTCPBufferPtr;
+    WebRtc_UWord8* _decryptionRTCPBufferPtr;
+    WebRtc_UWord32 _timeStamp;
+    WebRtc_UWord8 _sendTelephoneEventPayloadType;
+    WebRtc_UWord32 _playoutTimeStampRTP;
+    WebRtc_UWord32 _playoutTimeStampRTCP;
+    WebRtc_UWord32 _numberOfDiscardedPackets;
+private:
+    // uses
+    Statistics* _engineStatisticsPtr;
+    OutputMixer* _outputMixerPtr;
+    TransmitMixer* _transmitMixerPtr;
+    ProcessThread* _moduleProcessThreadPtr;
+    AudioDeviceModule* _audioDeviceModulePtr;
+    VoiceEngineObserver* _voiceEngineObserverPtr; // owned by base
+    CriticalSectionWrapper* _callbackCritSectPtr; // owned by base
+    Transport* _transportPtr; // WebRtc socket or external transport
+    Encryption* _encryptionPtr; // WebRtc SRTP or external encryption
+    scoped_ptr<AudioProcessing> _rtpAudioProc;
+    AudioProcessing* _rxAudioProcessingModulePtr; // far end AudioProcessing
+#ifdef WEBRTC_DTMF_DETECTION
+    VoETelephoneEventObserver* _telephoneEventDetectionPtr;
+#endif
+    VoERxVadCallback* _rxVadObserverPtr;
+    WebRtc_Word32 _oldVadDecision;
+    WebRtc_Word32 _sendFrameType; // Send data is voice, 1-voice, 0-otherwise
+    VoERTPObserver* _rtpObserverPtr;
+    VoERTCPObserver* _rtcpObserverPtr;
+private:
+    // VoEBase
+    bool _outputIsOnHold;
+    bool _externalPlayout;
+    bool _inputIsOnHold;
+    bool _playing;
+    bool _sending;
+    bool _receiving;
+    bool _mixFileWithMicrophone;
+    bool _rtpObserver;
+    bool _rtcpObserver;
+    // VoEVolumeControl
+    bool _mute;
+    float _panLeft;
+    float _panRight;
+    float _outputGain;
+    // VoEEncryption
+    bool _encrypting;
+    bool _decrypting;
+    // VoEDtmf
+    bool _playOutbandDtmfEvent;
+    bool _playInbandDtmfEvent;
+    bool _inbandTelephoneEventDetection;
+    bool _outOfBandTelephoneEventDetecion;
+    // VoeRTP_RTCP
+    WebRtc_UWord8 _extraPayloadType;
+    bool _insertExtraRTPPacket;
+    bool _extraMarkerBit;
+    WebRtc_UWord32 _lastLocalTimeStamp;
+    WebRtc_Word8 _lastPayloadType;
+    bool _includeAudioLevelIndication;
+    // VoENetwork
+    bool _rtpPacketTimedOut;
+    bool _rtpPacketTimeOutIsEnabled;
+    WebRtc_UWord32 _rtpTimeOutSeconds;
+    bool _connectionObserver;
+    VoEConnectionObserver* _connectionObserverPtr;
+    WebRtc_UWord32 _countAliveDetections;
+    WebRtc_UWord32 _countDeadDetections;
+    AudioFrame::SpeechType _outputSpeechType;
+    // VoEVideoSync
+    WebRtc_UWord32 _averageDelayMs;
+    WebRtc_UWord16 _previousSequenceNumber;
+    WebRtc_UWord32 _previousTimestamp;
+    WebRtc_UWord16 _recPacketDelayMs;
+    // VoEAudioProcessing
+    bool _RxVadDetection;
+    bool _rxApmIsEnabled;
+    bool _rxAgcIsEnabled;
+    bool _rxNsIsEnabled;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_CHANNEL_H
diff --git a/voice_engine/channel_manager.cc b/voice_engine/channel_manager.cc
new file mode 100644
index 0000000..47cec4a
--- /dev/null
+++ b/voice_engine/channel_manager.cc
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel.h"
+#include "channel_manager.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+// Creates an empty channel manager for one VoiceEngine instance.
+// |instanceId| is stored so that newly created channels can be tagged with
+// the id of the owning engine instance (see NewItem()).
+ChannelManager::ChannelManager(const WebRtc_UWord32 instanceId) :
+    ChannelManagerBase(),
+    _instanceId(instanceId)
+{
+}
+
+// Destroys all remaining channels. DestroyAllItems() invokes DeleteItem()
+// for every stored channel, which deletes the Channel object.
+ChannelManager::~ChannelManager()
+{
+    ChannelManagerBase::DestroyAllItems();
+}
+
+// Allocates a new channel and returns its id through |channelId|.
+// Returns false when no free channel id is available or creation failed.
+bool ChannelManager::CreateChannel(WebRtc_Word32& channelId)
+{
+    return ChannelManagerBase::CreateItem(channelId);
+}
+
+// Removes the channel identified by |channelId| from the manager and
+// deletes it. Returns 0 on success, -1 when no such channel exists.
+WebRtc_Word32 ChannelManager::DestroyChannel(const WebRtc_Word32 channelId)
+{
+    void* removedItem = ChannelManagerBase::RemoveItem(channelId);
+    if (removedItem == NULL)
+    {
+        return -1;
+    }
+    delete static_cast<Channel*> (removedItem);
+    return 0;
+}
+
+// Returns the number of channels currently managed.
+WebRtc_Word32 ChannelManager::NumOfChannels() const
+{
+    return ChannelManagerBase::NumOfItems();
+}
+
+// Returns the compile-time upper bound on the number of channels
+// (kVoiceEngineMaxNumOfChannels).
+WebRtc_Word32 ChannelManager::MaxNumOfChannels() const
+{
+    return ChannelManagerBase::MaxNumOfItems();
+}
+
+// Factory callback invoked by ChannelManagerBase::CreateItem(). Creates a
+// Channel tagged with this manager's engine instance id; returns NULL when
+// construction fails.
+void* ChannelManager::NewItem(WebRtc_Word32 itemID)
+{
+    Channel* newChannel = NULL;
+    if (Channel::CreateChannel(newChannel, itemID, _instanceId) == -1)
+    {
+        return NULL;
+    }
+    return static_cast<void*> (newChannel);
+}
+
+// Deletion callback invoked by ChannelManagerBase when an item is removed
+// during DestroyAllItems().
+void ChannelManager::DeleteItem(void* item)
+{
+    delete static_cast<Channel*> (item);
+}
+
+// Looks up a channel by id. On success the underlying GetItem() leaves a
+// shared (read) lock held, which the caller must balance with
+// ReleaseChannel() (normally via ScopedChannel). Returns NULL when the id
+// is unknown.
+Channel* ChannelManager::GetChannel(const WebRtc_Word32 channelId) const
+{
+    return static_cast<Channel*> (ChannelManagerBase::GetItem(channelId));
+}
+
+// Releases the shared lock acquired by a successful GetChannel() or
+// GetChannels() call. Normally invoked from the ScopedChannel destructor.
+void ChannelManager::ReleaseChannel()
+{
+    ChannelManagerBase::ReleaseItem();
+}
+
+// Copies up to |numOfChannels| channel ids into |channelsArray|. On return
+// |numOfChannels| holds the number of ids actually written.
+void ChannelManager::GetChannelIds(WebRtc_Word32* channelsArray,
+                                   WebRtc_Word32& numOfChannels) const
+{
+    ChannelManagerBase::GetItemIds(channelsArray, numOfChannels);
+}
+
+// Copies all existing channels into |channels|. When at least one channel
+// exists a shared lock is acquired and left held; the ScopedChannel
+// destructor releases it via ReleaseChannel().
+void ChannelManager::GetChannels(MapWrapper& channels) const
+{
+    ChannelManagerBase::GetChannels(channels);
+}
+
+// Iterator-style constructor: snapshots all existing channels so they can
+// be traversed with GetFirstChannel()/GetNextChannel().
+ScopedChannel::ScopedChannel(ChannelManager& chManager) :
+    _chManager(chManager),
+    _channelPtr(NULL)
+{
+    // Copy all existing channels to the local map.
+    // It is not possible to utilize the ChannelPtr() API after
+    // this constructor. The intention is that this constructor
+    // is used in combination with the scoped iterator.
+    _chManager.GetChannels(_channels);
+}
+
+// Single-channel constructor: looks up |channelId| and keeps the channel
+// pinned (shared lock) for the lifetime of this scope. ChannelPtr()
+// returns NULL when the id is unknown.
+ScopedChannel::ScopedChannel(ChannelManager& chManager,
+                             WebRtc_Word32 channelId) :
+    _chManager(chManager),
+    _channelPtr(chManager.GetChannel(channelId))
+{
+}
+
+// Releases the shared lock if this scope actually acquired one (i.e. a
+// single channel was found, or the iteration snapshot is non-empty), then
+// empties the local snapshot map without deleting the channels themselves.
+ScopedChannel::~ScopedChannel()
+{
+    if (_channelPtr != NULL || _channels.Size() != 0)
+    {
+        _chManager.ReleaseChannel();
+    }
+
+    // Delete the map
+    while (_channels.Erase(_channels.First()) == 0)
+        ;
+}
+
+// Returns the channel found by the single-channel constructor, or NULL.
+// Always NULL when the iterator-style constructor was used.
+Channel* ScopedChannel::ChannelPtr()
+{
+    return _channelPtr;
+}
+
+// Starts iteration over the snapshot taken by the iterator constructor.
+// Stores the iteration state in |iterator| and returns the first channel,
+// or NULL when the snapshot is empty.
+Channel* ScopedChannel::GetFirstChannel(void*& iterator) const
+{
+    MapItem* first = _channels.First();
+    iterator = (void*) first;
+    if (first == NULL)
+    {
+        return NULL;
+    }
+    return static_cast<Channel*> (first->GetItem());
+}
+
+// Advances the iteration started by GetFirstChannel(). Returns NULL (and
+// clears |iterator|) when the snapshot is exhausted.
+Channel* ScopedChannel::GetNextChannel(void*& iterator) const
+{
+    MapItem* current = static_cast<MapItem*> (iterator);
+    if (current == NULL)
+    {
+        iterator = NULL;
+        return NULL;
+    }
+    MapItem* next = _channels.Next(current);
+    iterator = (void*) next;
+    if (next == NULL)
+    {
+        return NULL;
+    }
+    return static_cast<Channel*> (next->GetItem());
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/channel_manager.h b/voice_engine/channel_manager.h
new file mode 100644
index 0000000..6c40ef1
--- /dev/null
+++ b/voice_engine/channel_manager.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
+
+#include "channel_manager_base.h"
+#include "typedefs.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+class ScopedChannel;
+class Channel;
+
+// Owns and tracks the Channel objects of one VoiceEngine instance.
+// Storage, id allocation and locking are implemented by the private base
+// class ChannelManagerBase; this class adds Channel-typed accessors and
+// the NewItem()/DeleteItem() factory callbacks. ScopedChannel is a friend
+// so it can look up and release channels in an RAII fashion.
+class ChannelManager: private ChannelManagerBase
+{
+    friend class ScopedChannel;
+
+public:
+    bool CreateChannel(WebRtc_Word32& channelId);
+
+    WebRtc_Word32 DestroyChannel(const WebRtc_Word32 channelId);
+
+    WebRtc_Word32 MaxNumOfChannels() const;
+
+    WebRtc_Word32 NumOfChannels() const;
+
+    void GetChannelIds(WebRtc_Word32* channelsArray,
+                       WebRtc_Word32& numOfChannels) const;
+
+    ChannelManager(const WebRtc_UWord32 instanceId);
+
+    ~ChannelManager();
+
+private:
+    // Non-copyable.
+    ChannelManager(const ChannelManager&);
+
+    ChannelManager& operator=(const ChannelManager&);
+
+    // Lookup/locking primitives reserved for ScopedChannel.
+    Channel* GetChannel(const WebRtc_Word32 channelId) const;
+
+    void GetChannels(MapWrapper& channels) const;
+
+    void ReleaseChannel();
+
+    // Factory callbacks required by ChannelManagerBase.
+    virtual void* NewItem(WebRtc_Word32 itemID);
+
+    virtual void DeleteItem(void* item);
+
+    // Id of the VoiceEngine instance that owns this manager's channels.
+    WebRtc_UWord32 _instanceId;
+};
+
+// RAII helper that either pins one channel (two-argument constructor) or
+// snapshots all channels for iteration (one-argument constructor). The
+// destructor releases the shared lock acquired by the lookup and clears
+// the snapshot map.
+class ScopedChannel
+{
+public:
+    // Can only be created by the channel manager
+    ScopedChannel(ChannelManager& chManager);
+
+    ScopedChannel(ChannelManager& chManager, WebRtc_Word32 channelId);
+
+    Channel* ChannelPtr();
+
+    Channel* GetFirstChannel(void*& iterator) const;
+
+    Channel* GetNextChannel(void*& iterator) const;
+
+    ~ScopedChannel();
+private:
+    ChannelManager& _chManager;
+    Channel* _channelPtr;       // Set only by the single-channel constructor.
+    MapWrapper _channels;       // Snapshot used by the iterator constructor.
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_H
diff --git a/voice_engine/channel_manager_base.cc b/voice_engine/channel_manager_base.cc
new file mode 100644
index 0000000..572720c
--- /dev/null
+++ b/voice_engine/channel_manager_base.cc
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel_manager_base.h"
+
+#include "critical_section_wrapper.h"
+#include "rw_lock_wrapper.h"
+#include <cassert>
+
+namespace webrtc
+{
+
+namespace voe
+{
+
+// Creates the two lock objects and marks every item id as free.
+ChannelManagerBase::ChannelManagerBase() :
+    _itemsCritSectPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _itemsRWLockPtr(RWLockWrapper::CreateRWLock())
+{
+    for (int i = 0; i < KMaxNumberOfItems; i++)
+    {
+        _freeItemIds[i] = true;
+    }
+}
+
+// Frees the lock objects. Deleting a NULL pointer is a no-op, so no
+// explicit NULL checks are required.
+ChannelManagerBase::~ChannelManagerBase()
+{
+    delete _itemsRWLockPtr;
+    _itemsRWLockPtr = NULL;
+    delete _itemsCritSectPtr;
+    _itemsCritSectPtr = NULL;
+}
+
+// Finds the lowest free item id, marks it as taken and returns it through
+// |itemId|. Returns false when all KMaxNumberOfItems ids are in use.
+bool ChannelManagerBase::GetFreeItemId(WebRtc_Word32& itemId)
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    for (WebRtc_Word32 id = 0; id < KMaxNumberOfItems; id++)
+    {
+        if (_freeItemIds[id])
+        {
+            _freeItemIds[id] = false;
+            itemId = id;
+            return true;
+        }
+    }
+    return false;
+}
+
+// Returns |itemId| to the free pool so it can be reused.
+// NOTE(review): does not take _itemsCritSectPtr itself; callers (e.g.
+// RemoveItem()) appear to be expected to hold the lock already — confirm.
+void ChannelManagerBase::AddFreeItemId(WebRtc_Word32 itemId)
+{
+    assert(itemId < KMaxNumberOfItems);
+    _freeItemIds[itemId] = true;
+}
+
+// Marks every item id as unavailable. Called from DestroyAllItems() (under
+// the items critical section) during teardown, so no further items can be
+// created afterwards.
+void ChannelManagerBase::RemoveFreeItemIds()
+{
+    for (int i = 0; i < KMaxNumberOfItems; i++)
+    {
+        _freeItemIds[i] = false;
+    }
+}
+
+// Reserves a free item id, creates the item via the NewItem() factory and
+// inserts it into the map. Returns true on success; on failure |itemId| is
+// left untouched only if no id could be reserved.
+bool ChannelManagerBase::CreateItem(WebRtc_Word32& itemId)
+{
+    _itemsCritSectPtr->Enter();
+    void* itemPtr;
+    itemId = -1;
+    const bool success = GetFreeItemId(itemId);
+    if (!success)
+    {
+        // All KMaxNumberOfItems ids are in use.
+        _itemsCritSectPtr->Leave();
+        return false;
+    }
+    itemPtr = NewItem(itemId);
+    if (!itemPtr)
+    {
+        // Item creation failed: return the id we just reserved to the free
+        // pool, otherwise it would stay marked as taken forever (id leak).
+        AddFreeItemId(itemId);
+        _itemsCritSectPtr->Leave();
+        return false;
+    }
+    _itemsCritSectPtr->Leave();
+    InsertItem(itemId, itemPtr);
+
+    return true;
+}
+
+// Adds |item| to the map under |itemId|. The id must not already be
+// present (it was reserved via GetFreeItemId()).
+void ChannelManagerBase::InsertItem(WebRtc_Word32 itemId, void* item)
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    assert(!_items.Find(itemId));
+    _items.Insert(itemId, item);
+}
+
+// Removes the item with the given id from the map, recycles the id and
+// returns the removed item (or 0 when the id is unknown). Takes the write
+// lock, which blocks until no reader still holds the item via
+// GetItem()/GetChannels() (released by ReleaseItem()).
+void*
+ChannelManagerBase::RemoveItem(WebRtc_Word32 itemId)
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    WriteLockScoped wlock(*_itemsRWLockPtr);
+    MapItem* it = _items.Find(itemId);
+    if (!it)
+    {
+        return 0;
+    }
+    void* returnItem = it->GetItem();
+    _items.Erase(it);
+    AddFreeItemId(itemId);
+
+    return returnItem;
+}
+
+// Deletes every stored item through the DeleteItem() callback, empties the
+// map, and finally marks all item ids as unavailable (teardown only).
+void ChannelManagerBase::DestroyAllItems()
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    for (MapItem* it = _items.First(); it != NULL; it = _items.First())
+    {
+        DeleteItem(it->GetItem());
+        _items.Erase(it);
+    }
+    RemoveFreeItemIds();
+}
+
+// Returns the number of items currently stored in the map.
+WebRtc_Word32 ChannelManagerBase::NumOfItems() const
+{
+    return _items.Size();
+}
+
+// Returns the compile-time capacity of the item id pool.
+WebRtc_Word32 ChannelManagerBase::MaxNumOfItems() const
+{
+    return static_cast<WebRtc_Word32> (KMaxNumberOfItems);
+}
+
+// Looks up an item by id. On success a shared (read) lock is acquired and
+// intentionally left held so the item cannot be removed while in use; the
+// caller must balance it with ReleaseItem(). Returns 0 without locking
+// when the id is unknown.
+void*
+ChannelManagerBase::GetItem(WebRtc_Word32 itemId) const
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    MapItem* it = _items.Find(itemId);
+    if (!it)
+    {
+        return 0;
+    }
+    _itemsRWLockPtr->AcquireLockShared();
+    return it->GetItem();
+}
+
+// Starts iteration over the item map. Stores the iteration state in
+// |iterator| and returns the first item, or 0 when the map is empty.
+void*
+ChannelManagerBase::GetFirstItem(void*& iterator) const
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    MapItem* first = _items.First();
+    iterator = (void*) first;
+    if (first == NULL)
+    {
+        return 0;
+    }
+    return first->GetItem();
+}
+
+// Advances the iteration started by GetFirstItem(). Returns 0 (and clears
+// |iterator|) when the map is exhausted.
+void*
+ChannelManagerBase::GetNextItem(void*& iterator) const
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    MapItem* current = (MapItem*) iterator;
+    if (current == NULL)
+    {
+        iterator = 0;
+        return 0;
+    }
+    MapItem* next = _items.Next(current);
+    iterator = (void*) next;
+    if (next == NULL)
+    {
+        return 0;
+    }
+    return next->GetItem();
+}
+
+// Releases the shared lock taken by a successful GetItem()/GetChannels()
+// call, allowing RemoveItem() to proceed.
+void ChannelManagerBase::ReleaseItem()
+{
+    _itemsRWLockPtr->ReleaseLockShared();
+}
+
+// Copies up to |numOfChannels| item ids into |channelsArray|; on return
+// |numOfChannels| holds the number of ids actually written.
+void ChannelManagerBase::GetItemIds(WebRtc_Word32* channelsArray,
+                                    WebRtc_Word32& numOfChannels) const
+{
+    // Guard _items while iterating; every other accessor of the map holds
+    // this critical section, so iterating without it races with
+    // CreateItem()/RemoveItem(). The lock is recursive (CreateItem() already
+    // relies on re-entry), so callers that hold it are unaffected.
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    MapItem* it = _items.First();
+    numOfChannels = (numOfChannels <= _items.Size()) ?
+        numOfChannels : _items.Size();
+    for (int i = 0; i < numOfChannels && it != NULL; i++)
+    {
+        channelsArray[i] = it->GetId();
+        it = _items.Next(it);
+    }
+}
+
+// Copies the whole id->item map into |channels|. When the map is non-empty
+// a single shared (read) lock is acquired and left held; the caller
+// releases it with ReleaseItem() (see the ScopedChannel destructor).
+void ChannelManagerBase::GetChannels(MapWrapper& channels) const
+{
+    CriticalSectionScoped cs(_itemsCritSectPtr);
+    if (_items.Size() == 0)
+    {
+        return;
+    }
+    _itemsRWLockPtr->AcquireLockShared();
+    for (MapItem* it = _items.First(); it != NULL; it = _items.Next(it))
+    {
+        channels.Insert(it->GetId(), it->GetItem());
+    }
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/channel_manager_base.h b/voice_engine/channel_manager_base.h
new file mode 100644
index 0000000..0831e43
--- /dev/null
+++ b/voice_engine/channel_manager_base.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
+#define WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
+
+#include "typedefs.h"
+#include "map_wrapper.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc
+{
+class CriticalSectionWrapper;
+class RWLockWrapper;
+
+namespace voe
+{
+
+class ScopedChannel;
+class Channel;
+
// Base class implementing a lock-protected id -> item map with free-id
// bookkeeping. Concrete subclasses supply item construction and
// destruction through NewItem()/DeleteItem().
class ChannelManagerBase
{
public:
    // Upper bound on the number of simultaneously stored items.
    enum {KMaxNumberOfItems = kVoiceEngineMaxNumOfChannels};

protected:
    // Id allocation and item insertion/removal (implemented in the .cc).
    bool CreateItem(WebRtc_Word32& itemId);

    void InsertItem(WebRtc_Word32 itemId, void* item);

    void* RemoveItem(WebRtc_Word32 itemId);

    // Returns the item for |itemId| (0 if absent). On success the shared
    // RW lock is left held; release it with ReleaseItem().
    void* GetItem(WebRtc_Word32 itemId) const;

    // Iteration: GetFirstItem() initializes the opaque |iterator| cursor,
    // GetNextItem() advances it. Both return 0 when exhausted.
    void* GetFirstItem(void*& iterator) const ;

    void* GetNextItem(void*& iterator) const;

    // Releases the shared RW lock taken by GetItem()/GetChannels().
    void ReleaseItem();

    // Free-id bookkeeping helpers (implemented in the .cc).
    void AddFreeItemId(WebRtc_Word32 itemId);

    bool GetFreeItemId(WebRtc_Word32& itemId);

    void RemoveFreeItemIds();

    void DestroyAllItems();

    // Current and maximum item counts.
    WebRtc_Word32 NumOfItems() const;

    WebRtc_Word32 MaxNumOfItems() const;

    // Copies up to |numOfChannels| ids into |channelsArray|; updates
    // |numOfChannels| with the number written.
    void GetItemIds(WebRtc_Word32* channelsArray,
                    WebRtc_Word32& numOfChannels) const;

    // Copies all (id, item) pairs into |channels|; leaves the shared RW
    // lock held when the map is non-empty.
    void GetChannels(MapWrapper& channels) const;

    // Factory/disposal hooks supplied by the concrete manager.
    virtual void* NewItem(WebRtc_Word32 itemId) = 0;

    virtual void DeleteItem(void* item) = 0;

    ChannelManagerBase();

    virtual ~ChannelManagerBase();

private:
    // Protects _items and _freeItemIds
    CriticalSectionWrapper* _itemsCritSectPtr;

    MapWrapper _items;

    bool _freeItemIds[KMaxNumberOfItems];

    // Protects channels from being destroyed while being used
    RWLockWrapper* _itemsRWLockPtr;
};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_CHANNEL_MANAGER_BASE_H
diff --git a/voice_engine/channel_unittest.cc b/voice_engine/channel_unittest.cc
new file mode 100644
index 0000000..fc78679
--- /dev/null
+++ b/voice_engine/channel_unittest.cc
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "channel.h"
+#include "gtest/gtest.h"
+
// Empty placeholder test: ensures this file compiles and links against
// channel.h so coverage tooling includes the module.
TEST(ChannelTest, EmptyTestToGetCodeCoverage) {}
diff --git a/voice_engine/dtmf_inband.cc b/voice_engine/dtmf_inband.cc
new file mode 100644
index 0000000..689bc54
--- /dev/null
+++ b/voice_engine/dtmf_inband.cc
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "dtmf_inband.h"
+
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include <cassert>
+
+namespace webrtc {
+
// Tone synthesis uses the recursion y[n] = a*2*y[n-1] - y[n-2] (see
// DtmfFix_generateSignal). First set of tables is a*2 = 2*cos(2*pi*f/fs)
// in Q14, one entry per DTMF frequency: indices 0..3 are the row tones
// 697/770/852/941 Hz, indices 4..7 the column tones 1209/1336/1477/1633 Hz.
const WebRtc_Word16 Dtmf_a_times2Tab8Khz[8]=
{
    27978, 26956, 25701, 24219,
    19073, 16325, 13085, 9314
};

const WebRtc_Word16 Dtmf_a_times2Tab16Khz[8]=
{
    31548, 31281, 30951, 30556,
    29144, 28361, 27409, 26258
};

const WebRtc_Word16 Dtmf_a_times2Tab32Khz[8]=
{
    32462,32394, 32311, 32210, 31849, 31647, 31400, 31098
};

// Second table is sin(2*pi*f/fs) in Q14, used to seed the oscillator
// state (same frequency ordering as above).

const WebRtc_Word16 Dtmf_ym2Tab8Khz[8]=
{
    8527, 9315, 10163, 11036,
    13322, 14206, 15021, 15708
};

const WebRtc_Word16 Dtmf_ym2Tab16Khz[8]=
{
    4429, 4879, 5380, 5918,
    7490, 8207, 8979, 9801
};

const WebRtc_Word16 Dtmf_ym2Tab32Khz[8]=
{
    2235, 2468, 2728, 3010, 3853, 4249, 4685, 5164
};

// Output gain table in Q14, indexed by attenuation in dB (0..36);
// consecutive entries differ by approximately -1 dB.
const WebRtc_Word16 Dtmf_dBm0kHz[37]=
{
    16141, 14386, 12821, 11427, 10184, 9077,
    8090, 7210, 6426, 5727, 5104, 4549,
    4054, 3614, 3221, 2870, 2558, 2280,
    2032, 1811, 1614, 1439, 1282, 1143,
    1018, 908, 809, 721, 643, 573,
    510, 455, 405, 361, 322, 287,
    256
};
+
+
// Creates an idle tone generator for VoE instance |id|. Default output
// rate is 8 kHz; _delaySinceLastToneMS starts at 1000 so the "recently
// played" state is initially false.
DtmfInband::DtmfInband(const WebRtc_Word32 id) :
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _id(id),
    _outputFrequencyHz(8000),
    _frameLengthSamples(0),
    _remainingSamples(0),
    _eventCode(0),
    _attenuationDb(0),
    _lengthMs(0),
    _reinit(true),
    _playing(false),
    _delaySinceLastToneMS(1000)
{
    // Clear the oscillator history (y[n-1], y[n-2]) for both tones.
    memset(_oldOutputLow, 0, sizeof(_oldOutputLow));
    memset(_oldOutputHigh, 0, sizeof(_oldOutputHigh));
}
+
DtmfInband::~DtmfInband()
{
    // _critSect is a reference to an object allocated by
    // CriticalSectionWrapper::CreateCriticalSection() in the ctor; this
    // class owns it and must delete it through the address-of.
    delete &_critSect;
}
+
+int
+DtmfInband::SetSampleRate(const WebRtc_UWord16 frequency)
+{
+ if (frequency != 8000 &&
+ frequency != 16000 &&
+ frequency != 32000)
+ {
+ // invalid sample rate
+ assert(false);
+ return -1;
+ }
+ _outputFrequencyHz = frequency;
+ return 0;
+}
+
+int
+DtmfInband::GetSampleRate(WebRtc_UWord16& frequency)
+{
+ frequency = _outputFrequencyHz;
+ return 0;
+}
+
+void
+DtmfInband::Init()
+{
+ _remainingSamples = 0;
+ _frameLengthSamples = 0;
+ _eventCode = 0;
+ _attenuationDb = 0;
+ _lengthMs = 0;
+ _reinit = true;
+ _oldOutputLow[0] = 0;
+ _oldOutputLow[1] = 0;
+ _oldOutputHigh[0] = 0;
+ _oldOutputHigh[1] = 0;
+ _delaySinceLastToneMS = 1000;
+}
+
// Queues a tone of |lengthMs| ms for DTMF event |eventCode| (0..15) at
// |attenuationDb| dB (0..36) below nominal level. An already ongoing
// tone is interrupted (a warning is traced) and the oscillator is
// re-armed. Returns 0 on success, -1 on out-of-range arguments.
int
DtmfInband::AddTone(const WebRtc_UWord8 eventCode,
                    WebRtc_Word32 lengthMs,
                    WebRtc_Word32 attenuationDb)
{
    CriticalSectionScoped lock(&_critSect);

    if (attenuationDb > 36 || eventCode > 15)
    {
        assert(false);
        return -1;
    }

    // NOTE: IsAddingTone() re-enters _critSect while we hold it; this
    // relies on the critical section being recursive -- confirm.
    if (IsAddingTone())
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
            "DtmfInband::AddTone() new tone interrupts ongoing tone");
    }

    ReInit();

    // One frame is 10 ms worth of samples at the current rate.
    _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
    _eventCode = static_cast<WebRtc_Word16> (eventCode);
    _attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
    _remainingSamples = static_cast<WebRtc_Word32>
        (lengthMs * (_outputFrequencyHz / 1000));
    _lengthMs = lengthMs;

    return 0;
}
+
+int
+DtmfInband::ResetTone()
+{
+ CriticalSectionScoped lock(&_critSect);
+
+ ReInit();
+
+ _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
+ _remainingSamples = static_cast<WebRtc_Word32>
+ (_lengthMs * (_outputFrequencyHz / 1000));
+
+ return 0;
+}
+
// Starts an open-ended tone for event |eventCode| (0..15) at
// |attenuationDb| dB (0..36); the tone plays until StopTone() is called.
// Returns -1 on invalid arguments or if a tone is already active.
int
DtmfInband::StartTone(const WebRtc_UWord8 eventCode,
                      WebRtc_Word32 attenuationDb)
{
    CriticalSectionScoped lock(&_critSect);

    if (attenuationDb > 36 || eventCode > 15)
    {
        assert(false);
        return -1;
    }

    // NOTE: IsAddingTone() re-enters _critSect while we hold it; this
    // relies on the critical section being recursive -- confirm.
    if (IsAddingTone())
    {
        return -1;
    }

    ReInit();

    // One frame is 10 ms worth of samples at the current rate.
    _frameLengthSamples = static_cast<WebRtc_Word16> (_outputFrequencyHz / 100);
    _eventCode = static_cast<WebRtc_Word16> (eventCode);
    _attenuationDb = static_cast<WebRtc_Word16> (attenuationDb);
    _playing = true;

    return 0;
}
+
+int
+DtmfInband::StopTone()
+{
+ CriticalSectionScoped lock(&_critSect);
+
+ if (!_playing)
+ {
+ return 0;
+ }
+
+ _playing = false;
+
+ return 0;
+}
+
// Shall be called between tones: arms the oscillator re-seeding that
// DtmfFix_generate() performs on the next generated frame.
void
DtmfInband::ReInit()
{
    _reinit = true;
}
+
// Returns true while a queued tone still has samples left to render, or
// while an open-ended StartTone() tone is playing.
bool
DtmfInband::IsAddingTone()
{
    CriticalSectionScoped lock(&_critSect);
    return (_remainingSamples > 0 || _playing);
}
+
// Renders the next 10 ms of the current tone into |output| (large enough
// for 320 samples, the 32 kHz frame size) and reports the number of
// samples written. Resets the "delay since last tone" counter.
// Returns -1 if generation fails (unsupported sample rate).
// NOTE(review): _remainingSamples is decremented even in StartTone()
// mode where it was never set, so it can go negative; IsAddingTone()
// still reports true via _playing -- confirm this is intended.
int
DtmfInband::Get10msTone(WebRtc_Word16 output[320],
                        WebRtc_UWord16& outputSizeInSamples)
{
    CriticalSectionScoped lock(&_critSect);
    if (DtmfFix_generate(output,
                         _eventCode,
                         _attenuationDb,
                         _frameLengthSamples,
                         _outputFrequencyHz) == -1)
    {
        return -1;
    }
    _remainingSamples -= _frameLengthSamples;
    outputSizeInSamples = _frameLengthSamples;
    _delaySinceLastToneMS = 0;
    return 0;
}
+
+void
+DtmfInband::UpdateDelaySinceLastTone()
+{
+ _delaySinceLastToneMS += kDtmfFrameSizeMs;
+ // avoid wraparound
+ if (_delaySinceLastToneMS > (1<<30))
+ {
+ _delaySinceLastToneMS = 1000;
+ }
+}
+
+WebRtc_UWord32
+DtmfInband::DelaySinceLastTone() const
+{
+ return _delaySinceLastToneMS;
+}
+
// Fills |decoded| with |frameLen| samples of the dual-tone signal for
// DTMF event |value| (0-9 = digits, 10 = '*', 11 = '#', 12-15 = 'A'-'D')
// at sample rate |fs|, attenuated by |volume| dB. On the first frame
// after ReInit() the two oscillators are re-seeded from the sin tables.
// Returns -1 for an unsupported sample rate, otherwise the result of
// DtmfFix_generateSignal().
WebRtc_Word16
DtmfInband::DtmfFix_generate(WebRtc_Word16 *decoded,
                             const WebRtc_Word16 value,
                             const WebRtc_Word16 volume,
                             const WebRtc_Word16 frameLen,
                             const WebRtc_Word16 fs)
{
    const WebRtc_Word16 *a_times2Tbl;
    const WebRtc_Word16 *y2_Table;
    WebRtc_Word16 a1_times2 = 0, a2_times2 = 0;

    // Select the coefficient tables matching the sample rate.
    if (fs==8000) {
        a_times2Tbl=Dtmf_a_times2Tab8Khz;
        y2_Table=Dtmf_ym2Tab8Khz;
    } else if (fs==16000) {
        a_times2Tbl=Dtmf_a_times2Tab16Khz;
        y2_Table=Dtmf_ym2Tab16Khz;
    } else if (fs==32000) {
        a_times2Tbl=Dtmf_a_times2Tab32Khz;
        y2_Table=Dtmf_ym2Tab32Khz;
    } else {
        return(-1);
    }

    // Low-frequency (row) tone: table indices 0..3 correspond to
    // 697, 770, 852 and 941 Hz respectively.
    if ((value==1)||(value==2)||(value==3)||(value==12)) {
        a1_times2=a_times2Tbl[0];
        if (_reinit) {
            _oldOutputLow[0]=y2_Table[0];
            _oldOutputLow[1]=0;
        }
    } else if ((value==4)||(value==5)||(value==6)||(value==13)) {
        a1_times2=a_times2Tbl[1];
        if (_reinit) {
            _oldOutputLow[0]=y2_Table[1];
            _oldOutputLow[1]=0;
        }
    } else if ((value==7)||(value==8)||(value==9)||(value==14)) {
        a1_times2=a_times2Tbl[2];
        if (_reinit) {
            _oldOutputLow[0]=y2_Table[2];
            _oldOutputLow[1]=0;
        }
    } else if ((value==10)||(value==0)||(value==11)||(value==15)) {
        a1_times2=a_times2Tbl[3];
        if (_reinit) {
            _oldOutputLow[0]=y2_Table[3];
            _oldOutputLow[1]=0;
        }
    }
    // High-frequency (column) tone: table indices 4..7 correspond to
    // 1209, 1336, 1477 and 1633 Hz. _reinit is cleared here, only after
    // BOTH oscillators have been seeded.
    if ((value==1)||(value==4)||(value==7)||(value==10)) {
        a2_times2=a_times2Tbl[4];
        if (_reinit) {
            _oldOutputHigh[0]=y2_Table[4];
            _oldOutputHigh[1]=0;
            _reinit=false;
        }
    } else if ((value==2)||(value==5)||(value==8)||(value==0)) {
        a2_times2=a_times2Tbl[5];
        if (_reinit) {
            _oldOutputHigh[0]=y2_Table[5];
            _oldOutputHigh[1]=0;
            _reinit=false;
        }
    } else if ((value==3)||(value==6)||(value==9)||(value==11)) {
        a2_times2=a_times2Tbl[6];
        if (_reinit) {
            _oldOutputHigh[0]=y2_Table[6];
            _oldOutputHigh[1]=0;
            _reinit=false;
        }
    } else if ((value==12)||(value==13)||(value==14)||(value==15)) {
        a2_times2=a_times2Tbl[7];
        if (_reinit) {
            _oldOutputHigh[0]=y2_Table[7];
            _oldOutputHigh[1]=0;
            _reinit=false;
        }
    }

    return (DtmfFix_generateSignal(a1_times2,
                                   a2_times2,
                                   volume,
                                   decoded,
                                   frameLen));
}
+
// Runs the two second-order oscillators for |length| samples, mixes them
// (low tone 3 dB below the high tone), normalizes the sum to Q14 and
// applies the gain for |volume| dB of attenuation. Coefficients and
// state are Q14 fixed-point. Always returns 0.
WebRtc_Word16
DtmfInband::DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
                                   const WebRtc_Word16 a2_times2,
                                   const WebRtc_Word16 volume,
                                   WebRtc_Word16 *signal,
                                   const WebRtc_Word16 length)
{
    int i;

    /* Generate Signal */
    for (i=0;i<length;i++) {
        WebRtc_Word32 tempVal;
        WebRtc_Word16 tempValLow, tempValHigh;

        /* Use recursion formula y[n] = a*2*y[n-1] - y[n-2] */
        /* a*2 is Q14, so the product is rounded (+8192) and shifted back. */
        tempValLow = (WebRtc_Word16)(((( (WebRtc_Word32)(a1_times2 *
            _oldOutputLow[1])) + 8192) >> 14) - _oldOutputLow[0]);
        tempValHigh = (WebRtc_Word16)(((( (WebRtc_Word32)(a2_times2 *
            _oldOutputHigh[1])) + 8192) >> 14) - _oldOutputHigh[0]);

        /* Update memory */
        _oldOutputLow[0]=_oldOutputLow[1];
        _oldOutputLow[1]=tempValLow;
        _oldOutputHigh[0]=_oldOutputHigh[1];
        _oldOutputHigh[1]=tempValHigh;

        /* Mix: kDtmfAmpHigh/kDtmfAmpLow are Q15 amplitudes. */
        tempVal = (WebRtc_Word32)(kDtmfAmpLow * tempValLow) +
            (WebRtc_Word32)(kDtmfAmpHigh * tempValHigh);

        /* Norm the signal to Q14 */
        tempVal=(tempVal+16384)>>15;

        /* Scale the signal to correct dbM0 value */
        signal[i]=(WebRtc_Word16)((tempVal*Dtmf_dBm0kHz[volume]+8192)>>14);
    }

    return(0);
}
+
+} // namespace webrtc
diff --git a/voice_engine/dtmf_inband.h b/voice_engine/dtmf_inband.h
new file mode 100644
index 0000000..806fff0
--- /dev/null
+++ b/voice_engine/dtmf_inband.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
+#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
// Generates in-band DTMF tones (10 ms frames) for insertion into the
// outgoing or played-out audio stream. Thread-safe via an internal
// critical section.
class DtmfInband
{
public:
    DtmfInband(const WebRtc_Word32 id);

    virtual ~DtmfInband();

    // Resets all tone state to the freshly-constructed defaults.
    void Init();

    // Output rate; only 8000, 16000 and 32000 Hz are accepted.
    int SetSampleRate(const WebRtc_UWord16 frequency);

    int GetSampleRate(WebRtc_UWord16& frequency);

    // Queues a fixed-length tone (eventCode 0..15, attenuation 0..36 dB).
    int AddTone(const WebRtc_UWord8 eventCode,
                WebRtc_Word32 lengthMs,
                WebRtc_Word32 attenuationDb);

    // Restarts the most recently added tone from the beginning.
    int ResetTone();
    // Starts an open-ended tone; ended by StopTone().
    int StartTone(const WebRtc_UWord8 eventCode,
                  WebRtc_Word32 attenuationDb);

    int StopTone();

    // True while a tone is queued or playing.
    bool IsAddingTone();

    // Renders the next 10 ms frame of the current tone.
    int Get10msTone(WebRtc_Word16 output[320],
                    WebRtc_UWord16& outputSizeInSamples);

    // Time since the last generated tone, and its per-frame update.
    WebRtc_UWord32 DelaySinceLastTone() const;

    void UpdateDelaySinceLastTone();

private:
    void ReInit();
    // Renders one frame of the dual tone for DTMF event |value|.
    WebRtc_Word16 DtmfFix_generate(WebRtc_Word16* decoded,
                                   const WebRtc_Word16 value,
                                   const WebRtc_Word16 volume,
                                   const WebRtc_Word16 frameLen,
                                   const WebRtc_Word16 fs);

private:
    enum {kDtmfFrameSizeMs = 10};
    enum {kDtmfAmpHigh = 32768};
    enum {kDtmfAmpLow = 23171}; // 3 dB lower than the high frequency

    // Runs the two oscillators, mixes and scales the output.
    WebRtc_Word16 DtmfFix_generateSignal(const WebRtc_Word16 a1_times2,
                                         const WebRtc_Word16 a2_times2,
                                         const WebRtc_Word16 volume,
                                         WebRtc_Word16* signal,
                                         const WebRtc_Word16 length);

private:
    CriticalSectionWrapper& _critSect;
    WebRtc_Word32 _id;
    WebRtc_UWord16 _outputFrequencyHz;  // {8000, 16000, 32000}
    WebRtc_Word16 _oldOutputLow[2];     // Data needed for oscillator model
    WebRtc_Word16 _oldOutputHigh[2];    // Data needed for oscillator model
    WebRtc_Word16 _frameLengthSamples;  // {80, 160, 320}
    WebRtc_Word32 _remainingSamples;
    WebRtc_Word16 _eventCode;           // [0, 15]
    WebRtc_Word16 _attenuationDb;       // [0, 36]
    WebRtc_Word32 _lengthMs;
    bool _reinit;  // 'true' if the oscillator should be reinit for next event
    bool _playing;
    WebRtc_UWord32 _delaySinceLastToneMS; // time since last generated tone [ms]
};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_H
diff --git a/voice_engine/dtmf_inband_queue.cc b/voice_engine/dtmf_inband_queue.cc
new file mode 100644
index 0000000..b81d827
--- /dev/null
+++ b/voice_engine/dtmf_inband_queue.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "dtmf_inband_queue.h"
+#include "trace.h"
+
+namespace webrtc {
+
// Creates an empty DTMF event queue for VoE instance |id|.
DtmfInbandQueue::DtmfInbandQueue(const WebRtc_Word32 id):
    _id(id),
    _DtmfCritsect(*CriticalSectionWrapper::CreateCriticalSection()),
    _nextEmptyIndex(0)
{
    // Zero all slots so unused entries are deterministic.
    memset(_DtmfKey,0, sizeof(_DtmfKey));
    memset(_DtmfLen,0, sizeof(_DtmfLen));
    memset(_DtmfLevel,0, sizeof(_DtmfLevel));
}
+
DtmfInbandQueue::~DtmfInbandQueue()
{
    // Owns the critical section created (by reference) in the ctor.
    delete &_DtmfCritsect;
}
+
+int
+DtmfInbandQueue::AddDtmf(WebRtc_UWord8 key,
+ WebRtc_UWord16 len,
+ WebRtc_UWord8 level)
+{
+ CriticalSectionScoped lock(&_DtmfCritsect);
+
+ if (_nextEmptyIndex >= kDtmfInbandMax)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_id,-1),
+ "DtmfInbandQueue::AddDtmf() unable to add Dtmf tone");
+ return -1;
+ }
+ WebRtc_Word32 index = _nextEmptyIndex;
+ _DtmfKey[index] = key;
+ _DtmfLen[index] = len;
+ _DtmfLevel[index] = level;
+ _nextEmptyIndex++;
+ return 0;
+}
+
+WebRtc_Word8
+DtmfInbandQueue::NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level)
+{
+ CriticalSectionScoped lock(&_DtmfCritsect);
+
+ if(!PendingDtmf())
+ {
+ return -1;
+ }
+ WebRtc_Word8 nextDtmf = _DtmfKey[0];
+ *len=_DtmfLen[0];
+ *level=_DtmfLevel[0];
+
+ memmove(&(_DtmfKey[0]), &(_DtmfKey[1]),
+ _nextEmptyIndex*sizeof(WebRtc_UWord8));
+ memmove(&(_DtmfLen[0]), &(_DtmfLen[1]),
+ _nextEmptyIndex*sizeof(WebRtc_UWord16));
+ memmove(&(_DtmfLevel[0]), &(_DtmfLevel[1]),
+ _nextEmptyIndex*sizeof(WebRtc_UWord8));
+
+ _nextEmptyIndex--;
+ return nextDtmf;
+}
+
// Returns true if at least one event is waiting in the queue.
// NOTE(review): reads _nextEmptyIndex without taking _DtmfCritsect;
// the internal call from NextDtmf() already holds the lock, but
// external callers race with AddDtmf()/NextDtmf() -- confirm.
bool
DtmfInbandQueue::PendingDtmf()
{
    return(_nextEmptyIndex>0);
}
+
+void
+DtmfInbandQueue::ResetDtmf()
+{
+ _nextEmptyIndex = 0;
+}
+
+} // namespace webrtc
diff --git a/voice_engine/dtmf_inband_queue.h b/voice_engine/dtmf_inband_queue.h
new file mode 100644
index 0000000..b3bd39e
--- /dev/null
+++ b/voice_engine/dtmf_inband_queue.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
+#define WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
+
+#include "critical_section_wrapper.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+
+namespace webrtc {
+
// Fixed-capacity FIFO of pending in-band DTMF events (key, duration,
// level). Mutating operations are serialized by an internal critical
// section.
class DtmfInbandQueue
{
public:

    DtmfInbandQueue(const WebRtc_Word32 id);

    virtual ~DtmfInbandQueue();

    // Appends an event; returns -1 if the queue is full.
    int AddDtmf(WebRtc_UWord8 DtmfKey,
                WebRtc_UWord16 len,
                WebRtc_UWord8 level);

    // Pops the oldest event; returns -1 when empty.
    WebRtc_Word8 NextDtmf(WebRtc_UWord16* len, WebRtc_UWord8* level);

    // True when at least one event is queued.
    bool PendingDtmf();

    // Discards all queued events.
    void ResetDtmf();

private:
    // Maximum number of queued events.
    enum {kDtmfInbandMax = 20};

    WebRtc_Word32 _id;
    CriticalSectionWrapper& _DtmfCritsect;
    // Index of the first free slot == number of queued events.
    WebRtc_UWord8 _nextEmptyIndex;
    WebRtc_UWord8 _DtmfKey[kDtmfInbandMax];
    WebRtc_UWord16 _DtmfLen[kDtmfInbandMax];
    WebRtc_UWord8 _DtmfLevel[kDtmfInbandMax];
};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_DTMF_INBAND_QUEUE_H
diff --git a/voice_engine/include/mock/mock_voe_connection_observer.h b/voice_engine/include/mock/mock_voe_connection_observer.h
new file mode 100644
index 0000000..62e572e
--- /dev/null
+++ b/voice_engine/include/mock/mock_voe_connection_observer.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MOCK_VOE_CONNECTION_OBSERVER_H_
+#define MOCK_VOE_CONNECTION_OBSERVER_H_
+
+#include "voice_engine/include/voe_network.h"
+
+namespace webrtc {
+
// GoogleMock stub of VoEConnectionObserver for unit tests; mocks the
// periodic dead-or-alive connection callback.
class MockVoeConnectionObserver : public VoEConnectionObserver {
 public:
  MOCK_METHOD2(OnPeriodicDeadOrAlive, void(const int channel,
                                           const bool alive));
};
+
+}
+
+#endif // MOCK_VOE_CONNECTION_OBSERVER_H_
diff --git a/voice_engine/include/mock/mock_voe_observer.h b/voice_engine/include/mock/mock_voe_observer.h
new file mode 100644
index 0000000..c01320d
--- /dev/null
+++ b/voice_engine/include/mock/mock_voe_observer.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_MOCK_VOE_OBSERVER_H_
+#define WEBRTC_VOICE_ENGINE_MOCK_VOE_OBSERVER_H_
+
+#include "gmock/gmock.h"
+#include "voice_engine/include/voe_base.h"
+
+namespace webrtc {
+
// GoogleMock stub of VoiceEngineObserver for unit tests; mocks the
// runtime error/warning callback.
class MockVoEObserver: public VoiceEngineObserver {
 public:
  MockVoEObserver() {}
  virtual ~MockVoEObserver() {}

  MOCK_METHOD2(CallbackOnError, void(const int channel, const int error_code));
};
+
+}
+
+#endif // WEBRTC_VOICE_ENGINE_MOCK_VOE_OBSERVER_H_
diff --git a/voice_engine/include/voe_audio_processing.h b/voice_engine/include/voe_audio_processing.h
new file mode 100644
index 0000000..4965d33
--- /dev/null
+++ b/voice_engine/include/voe_audio_processing.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Noise Suppression (NS).
+// - Automatic Gain Control (AGC).
+// - Echo Control (EC).
+// - Receiving side VAD, NS and AGC.
+// - Measurements of instantaneous speech, noise and echo levels.
+// - Generation of AP debug recordings.
+// - Detection of keyboard typing which can disrupt a voice conversation.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEAudioProcessing* ap = VoEAudioProcessing::GetInterface(voe);
+// base->Init();
+// ap->SetEcStatus(true, kAgcAdaptiveAnalog);
+// ...
+// base->Terminate();
+// base->Release();
+// ap->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
+#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoERxVadCallback
// Callback interface for receive-side VAD notifications; register an
// implementation with VoEAudioProcessing::RegisterRxVadObserver().
class WEBRTC_DLLEXPORT VoERxVadCallback
{
public:
    // Invoked with the VAD decision for |channel|.
    // NOTE(review): vadDecision is presumably 1 = speech / 0 = silence,
    // matching VoiceActivityIndicator() -- confirm.
    virtual void OnRxVad(int channel, int vadDecision) = 0;

protected:
    // Protected: observers are never deleted through this interface.
    virtual ~VoERxVadCallback() {}
};
+
+// VoEAudioProcessing
// Sub-API for capture/render audio processing: NS, AGC, EC (and AECM),
// receive-side processing, metrics and debug recordings.
class WEBRTC_DLLEXPORT VoEAudioProcessing
{
public:
    // Factory for the VoEAudioProcessing sub-API. Increases an internal
    // reference counter if successful. Returns NULL if the API is not
    // supported or if construction fails.
    static VoEAudioProcessing* GetInterface(VoiceEngine* voiceEngine);

    // Releases the VoEAudioProcessing sub-API and decreases an internal
    // reference counter. Returns the new reference count. This value should
    // be zero for all sub-API:s before the VoiceEngine object can be safely
    // deleted.
    virtual int Release() = 0;

    // Sets Noise Suppression (NS) status and mode.
    // The NS reduces noise in the microphone signal.
    virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged) = 0;

    // Gets the NS status and mode.
    virtual int GetNsStatus(bool& enabled, NsModes& mode) = 0;

    // Sets the Automatic Gain Control (AGC) status and mode.
    // The AGC adjusts the microphone signal to an appropriate level.
    virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged) = 0;

    // Gets the AGC status and mode.
    virtual int GetAgcStatus(bool& enabled, AgcModes& mode) = 0;

    // Sets the AGC configuration.
    // Should only be used in situations where the working environment
    // is well known.
    virtual int SetAgcConfig(const AgcConfig config) = 0;

    // Gets the AGC configuration.
    virtual int GetAgcConfig(AgcConfig& config) = 0;

    // Sets the Echo Control (EC) status and mode.
    // The EC mitigates acoustic echo where a user can hear their own
    // speech repeated back due to an acoustic coupling between the
    // speaker and the microphone at the remote end.
    virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged) = 0;

    // Gets the EC status and mode.
    virtual int GetEcStatus(bool& enabled, EcModes& mode) = 0;

    // Enables the compensation of clock drift between the capture and render
    // streams by the echo canceller (i.e. only using EcMode==kEcAec). It will
    // only be enabled if supported on the current platform; otherwise an error
    // will be returned. Check if the platform is supported by calling
    // |DriftCompensationSupported()|.
    virtual int EnableDriftCompensation(bool enable) = 0;
    virtual bool DriftCompensationEnabled() = 0;
    // Static: may be queried without creating the sub-API instance.
    static bool DriftCompensationSupported();

    // Sets a delay |offset| in ms to add to the system delay reported by the
    // OS, which is used by the AEC to synchronize far- and near-end streams.
    // In some cases a system may introduce a delay which goes unreported by the
    // OS, but which is known to the user. This method can be used to compensate
    // for the unreported delay.
    virtual void SetDelayOffsetMs(int offset) = 0;
    virtual int DelayOffsetMs() = 0;

    // Modifies settings for the AEC designed for mobile devices (AECM).
    virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
                            bool enableCNG = true) = 0;

    // Gets settings for the AECM.
    virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG) = 0;

    // Enables a high pass filter on the capture signal. This removes DC bias
    // and low-frequency noise. Recommended to be enabled.
    virtual int EnableHighPassFilter(bool enable) = 0;
    virtual bool IsHighPassFilterEnabled() = 0;

    // Sets status and mode of the receiving-side (Rx) NS.
    // The Rx NS reduces noise in the received signal for the specified
    // |channel|. Intended for advanced usage only.
    virtual int SetRxNsStatus(int channel,
                              bool enable,
                              NsModes mode = kNsUnchanged) = 0;

    // Gets status and mode of the receiving-side NS.
    virtual int GetRxNsStatus(int channel,
                              bool& enabled,
                              NsModes& mode) = 0;

    // Sets status and mode of the receiving-side (Rx) AGC.
    // The Rx AGC adjusts the received signal to an appropriate level
    // for the specified |channel|. Intended for advanced usage only.
    virtual int SetRxAgcStatus(int channel,
                               bool enable,
                               AgcModes mode = kAgcUnchanged) = 0;

    // Gets status and mode of the receiving-side AGC.
    virtual int GetRxAgcStatus(int channel,
                               bool& enabled,
                               AgcModes& mode) = 0;

    // Modifies the AGC configuration on the receiving side for the
    // specified |channel|.
    virtual int SetRxAgcConfig(int channel, const AgcConfig config) = 0;

    // Gets the AGC configuration on the receiving side.
    virtual int GetRxAgcConfig(int channel, AgcConfig& config) = 0;

    // Registers a VoERxVadCallback |observer| instance and enables Rx VAD
    // notifications for the specified |channel|.
    virtual int RegisterRxVadObserver(int channel,
                                      VoERxVadCallback &observer) = 0;

    // Deregisters the VoERxVadCallback |observer| and disables Rx VAD
    // notifications for the specified |channel|.
    virtual int DeRegisterRxVadObserver(int channel) = 0;

    // Gets the VAD/DTX activity for the specified |channel|.
    // The returned value is 1 if frames of audio contains speech
    // and 0 if silence. The output is always 1 if VAD is disabled.
    virtual int VoiceActivityIndicator(int channel) = 0;

    // Enables or disables the possibility to retrieve echo metrics and delay
    // logging values during an active call. The metrics are only supported in
    // AEC.
    virtual int SetEcMetricsStatus(bool enable) = 0;

    // Gets the current EC metric status.
    virtual int GetEcMetricsStatus(bool& enabled) = 0;

    // Gets the instantaneous echo level metrics (ERL: echo return loss;
    // ERLE: echo return loss enhancement; RERL: residual ERL; A_NLP:
    // the non-linear processor's contribution).
    virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP) = 0;

    // Gets the EC internal |delay_median| and |delay_std| in ms between
    // near-end and far-end. The values are calculated over the time period
    // since the last GetEcDelayMetrics() call.
    virtual int GetEcDelayMetrics(int& delay_median, int& delay_std) = 0;

    // Enables recording of Audio Processing (AP) debugging information.
    // The file can later be used for off-line analysis of the AP performance.
    virtual int StartDebugRecording(const char* fileNameUTF8) = 0;

    // Disables recording of AP debugging information.
    virtual int StopDebugRecording() = 0;

    // Enables or disables detection of disturbing keyboard typing.
    // An error notification will be given as a callback upon detection.
    virtual int SetTypingDetectionStatus(bool enable) = 0;

    // Gets the current typing detection status.
    virtual int GetTypingDetectionStatus(bool& enabled) = 0;

    // Reports the lower of:
    // * Time in seconds since the last typing event.
    // * Time in seconds since the typing detection was enabled.
    // Returns error if typing detection is disabled.
    virtual int TimeSinceLastTyping(int &seconds) = 0;

    // Optional setting of typing detection parameters
    // Parameter with value == 0 will be ignored
    // and left with default config.
    // TODO(niklase) Remove default argument as soon as libJingle is updated!
    virtual int SetTypingDetectionParameters(int timeWindow,
                                             int costPerTyping,
                                             int reportingThreshold,
                                             int penaltyDecay,
                                             int typeEventDelay = 0) = 0;

    // Swaps the capture-side left and right audio channels when enabled. It
    // only has an effect when using a stereo send codec. The setting is
    // persistent; it will be applied whenever a stereo send codec is enabled.
    //
    // The swap is applied only to the captured audio, and not mixed files. The
    // swap will appear in file recordings and when accessing audio through the
    // external media interface.
    virtual void EnableStereoChannelSwapping(bool enable) = 0;
    virtual bool IsStereoChannelSwappingEnabled() = 0;

protected:
    VoEAudioProcessing() {}
    virtual ~VoEAudioProcessing() {}
};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_H
diff --git a/voice_engine/include/voe_base.h b/voice_engine/include/voe_base.h
new file mode 100644
index 0000000..28f465e
--- /dev/null
+++ b/voice_engine/include/voe_base.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Enables full duplex VoIP sessions via RTP using G.711 (mu-Law or A-Law).
+// - Initialization and termination.
+// - Trace information on text files or via callbacks.
+// - Multi-channel support (mixing, sending to multiple destinations etc.).
+// - Call setup (port and address) for receiving and sending sides.
+//
+// To support codecs other than G.711, the VoECodec sub-API must be utilized.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// base->StartPlayout(ch);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_H
+#define WEBRTC_VOICE_ENGINE_VOE_BASE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class AudioDeviceModule;
+
+const int kVoEDefault = -1;
+
+// VoiceEngineObserver
+class WEBRTC_DLLEXPORT VoiceEngineObserver
+{
+public:
+ // This method will be called after the occurrence of any runtime error
+ // code, or warning notification, when the observer interface has been
+ // installed using VoEBase::RegisterVoiceEngineObserver().
+ virtual void CallbackOnError(const int channel, const int errCode) = 0;
+
+protected:
+ virtual ~VoiceEngineObserver() {}
+};
+
+// VoiceEngine
+class WEBRTC_DLLEXPORT VoiceEngine
+{
+public:
+ // Creates a VoiceEngine object, which can then be used to acquire
+ // sub-APIs. Returns NULL on failure.
+ static VoiceEngine* Create();
+
+ // Deletes a created VoiceEngine object and releases the utilized resources.
+ // Note that if there are outstanding references held via other interfaces,
+ // the voice engine instance will not actually be deleted until those
+ // references have been released.
+ static bool Delete(VoiceEngine*& voiceEngine);
+
+ // Specifies the amount and type of trace information which will be
+ // created by the VoiceEngine.
+ static int SetTraceFilter(const unsigned int filter);
+
+ // Sets the name of the trace file and enables non-encrypted trace messages.
+ static int SetTraceFile(const char* fileNameUTF8,
+ const bool addFileCounter = false);
+
+ // Installs the TraceCallback implementation to ensure that the user
+ // receives callbacks for generated trace messages.
+ static int SetTraceCallback(TraceCallback* callback);
+
+ static int SetAndroidObjects(void* javaVM, void* env, void* context);
+
+protected:
+ VoiceEngine() {}
+ virtual ~VoiceEngine() {}
+};
+
+// VoEBase
+class WEBRTC_DLLEXPORT VoEBase
+{
+public:
+ // Factory for the VoEBase sub-API. Increases an internal reference
+ // counter if successful. Returns NULL if the API is not supported or if
+ // construction fails.
+ static VoEBase* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEBase sub-API and decreases an internal reference
+ // counter. Returns the new reference count. This value should be zero
+ // for all sub-API:s before the VoiceEngine object can be safely deleted.
+ virtual int Release() = 0;
+
+ // Installs the observer class to enable runtime error control and
+ // warning notifications.
+ virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer) = 0;
+
+ // Removes and disables the observer class for runtime error control
+ // and warning notifications.
+ virtual int DeRegisterVoiceEngineObserver() = 0;
+
+ // Initiates all common parts of the VoiceEngine; e.g. all
+ // encoders/decoders, the sound card and core receiving components.
+ // This method also makes it possible to install a user-defined
+ // external Audio Device Module (ADM) which implements all the audio
+ // layer functionality in a separate (reference counted) module.
+ virtual int Init(AudioDeviceModule* external_adm = NULL) = 0;
+
+ // Terminates all VoiceEngine functions and releases allocated resources.
+ virtual int Terminate() = 0;
+
+ // Retrieves the maximum number of channels that can be created.
+ virtual int MaxNumOfChannels() = 0;
+
+ // Creates a new channel and allocates the required resources for it.
+ virtual int CreateChannel() = 0;
+
+ // Deletes an existing channel and releases the utilized resources.
+ virtual int DeleteChannel(int channel) = 0;
+
+ // Sets the local receiver port and address for a specified
+ // |channel| number.
+ virtual int SetLocalReceiver(int channel, int port,
+ int RTCPport = kVoEDefault,
+ const char ipAddr[64] = NULL,
+ const char multiCastAddr[64] = NULL) = 0;
+
+ // Gets the local receiver port and address for a specified
+ // |channel| number.
+ virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
+ char ipAddr[64]) = 0;
+
+ // Sets the destination port and address for a specified |channel| number.
+ virtual int SetSendDestination(int channel, int port,
+ const char ipAddr[64],
+ int sourcePort = kVoEDefault,
+ int RTCPport = kVoEDefault) = 0;
+
+ // Gets the destination port and address for a specified |channel| number.
+ virtual int GetSendDestination(int channel, int& port, char ipAddr[64],
+ int& sourcePort, int& RTCPport) = 0;
+
+ // Prepares and initiates the VoiceEngine for reception of
+ // incoming RTP/RTCP packets on the specified |channel|.
+ virtual int StartReceive(int channel) = 0;
+
+ // Stops receiving incoming RTP/RTCP packets on the specified |channel|.
+ virtual int StopReceive(int channel) = 0;
+
+ // Starts forwarding the packets to the mixer/soundcard for a
+ // specified |channel|.
+ virtual int StartPlayout(int channel) = 0;
+
+ // Stops forwarding the packets to the mixer/soundcard for a
+ // specified |channel|.
+ virtual int StopPlayout(int channel) = 0;
+
+ // Starts sending packets to an already specified IP address and
+ // port number for a specified |channel|.
+ virtual int StartSend(int channel) = 0;
+
+ // Stops sending packets from a specified |channel|.
+ virtual int StopSend(int channel) = 0;
+
+ // Gets the version information for VoiceEngine and its components.
+ virtual int GetVersion(char version[1024]) = 0;
+
+ // Gets the last VoiceEngine error code.
+ virtual int LastError() = 0;
+
+
+ // Stops or resumes playout and transmission on a temporary basis.
+ virtual int SetOnHoldStatus(int channel, bool enable,
+ OnHoldModes mode = kHoldSendAndPlay) = 0;
+
+ // Gets the current playout and transmission status.
+ virtual int GetOnHoldStatus(int channel, bool& enabled,
+ OnHoldModes& mode) = 0;
+
+ // Sets the NetEQ playout mode for a specified |channel| number.
+ virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode) = 0;
+
+ // Gets the NetEQ playout mode for a specified |channel| number.
+ virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode) = 0;
+
+ // Sets the NetEQ background noise mode for a specified |channel| number.
+ virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode) = 0;
+
+ // Gets the NetEQ background noise mode for a specified |channel| number.
+ virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode) = 0;
+
+protected:
+ VoEBase() {}
+ virtual ~VoEBase() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_BASE_H
diff --git a/voice_engine/include/voe_call_report.h b/voice_engine/include/voe_call_report.h
new file mode 100644
index 0000000..c4d3abd
--- /dev/null
+++ b/voice_engine/include/voe_call_report.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Long-term speech and noise level metrics.
+// - Long-term echo metric statistics.
+// - Round Trip Time (RTT) statistics.
+// - Dead-or-Alive connection summary.
+// - Generation of call reports to text files.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoECallReport* report = VoECallReport::GetInterface(voe);
+// base->Init();
+// EchoStatistics stats;
+// report->GetEchoMetricSummary(stats);
+// ...
+// base->Terminate();
+// base->Release();
+// report->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
+#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoECallReport
+class WEBRTC_DLLEXPORT VoECallReport
+{
+public:
+ // Factory for the VoECallReport sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoECallReport* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoECallReport sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Performs a combined reset of all components involved in generating
+ // the call report for a specified |channel|. Pass in -1 to reset
+ // all channels.
+ virtual int ResetCallReportStatistics(int channel) = 0;
+
+ // Gets minimum, maximum and average levels for long-term echo metrics.
+ virtual int GetEchoMetricSummary(EchoStatistics& stats) = 0;
+
+ // Gets minimum, maximum and average levels for Round Trip Time (RTT)
+ // measurements.
+ virtual int GetRoundTripTimeSummary(int channel,
+ StatVal& delaysMs) = 0;
+
+ // Gets the total amount of dead and alive connection detections
+ // during a VoIP session.
+ virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
+ int& numOfAliveDetections) = 0;
+
+ // Creates a text file in ASCII format, which contains a summary
+ // of all the statistics that can be obtained by the call report sub-API.
+ virtual int WriteReportToFile(const char* fileNameUTF8) = 0;
+
+protected:
+ VoECallReport() { }
+ virtual ~VoECallReport() { }
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_H
diff --git a/voice_engine/include/voe_codec.h b/voice_engine/include/voe_codec.h
new file mode 100644
index 0000000..37f8f68
--- /dev/null
+++ b/voice_engine/include/voe_codec.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Support of non-default codecs (e.g. iLBC, iSAC, etc.).
+// - Voice Activity Detection (VAD) on a per channel basis.
+// - Possibility to specify how to map received payload types to codecs.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoECodec* codec = VoECodec::GetInterface(voe);
+// base->Init();
+// int num_of_codecs = codec->NumOfCodecs()
+// ...
+// base->Terminate();
+// base->Release();
+// codec->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_H
+#define WEBRTC_VOICE_ENGINE_VOE_CODEC_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoECodec
+{
+public:
+ // Factory for the VoECodec sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoECodec* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoECodec sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Gets the number of supported codecs.
+ virtual int NumOfCodecs() = 0;
+
+ // Get the |codec| information for a specified list |index|.
+ virtual int GetCodec(int index, CodecInst& codec) = 0;
+
+ // Sets the |codec| for the |channel| to be used for sending.
+ virtual int SetSendCodec(int channel, const CodecInst& codec) = 0;
+
+ // Gets the |codec| parameters for the sending codec on a specified
+ // |channel|.
+ virtual int GetSendCodec(int channel, CodecInst& codec) = 0;
+
+ // Gets the currently received |codec| for a specific |channel|.
+ virtual int GetRecCodec(int channel, CodecInst& codec) = 0;
+
+ // Sets the initial values of target rate and frame size for iSAC
+ // for a specified |channel|. This API is only valid if iSAC is setup
+ // to run in channel-adaptive mode
+ virtual int SetISACInitTargetRate(int channel, int rateBps,
+ bool useFixedFrameSize = false) = 0;
+
+ // Sets the maximum allowed iSAC rate which the codec may not exceed
+ // for a single packet for the specified |channel|. The maximum rate is
+ // defined as payload size per frame size in bits per second.
+ virtual int SetISACMaxRate(int channel, int rateBps) = 0;
+
+ // Sets the maximum allowed iSAC payload size for a specified |channel|.
+ // The maximum value is set independently of the frame size, i.e.
+ // 30 ms and 60 ms packets have the same limit.
+ virtual int SetISACMaxPayloadSize(int channel, int sizeBytes) = 0;
+
+ // Sets the dynamic payload type number for a particular |codec| or
+ // disables (ignores) a codec for receiving. For instance, when receiving
+ // an invite from a SIP-based client, this function can be used to change
+ // the dynamic payload type number to match that in the INVITE SDP-
+ // message. The utilized parameters in the |codec| structure are:
+ // plname, plfreq, pltype and channels.
+ virtual int SetRecPayloadType(int channel, const CodecInst& codec) = 0;
+
+ // Gets the actual payload type that is set for receiving a |codec| on a
+ // |channel|. The value it retrieves will either be the default payload
+ // type, or a value earlier set with SetRecPayloadType().
+ virtual int GetRecPayloadType(int channel, CodecInst& codec) = 0;
+
+ // Sets the payload |type| for the sending of SID-frames with background
+ // noise estimation during silence periods detected by the VAD.
+ virtual int SetSendCNPayloadType(
+ int channel, int type, PayloadFrequencies frequency = kFreq16000Hz) = 0;
+
+
+ // Sets the VAD/DTX (silence suppression) status and |mode| for a
+ // specified |channel|. Disabling VAD (through |enable|) will also disable
+ // DTX; it is not necessary to explicitly set |disableDTX| in this case.
+ virtual int SetVADStatus(int channel, bool enable,
+ VadModes mode = kVadConventional,
+ bool disableDTX = false) = 0;
+
+ // Gets the VAD/DTX status and |mode| for a specified |channel|.
+ virtual int GetVADStatus(int channel, bool& enabled, VadModes& mode,
+ bool& disabledDTX) = 0;
+
+ // Not supported
+ virtual int SetAMREncFormat(int channel, AmrMode mode) = 0;
+
+ // Not supported
+ virtual int SetAMRDecFormat(int channel, AmrMode mode) = 0;
+
+ // Not supported
+ virtual int SetAMRWbEncFormat(int channel, AmrMode mode) = 0;
+
+ // Not supported
+ virtual int SetAMRWbDecFormat(int channel, AmrMode mode) = 0;
+
+protected:
+ VoECodec() {}
+ virtual ~VoECodec() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CODEC_H
diff --git a/voice_engine/include/voe_dtmf.h b/voice_engine/include/voe_dtmf.h
new file mode 100644
index 0000000..3ed1749
--- /dev/null
+++ b/voice_engine/include/voe_dtmf.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Telephone event transmission.
+// - DTMF tone generation.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEDtmf* dtmf = VoEDtmf::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// dtmf->SendTelephoneEvent(ch, 7);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// dtmf->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_H
+#define WEBRTC_VOICE_ENGINE_VOE_DTMF_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoETelephoneEventObserver
+class WEBRTC_DLLEXPORT VoETelephoneEventObserver
+{
+public:
+ // This method will be called after the detection of an inband
+ // telephone event. The event code is given as output in the
+ // |eventCode| parameter.
+ virtual void OnReceivedTelephoneEventInband(int channel,
+ int eventCode,
+ bool endOfEvent) = 0;
+
+ // This method will be called after the detection of an out-of-band
+ // telephone event. The event code is given as output in the
+ // |eventCode| parameter.
+ virtual void OnReceivedTelephoneEventOutOfBand(
+ int channel,
+ int eventCode,
+ bool endOfEvent) = 0;
+
+protected:
+ virtual ~VoETelephoneEventObserver() {}
+};
+
+// VoEDtmf
+class WEBRTC_DLLEXPORT VoEDtmf
+{
+public:
+
+ // Factory for the VoEDtmf sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEDtmf* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEDtmf sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Sends telephone events either in-band or out-of-band.
+ virtual int SendTelephoneEvent(int channel, int eventCode,
+ bool outOfBand = true, int lengthMs = 160,
+ int attenuationDb = 10) = 0;
+
+
+ // Sets the dynamic payload |type| that should be used for telephone
+ // events.
+ virtual int SetSendTelephoneEventPayloadType(int channel,
+ unsigned char type) = 0;
+
+
+ // Gets the currently set dynamic payload |type| for telephone events.
+ virtual int GetSendTelephoneEventPayloadType(int channel,
+ unsigned char& type) = 0;
+
+ // Enables or disables local tone playout for received DTMF events
+ // out-of-band.
+ virtual int SetDtmfPlayoutStatus(int channel, bool enable) = 0;
+
+ // Gets the DTMF playout status.
+ virtual int GetDtmfPlayoutStatus(int channel, bool& enabled) = 0;
+
+ // Toggles DTMF feedback state: when a DTMF tone is sent, the same tone
+ // is played out on the speaker.
+ virtual int SetDtmfFeedbackStatus(bool enable,
+ bool directFeedback = false) = 0;
+
+ // Gets the DTMF feedback status.
+ virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback) = 0;
+
+ // Plays a DTMF feedback tone (only locally).
+ virtual int PlayDtmfTone(int eventCode, int lengthMs = 200,
+ int attenuationDb = 10) = 0;
+
+ // Starts playing out a DTMF feedback tone locally.
+ // The tone will be played out until the corresponding stop function
+ // is called.
+ virtual int StartPlayingDtmfTone(int eventCode,
+ int attenuationDb = 10) = 0;
+
+ // Stops playing out a DTMF feedback tone locally.
+ virtual int StopPlayingDtmfTone() = 0;
+
+ // Installs an instance of a VoETelephoneEventObserver derived class and
+ // activates detection of telephone events for the specified |channel|.
+ virtual int RegisterTelephoneEventDetection(
+ int channel, TelephoneEventDetectionMethods detectionMethod,
+ VoETelephoneEventObserver& observer) = 0;
+
+ // Removes an instance of a VoETelephoneEventObserver derived class and
+ // disables detection of telephone events for the specified |channel|.
+ virtual int DeRegisterTelephoneEventDetection(int channel) = 0;
+
+ // Gets the current telephone-event detection status for a specified
+ // |channel|.
+ virtual int GetTelephoneEventDetectionStatus(
+ int channel, bool& enabled,
+ TelephoneEventDetectionMethods& detectionMethod) = 0;
+
+protected:
+ VoEDtmf() {}
+ virtual ~VoEDtmf() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_DTMF_H
diff --git a/voice_engine/include/voe_encryption.h b/voice_engine/include/voe_encryption.h
new file mode 100644
index 0000000..ae3f373
--- /dev/null
+++ b/voice_engine/include/voe_encryption.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - External encryption and decryption.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEEncryption* encrypt = VoEEncryption::GetInterface(voe);
+// ...
+// encrypt->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
+#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEEncryption
+{
+public:
+ // Factory for the VoEEncryption sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEEncryption* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEEncryption sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Installs an Encryption instance and enables external encryption
+ // for the selected |channel|.
+ virtual int RegisterExternalEncryption(
+ int channel, Encryption& encryption) = 0;
+
+ // Removes an Encryption instance and disables external encryption
+ // for the selected |channel|.
+ virtual int DeRegisterExternalEncryption(int channel) = 0;
+
+ // Not supported
+ virtual int EnableSRTPSend(int channel, CipherTypes cipherType,
+ int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
+ int authTagLength, SecurityLevels level, const unsigned char key[30],
+ bool useForRTCP = false) = 0;
+
+ // Not supported
+ virtual int DisableSRTPSend(int channel) = 0;
+
+ // Not supported
+ virtual int EnableSRTPReceive(int channel, CipherTypes cipherType,
+ int cipherKeyLength, AuthenticationTypes authType, int authKeyLength,
+ int authTagLength, SecurityLevels level, const unsigned char key[30],
+ bool useForRTCP = false) = 0;
+
+ // Not supported
+ virtual int DisableSRTPReceive(int channel) = 0;
+
+protected:
+ VoEEncryption() {}
+ virtual ~VoEEncryption() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_H
diff --git a/voice_engine/include/voe_errors.h b/voice_engine/include/voe_errors.h
new file mode 100644
index 0000000..cc05970
--- /dev/null
+++ b/voice_engine/include/voe_errors.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
+#define WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
+
+// Warnings
+#define VE_PORT_NOT_DEFINED 8001
+#define VE_CHANNEL_NOT_VALID 8002
+#define VE_FUNC_NOT_SUPPORTED 8003
+#define VE_INVALID_LISTNR 8004
+#define VE_INVALID_ARGUMENT 8005
+#define VE_INVALID_PORT_NMBR 8006
+#define VE_INVALID_PLNAME 8007
+#define VE_INVALID_PLFREQ 8008
+#define VE_INVALID_PLTYPE 8009
+#define VE_INVALID_PACSIZE 8010
+#define VE_NOT_SUPPORTED 8011
+#define VE_ALREADY_LISTENING 8012
+#define VE_CHANNEL_NOT_CREATED 8013
+#define VE_MAX_ACTIVE_CHANNELS_REACHED 8014
+#define VE_REC_CANNOT_PREPARE_HEADER 8015
+#define VE_REC_CANNOT_ADD_BUFFER 8016
+#define VE_PLAY_CANNOT_PREPARE_HEADER 8017
+#define VE_ALREADY_SENDING 8018
+#define VE_INVALID_IP_ADDRESS 8019
+#define VE_ALREADY_PLAYING 8020
+#define VE_NOT_ALL_VERSION_INFO 8021
+#define VE_DTMF_OUTOF_RANGE 8022
+#define VE_INVALID_CHANNELS 8023
+#define VE_SET_PLTYPE_FAILED 8024
+#define VE_ENCRYPT_NOT_INITED 8025
+#define VE_NOT_INITED 8026
+#define VE_NOT_SENDING 8027
+#define VE_EXT_TRANSPORT_NOT_SUPPORTED 8028
+#define VE_EXTERNAL_TRANSPORT_ENABLED 8029
+#define VE_STOP_RECORDING_FAILED 8030
+#define VE_INVALID_RATE 8031
+#define VE_INVALID_PACKET 8032
+#define VE_NO_GQOS 8033
+#define VE_INVALID_TIMESTAMP 8034
+#define VE_RECEIVE_PACKET_TIMEOUT 8035
+#define VE_STILL_PLAYING_PREV_DTMF 8036
+#define VE_INIT_FAILED_WRONG_EXPIRY 8037
+#define VE_SENDING 8038
+#define VE_ENABLE_IPV6_FAILED 8039
+#define VE_FUNC_NO_STEREO 8040
+// Range 8041-8060 is not used
+#define VE_FW_TRAVERSAL_ALREADY_INITIALIZED 8081
+#define VE_PACKET_RECEIPT_RESTARTED 8082
+#define VE_NOT_ALL_INFO 8083
+#define VE_CANNOT_SET_SEND_CODEC 8084
+#define VE_CODEC_ERROR 8085
+#define VE_NETEQ_ERROR 8086
+#define VE_RTCP_ERROR 8087
+#define VE_INVALID_OPERATION 8088
+#define VE_CPU_INFO_ERROR 8089
+#define VE_SOUNDCARD_ERROR 8090
+#define VE_SPEECH_LEVEL_ERROR 8091
+#define VE_SEND_ERROR 8092
+#define VE_CANNOT_REMOVE_CONF_CHANNEL 8093
+#define VE_PLTYPE_ERROR 8094
+#define VE_SET_FEC_FAILED 8095
+#define VE_CANNOT_GET_PLAY_DATA 8096
+#define VE_APM_ERROR 8097
+#define VE_RUNTIME_PLAY_WARNING 8098
+#define VE_RUNTIME_REC_WARNING 8099
+#define VE_NOT_PLAYING 8100
+#define VE_SOCKETS_NOT_INITED 8101
+#define VE_CANNOT_GET_SOCKET_INFO 8102
+#define VE_INVALID_MULTICAST_ADDRESS 8103
+#define VE_DESTINATION_NOT_INITED 8104
+#define VE_RECEIVE_SOCKETS_CONFLICT 8105
+#define VE_SEND_SOCKETS_CONFLICT 8106
+#define VE_TYPING_NOISE_WARNING 8107
+#define VE_SATURATION_WARNING 8108
+#define VE_NOISE_WARNING 8109
+#define VE_CANNOT_GET_SEND_CODEC 8110
+#define VE_CANNOT_GET_REC_CODEC 8111
+#define VE_ALREADY_INITED 8112
+
+// Errors causing limited functionality
+#define VE_RTCP_SOCKET_ERROR 9001
+#define VE_MIC_VOL_ERROR 9002
+#define VE_SPEAKER_VOL_ERROR 9003
+#define VE_CANNOT_ACCESS_MIC_VOL 9004
+#define VE_CANNOT_ACCESS_SPEAKER_VOL 9005
+#define VE_GET_MIC_VOL_ERROR 9006
+#define VE_GET_SPEAKER_VOL_ERROR 9007
+#define VE_THREAD_RTCP_ERROR 9008
+#define VE_CANNOT_INIT_APM 9009
+#define VE_SEND_SOCKET_TOS_ERROR 9010
+#define VE_CANNOT_RETRIEVE_DEVICE_NAME 9013
+#define VE_SRTP_ERROR 9014
+// 9015 is not used
+#define VE_INTERFACE_NOT_FOUND 9016
+#define VE_TOS_GQOS_CONFLICT 9017
+#define VE_CANNOT_ADD_CONF_CHANNEL 9018
+#define VE_BUFFER_TOO_SMALL 9019
+#define VE_CANNOT_EXECUTE_SETTING 9020
+#define VE_CANNOT_RETRIEVE_SETTING 9021
+// 9022 is not used
+#define VE_RTP_KEEPALIVE_FAILED 9023
+#define VE_SEND_DTMF_FAILED 9024
+#define VE_CANNOT_RETRIEVE_CNAME 9025
+#define VE_DECRYPTION_FAILED 9026
+#define VE_ENCRYPTION_FAILED 9027
+#define VE_CANNOT_RETRIEVE_RTP_STAT 9028
+#define VE_GQOS_ERROR 9029
+#define VE_BINDING_SOCKET_TO_LOCAL_ADDRESS_FAILED 9030
+#define VE_TOS_INVALID 9031
+#define VE_TOS_ERROR 9032
+#define VE_CANNOT_RETRIEVE_VALUE 9033
+
+// Critical errors that stop voice functionality
+#define VE_PLAY_UNDEFINED_SC_ERR 10001
+#define VE_REC_CANNOT_OPEN_SC 10002
+#define VE_SOCKET_ERROR 10003
+#define VE_MMSYSERR_INVALHANDLE 10004
+#define VE_MMSYSERR_NODRIVER 10005
+#define VE_MMSYSERR_NOMEM 10006
+#define VE_WAVERR_UNPREPARED 10007
+#define VE_WAVERR_STILLPLAYING 10008
+#define VE_UNDEFINED_SC_ERR 10009
+#define VE_UNDEFINED_SC_REC_ERR 10010
+#define VE_THREAD_ERROR 10011
+#define VE_CANNOT_START_RECORDING 10012
+#define VE_PLAY_CANNOT_OPEN_SC 10013
+#define VE_NO_WINSOCK_2 10014
+#define VE_SEND_SOCKET_ERROR 10015
+#define VE_BAD_FILE 10016
+#define VE_EXPIRED_COPY 10017
+#define VE_NOT_AUTHORISED 10018
+#define VE_RUNTIME_PLAY_ERROR 10019
+#define VE_RUNTIME_REC_ERROR 10020
+#define VE_BAD_ARGUMENT 10021
+#define VE_LINUX_API_ONLY 10022
+#define VE_REC_DEVICE_REMOVED 10023
+#define VE_NO_MEMORY 10024
+#define VE_BAD_HANDLE 10025
+#define VE_RTP_RTCP_MODULE_ERROR 10026
+#define VE_AUDIO_CODING_MODULE_ERROR 10027
+#define VE_AUDIO_DEVICE_MODULE_ERROR 10028
+#define VE_CANNOT_START_PLAYOUT 10029
+#define VE_CANNOT_STOP_RECORDING 10030
+#define VE_CANNOT_STOP_PLAYOUT 10031
+#define VE_CANNOT_INIT_CHANNEL 10032
+#define VE_RECV_SOCKET_ERROR 10033
+#define VE_SOCKET_TRANSPORT_MODULE_ERROR 10034
+#define VE_AUDIO_CONF_MIX_MODULE_ERROR 10035
+
+// Warnings for other platforms (reserved range 8061-8080)
+#define VE_IGNORED_FUNCTION 8061
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_ERRORS_H
diff --git a/voice_engine/include/voe_external_media.h b/voice_engine/include/voe_external_media.h
new file mode 100644
index 0000000..a4ef7e2
--- /dev/null
+++ b/voice_engine/include/voe_external_media.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// In some cases it is desirable to use an audio source or sink which may
+// not be available to the VoiceEngine, such as a DV camera. This sub-API
+// contains functions that allow for the use of such external recording
+// sources and playout sinks. It also describes how recorded data, or data
+// to be played out, can be modified outside the VoiceEngine.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEExternalMedia* media = VoEExternalMedia::GetInterface(voe);
+// base->Init();
+// ...
+// media->SetExternalRecordingStatus(true);
+// ...
+// base->Terminate();
+// base->Release();
+// media->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEMediaProcess
+{
+public:
+ // The VoiceEngine user should override the Process() method in a
+ // derived class. Process() will be called when audio is ready to
+ // be processed. The audio can be accessed in several different modes
+ // given by the |type| parameter. The function should modify the
+ // original data and ensure that it is copied back to the |audio10ms|
+ // array. The number of samples in the frame cannot be changed.
+ // The sampling frequency will depend upon the codec used.
+ // If |isStereo| is true, audio10ms will contain 16-bit PCM data
+ // samples in interleaved stereo format (L0,R0,L1,R1,...).
+ virtual void Process(const int channel, const ProcessingTypes type,
+ WebRtc_Word16 audio10ms[], const int length,
+ const int samplingFreq, const bool isStereo) = 0;
+
+protected:
+ virtual ~VoEMediaProcess() {}
+};
+
+class WEBRTC_DLLEXPORT VoEExternalMedia
+{
+public:
+ // Factory for the VoEExternalMedia sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEExternalMedia* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEExternalMedia sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Installs a VoEMediaProcess derived instance and activates external
+ // media for the specified |channel| and |type|.
+ virtual int RegisterExternalMediaProcessing(
+ int channel, ProcessingTypes type, VoEMediaProcess& processObject) = 0;
+
+ // Removes the VoEMediaProcess derived instance and deactivates external
+ // media for the specified |channel| and |type|.
+ virtual int DeRegisterExternalMediaProcessing(
+ int channel, ProcessingTypes type) = 0;
+
+ // Toggles state of external recording.
+ virtual int SetExternalRecordingStatus(bool enable) = 0;
+
+ // Toggles state of external playout.
+ virtual int SetExternalPlayoutStatus(bool enable) = 0;
+
+ // This function accepts externally recorded audio. During transmission,
+ // this method should be called at as regular an interval as possible
+ // with frames of corresponding size.
+ virtual int ExternalRecordingInsertData(
+ const WebRtc_Word16 speechData10ms[], int lengthSamples,
+ int samplingFreqHz, int current_delay_ms) = 0;
+
+ // This function gets audio for an external playout sink.
+ // During transmission, this function should be called every ~10 ms
+ // to obtain a new 10 ms frame of audio. The length of the block will
+ // be 160, 320, 440 or 480 samples (for 16, 32, 44 or 48 kHz sampling
+ // rates respectively).
+ virtual int ExternalPlayoutGetData(
+ WebRtc_Word16 speechData10ms[], int samplingFreqHz,
+ int current_delay_ms, int& lengthSamples) = 0;
+
+protected:
+ VoEExternalMedia() {}
+ virtual ~VoEExternalMedia() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_H
diff --git a/voice_engine/include/voe_file.h b/voice_engine/include/voe_file.h
new file mode 100644
index 0000000..d968dcf
--- /dev/null
+++ b/voice_engine/include/voe_file.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - File playback.
+// - File recording.
+// - File conversion.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEFile* file = VoEFile::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// base->StartPlayout(ch);
+// file->StartPlayingFileAsMicrophone(ch, "data_file_16kHz.pcm", true);
+// ...
+// file->StopPlayingFileAsMicrophone(ch);
+// base->StopPlayout(ch);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// file->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_H
+#define WEBRTC_VOICE_ENGINE_VOE_FILE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEFile
+{
+public:
+ // Factory for the VoEFile sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEFile* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEFile sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Starts playing and mixing files with the local speaker signal for
+ // playout.
+ virtual int StartPlayingFileLocally(
+ int channel,
+ const char fileNameUTF8[1024],
+ bool loop = false,
+ FileFormats format = kFileFormatPcm16kHzFile,
+ float volumeScaling = 1.0,
+ int startPointMs = 0,
+ int stopPointMs = 0) = 0;
+
+ // Starts playing and mixing streams with the local speaker signal for
+ // playout.
+ virtual int StartPlayingFileLocally(
+ int channel,
+ InStream* stream,
+ FileFormats format = kFileFormatPcm16kHzFile,
+ float volumeScaling = 1.0,
+ int startPointMs = 0, int stopPointMs = 0) = 0;
+
+ // Stops playback of a file on a specific |channel|.
+ virtual int StopPlayingFileLocally(int channel) = 0;
+
+ // Returns the current file playing state for a specific |channel|.
+ virtual int IsPlayingFileLocally(int channel) = 0;
+
+ // Sets the volume scaling for a speaker file that is already playing.
+ virtual int ScaleLocalFilePlayout(int channel, float scale) = 0;
+
+ // Starts reading data from a file and transmits the data either
+ // mixed with or instead of the microphone signal.
+ virtual int StartPlayingFileAsMicrophone(
+ int channel,
+ const char fileNameUTF8[1024],
+ bool loop = false ,
+ bool mixWithMicrophone = false,
+ FileFormats format = kFileFormatPcm16kHzFile,
+ float volumeScaling = 1.0) = 0;
+
+ // Starts reading data from a stream and transmits the data either
+ // mixed with or instead of the microphone signal.
+ virtual int StartPlayingFileAsMicrophone(
+ int channel,
+ InStream* stream,
+ bool mixWithMicrophone = false,
+ FileFormats format = kFileFormatPcm16kHzFile,
+ float volumeScaling = 1.0) = 0;
+
+ // Stops playing of a file as microphone signal for a specific |channel|.
+ virtual int StopPlayingFileAsMicrophone(int channel) = 0;
+
+ // Returns whether the |channel| is currently playing a file as microphone.
+ virtual int IsPlayingFileAsMicrophone(int channel) = 0;
+
+ // Sets the volume scaling for a microphone file that is already playing.
+ virtual int ScaleFileAsMicrophonePlayout(int channel, float scale) = 0;
+
+ // Starts recording the mixed playout audio.
+ virtual int StartRecordingPlayout(int channel,
+ const char* fileNameUTF8,
+ CodecInst* compression = NULL,
+ int maxSizeBytes = -1) = 0;
+
+ // Stops recording the mixed playout audio.
+ virtual int StopRecordingPlayout(int channel) = 0;
+
+ virtual int StartRecordingPlayout(int channel,
+ OutStream* stream,
+ CodecInst* compression = NULL) = 0;
+
+ // Starts recording the microphone signal to a file.
+ virtual int StartRecordingMicrophone(const char* fileNameUTF8,
+ CodecInst* compression = NULL,
+ int maxSizeBytes = -1) = 0;
+
+ // Starts recording the microphone signal to a stream.
+ virtual int StartRecordingMicrophone(OutStream* stream,
+ CodecInst* compression = NULL) = 0;
+
+ // Stops recording the microphone signal.
+ virtual int StopRecordingMicrophone() = 0;
+
+
+ // Gets the duration of a file.
+ virtual int GetFileDuration(const char* fileNameUTF8, int& durationMs,
+ FileFormats format = kFileFormatPcm16kHzFile) = 0;
+
+ // Gets the current played position of a file on a specific |channel|.
+ virtual int GetPlaybackPosition(int channel, int& positionMs) = 0;
+
+ virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
+ const char* fileNameOutUTF8) = 0;
+
+ virtual int ConvertPCMToWAV(InStream* streamIn,
+ OutStream* streamOut) = 0;
+
+ virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
+ const char* fileNameOutUTF8) = 0;
+
+ virtual int ConvertWAVToPCM(InStream* streamIn,
+ OutStream* streamOut) = 0;
+
+ virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
+ const char* fileNameOutUTF8,
+ CodecInst* compression) = 0;
+
+ virtual int ConvertPCMToCompressed(InStream* streamIn,
+ OutStream* streamOut,
+ CodecInst* compression) = 0;
+
+ virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
+ const char* fileNameOutUTF8) = 0;
+
+ virtual int ConvertCompressedToPCM(InStream* streamIn,
+ OutStream* streamOut) = 0;
+
+protected:
+ VoEFile() {}
+ virtual ~VoEFile() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_FILE_H
diff --git a/voice_engine/include/voe_hardware.h b/voice_engine/include/voe_hardware.h
new file mode 100644
index 0000000..24ed1ff
--- /dev/null
+++ b/voice_engine/include/voe_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Audio device handling.
+// - Device information.
+// - CPU load monitoring.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEHardware* hardware = VoEHardware::GetInterface(voe);
+// base->Init();
+// ...
+// int n_devices(0); hardware->GetNumOfPlayoutDevices(n_devices);
+// ...
+// base->Terminate();
+// base->Release();
+// hardware->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
+#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoEHardware
+{
+public:
+ // Factory for the VoEHardware sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEHardware* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEHardware sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Gets the number of audio devices available for recording.
+ virtual int GetNumOfRecordingDevices(int& devices) = 0;
+
+ // Gets the number of audio devices available for playout.
+ virtual int GetNumOfPlayoutDevices(int& devices) = 0;
+
+ // Gets the name of a specific recording device given by an |index|.
+ // On Windows Vista/7, it also retrieves an additional unique ID
+ // (GUID) for the recording device.
+ virtual int GetRecordingDeviceName(int index, char strNameUTF8[128],
+ char strGuidUTF8[128]) = 0;
+
+ // Gets the name of a specific playout device given by an |index|.
+ // On Windows Vista/7, it also retrieves an additional unique ID
+ // (GUID) for the playout device.
+ virtual int GetPlayoutDeviceName(int index, char strNameUTF8[128],
+ char strGuidUTF8[128]) = 0;
+
+ // Checks if the sound card is available to be opened for recording.
+ virtual int GetRecordingDeviceStatus(bool& isAvailable) = 0;
+
+ // Checks if the sound card is available to be opened for playout.
+ virtual int GetPlayoutDeviceStatus(bool& isAvailable) = 0;
+
+ // Sets the audio device used for recording.
+ virtual int SetRecordingDevice(
+ int index, StereoChannel recordingChannel = kStereoBoth) = 0;
+
+ // Sets the audio device used for playout.
+ virtual int SetPlayoutDevice(int index) = 0;
+
+ // Sets the type of audio device layer to use.
+ virtual int SetAudioDeviceLayer(AudioLayers audioLayer) = 0;
+
+ // Gets the currently used (active) audio device layer.
+ virtual int GetAudioDeviceLayer(AudioLayers& audioLayer) = 0;
+
+ // Gets the VoiceEngine's current CPU consumption in terms of the percent
+ // of total CPU availability. [Windows only]
+ virtual int GetCPULoad(int& loadPercent) = 0;
+
+ // Gets the computer's current CPU consumption in terms of the percent
+ // of the total CPU availability. This method may fail a few times on
+ // Windows because it needs a certain warm-up time before reporting the
+ // result. You should check the return value and either try again or
+ // give up when it fails.
+ virtual int GetSystemCPULoad(int& loadPercent) = 0;
+
+ // Not supported
+ virtual int ResetAudioDevice() = 0;
+
+ // Not supported
+ virtual int AudioDeviceControl(
+ unsigned int par1, unsigned int par2, unsigned int par3) = 0;
+
+ // Not supported
+ virtual int SetLoudspeakerStatus(bool enable) = 0;
+
+ // Not supported
+ virtual int GetLoudspeakerStatus(bool& enabled) = 0;
+
+ // *Experimental - not recommended for use.*
+ // Enables the Windows Core Audio built-in AEC. Fails on other platforms.
+ //
+ // Currently incompatible with the standard VoE AEC and AGC; don't attempt
+ // to enable them while this is active.
+ //
+ // Must be called before VoEBase::StartSend(). When enabled:
+ // 1. VoEBase::StartPlayout() must be called before VoEBase::StartSend().
+ // 2. VoEBase::StopSend() should be called before VoEBase::StopPlayout().
+ // The reverse order may cause garbage audio to be rendered or the
+ // capture side to halt until StopSend() is called.
+ //
+ // As a consequence, SetPlayoutDevice() should be used with caution
+ // during a call. It will function, but may cause the above issues for
+ // the duration it takes to complete. (In practice, it should complete
+ // fast enough to avoid audible degradation).
+ virtual int EnableBuiltInAEC(bool enable) = 0;
+ virtual bool BuiltInAECIsEnabled() const = 0;
+
+protected:
+ VoEHardware() {}
+ virtual ~VoEHardware() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_HARDWARE_H
diff --git a/voice_engine/include/voe_neteq_stats.h b/voice_engine/include/voe_neteq_stats.h
new file mode 100644
index 0000000..4940bed
--- /dev/null
+++ b/voice_engine/include/voe_neteq_stats.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+class WEBRTC_DLLEXPORT VoENetEqStats
+{
+public:
+ // Factory for the VoENetEqStats sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoENetEqStats* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoENetEqStats sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Get the "in-call" statistics from NetEQ.
+ // The statistics are reset after the query.
+ virtual int GetNetworkStatistics(int channel, NetworkStatistics& stats) = 0;
+
+protected:
+ VoENetEqStats() {}
+ virtual ~VoENetEqStats() {}
+};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_H
diff --git a/voice_engine/include/voe_network.h b/voice_engine/include/voe_network.h
new file mode 100644
index 0000000..10acf1c
--- /dev/null
+++ b/voice_engine/include/voe_network.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - External protocol support.
+// - Extended port and address APIs.
+// - Port and address filters.
+// - Windows GQoS functions.
+// - Packet timeout notification.
+// - Dead-or-Alive connection observations.
+// - Transmission of raw RTP/RTCP packets into existing channels.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoENetwork* netw = VoENetwork::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// netw->SetPeriodicDeadOrAliveStatus(ch, true);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// netw->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoEConnectionObserver
+class WEBRTC_DLLEXPORT VoEConnectionObserver
+{
+public:
+ // This method will be called periodically and deliver dead-or-alive
+ // notifications for a specified |channel| when the observer interface
+ // has been installed and activated.
+ virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive) = 0;
+
+protected:
+ virtual ~VoEConnectionObserver() {}
+};
+
+// VoENetwork
+class WEBRTC_DLLEXPORT VoENetwork
+{
+public:
+ // Factory for the VoENetwork sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoENetwork* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoENetwork sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Installs and enables a user-defined external transport protocol for a
+ // specified |channel|.
+ virtual int RegisterExternalTransport(
+ int channel, Transport& transport) = 0;
+
+ // Removes and disables a user-defined external transport protocol for a
+ // specified |channel|.
+ virtual int DeRegisterExternalTransport(int channel) = 0;
+
+ // The packets received from the network should be passed to this
+ // function when external transport is enabled. Note that the data
+ // including the RTP-header must also be given to the VoiceEngine.
+ virtual int ReceivedRTPPacket(
+ int channel, const void* data, unsigned int length) = 0;
+
+ // The packets received from the network should be passed to this
+ // function when external transport is enabled. Note that the data
+ // including the RTCP-header must also be given to the VoiceEngine.
+ virtual int ReceivedRTCPPacket(
+ int channel, const void* data, unsigned int length) = 0;
+
+ // Gets the source ports and IP address of incoming packets on a
+ // specific |channel|.
+ virtual int GetSourceInfo(
+ int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
+
+ // Gets the local (host) IP address.
+ virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false) = 0;
+
+ // Enables IPv6 for a specified |channel|.
+ virtual int EnableIPv6(int channel) = 0;
+
+ // Gets the current IPv6 status for a specified |channel|.
+ virtual bool IPv6IsEnabled(int channel) = 0;
+
+ // Enables a port and IP address filter for incoming packets on a
+ // specific |channel|.
+ virtual int SetSourceFilter(int channel,
+ int rtpPort, int rtcpPort = 0, const char ipAddr[64] = 0) = 0;
+
+ // Gets the current port and IP-address filter for a specified |channel|.
+ virtual int GetSourceFilter(
+ int channel, int& rtpPort, int& rtcpPort, char ipAddr[64]) = 0;
+
+ // Sets the six-bit Differentiated Services Code Point (DSCP) in the
+ // IP header of the outgoing stream for a specific |channel|.
+ virtual int SetSendTOS(int channel,
+ int DSCP, int priority = -1, bool useSetSockopt = false) = 0;
+
+ // Gets the six-bit DSCP in the IP header of the outgoing stream for
+ // a specific channel.
+ virtual int GetSendTOS(
+ int channel, int& DSCP, int& priority, bool& useSetSockopt) = 0;
+
+ // Sets the Generic Quality of Service (GQoS) service level.
+ // The Windows operating system then maps to a Differentiated Services
+ // Code Point (DSCP) and to an 802.1p setting. [Windows only]
+ virtual int SetSendGQoS(
+ int channel, bool enable, int serviceType, int overrideDSCP = 0) = 0;
+
+ // Gets the Generic Quality of Service (GQoS) service level.
+ virtual int GetSendGQoS(
+ int channel, bool& enabled, int& serviceType, int& overrideDSCP) = 0;
+
+ // Enables or disables warnings that report if packets have not been
+ // received in |timeoutSeconds| seconds for a specific |channel|.
+ virtual int SetPacketTimeoutNotification(
+ int channel, bool enable, int timeoutSeconds = 2) = 0;
+
+ // Gets the current time-out notification status.
+ virtual int GetPacketTimeoutNotification(
+ int channel, bool& enabled, int& timeoutSeconds) = 0;
+
+ // Installs the observer class implementation for a specified |channel|.
+ virtual int RegisterDeadOrAliveObserver(
+ int channel, VoEConnectionObserver& observer) = 0;
+
+ // Removes the observer class implementation for a specified |channel|.
+ virtual int DeRegisterDeadOrAliveObserver(int channel) = 0;
+
+ // Enables or disables the periodic dead-or-alive callback functionality
+ // for a specified |channel|.
+ virtual int SetPeriodicDeadOrAliveStatus(
+ int channel, bool enable, int sampleTimeSeconds = 2) = 0;
+
+ // Gets the current dead-or-alive notification status.
+ virtual int GetPeriodicDeadOrAliveStatus(
+ int channel, bool& enabled, int& sampleTimeSeconds) = 0;
+
+ // Handles sending a raw UDP data packet over an existing RTP or RTCP
+ // socket.
+ virtual int SendUDPPacket(
+ int channel, const void* data, unsigned int length,
+ int& transmittedBytes, bool useRtcpSocket = false) = 0;
+
+protected:
+ VoENetwork() {}
+ virtual ~VoENetwork() {}
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_NETWORK_H
diff --git a/voice_engine/include/voe_rtp_rtcp.h b/voice_engine/include/voe_rtp_rtcp.h
new file mode 100644
index 0000000..fd0ca0b
--- /dev/null
+++ b/voice_engine/include/voe_rtp_rtcp.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Callbacks for RTP and RTCP events such as modified SSRC or CSRC.
+// - SSRC handling.
+// - Transmission of RTCP sender reports.
+// - Obtaining RTCP data from incoming RTCP sender reports.
+// - RTP and RTCP statistics (jitter, packet loss, RTT etc.).
+// - Forward Error Correction (FEC).
+// - Writing RTP and RTCP packets to binary files for off-line analysis of
+// the call quality.
+// - Inserting extra RTP packets into active audio stream.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoERTP_RTCP* rtp_rtcp = VoERTP_RTCP::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// rtp_rtcp->SetLocalSSRC(ch, 12345);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// rtp_rtcp->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
+#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
+
+#include <vector>
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// VoERTPObserver
+class WEBRTC_DLLEXPORT VoERTPObserver
+{
+public:
+ virtual void OnIncomingCSRCChanged(
+ const int channel, const unsigned int CSRC, const bool added) = 0;
+
+ virtual void OnIncomingSSRCChanged(
+ const int channel, const unsigned int SSRC) = 0;
+
+protected:
+ virtual ~VoERTPObserver() {}
+};
+
+// VoERTCPObserver
+class WEBRTC_DLLEXPORT VoERTCPObserver
+{
+public:
+ virtual void OnApplicationDataReceived(
+ const int channel, const unsigned char subType,
+ const unsigned int name, const unsigned char* data,
+ const unsigned short dataLengthInBytes) = 0;
+
+protected:
+ virtual ~VoERTCPObserver() {}
+};
+
+// CallStatistics
+struct CallStatistics
+{
+ unsigned short fractionLost;
+ unsigned int cumulativeLost;
+ unsigned int extendedMax;
+ unsigned int jitterSamples;
+ int rttMs;
+ int bytesSent;
+ int packetsSent;
+ int bytesReceived;
+ int packetsReceived;
+};
+
+// See section 6.4.1 in http://www.ietf.org/rfc/rfc3550.txt for details.
+struct SenderInfo {
+ uint32_t NTP_timestamp_high;
+ uint32_t NTP_timestamp_low;
+ uint32_t RTP_timestamp;
+ uint32_t sender_packet_count;
+ uint32_t sender_octet_count;
+};
+
+// See section 6.4.2 in http://www.ietf.org/rfc/rfc3550.txt for details.
+struct ReportBlock {
+ uint32_t sender_SSRC; // SSRC of sender
+ uint32_t source_SSRC;
+ uint8_t fraction_lost;
+ uint32_t cumulative_num_packets_lost;
+ uint32_t extended_highest_sequence_number;
+ uint32_t interarrival_jitter;
+ uint32_t last_SR_timestamp;
+ uint32_t delay_since_last_SR;
+};
+
+// VoERTP_RTCP
+class WEBRTC_DLLEXPORT VoERTP_RTCP
+{
+public:
+
+ // Factory for the VoERTP_RTCP sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoERTP_RTCP* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoERTP_RTCP sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Registers an instance of a VoERTPObserver derived class for a specified
+ // |channel|. It will allow the user to observe callbacks related to the
+ // RTP protocol such as changes in the incoming SSRC.
+ virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer) = 0;
+
+ // Deregisters an instance of a VoERTPObserver derived class for a
+ // specified |channel|.
+ virtual int DeRegisterRTPObserver(int channel) = 0;
+
+ // Registers an instance of a VoERTCPObserver derived class for a specified
+ // |channel|.
+ virtual int RegisterRTCPObserver(
+ int channel, VoERTCPObserver& observer) = 0;
+
+ // Deregisters an instance of a VoERTCPObserver derived class for a
+ // specified |channel|.
+ virtual int DeRegisterRTCPObserver(int channel) = 0;
+
+ // Sets the local RTP synchronization source identifier (SSRC) explicitly.
+ virtual int SetLocalSSRC(int channel, unsigned int ssrc) = 0;
+
+ // Gets the local RTP SSRC of a specified |channel|.
+ virtual int GetLocalSSRC(int channel, unsigned int& ssrc) = 0;
+
+ // Gets the SSRC of the incoming RTP packets.
+ virtual int GetRemoteSSRC(int channel, unsigned int& ssrc) = 0;
+
+ // Sets the status of rtp-audio-level-indication on a specific |channel|.
+ virtual int SetRTPAudioLevelIndicationStatus(
+ int channel, bool enable, unsigned char ID = 1) = 0;
+
+ // Sets the status of rtp-audio-level-indication on a specific |channel|.
+ virtual int GetRTPAudioLevelIndicationStatus(
+ int channel, bool& enabled, unsigned char& ID) = 0;
+
+ // Gets the CSRCs of the incoming RTP packets.
+ virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]) = 0;
+
+ // Sets the RTCP status on a specific |channel|.
+ virtual int SetRTCPStatus(int channel, bool enable) = 0;
+
+ // Gets the RTCP status on a specific |channel|.
+ virtual int GetRTCPStatus(int channel, bool& enabled) = 0;
+
+ // Sets the canonical name (CNAME) parameter for RTCP reports on a
+ // specific |channel|.
+ virtual int SetRTCP_CNAME(int channel, const char cName[256]) = 0;
+
+ // Gets the canonical name (CNAME) parameter for RTCP reports on a
+ // specific |channel|.
+ virtual int GetRTCP_CNAME(int channel, char cName[256]) = 0;
+
+ // Gets the canonical name (CNAME) parameter for incoming RTCP reports
+ // on a specific channel.
+ virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]) = 0;
+
+ // Gets RTCP data from incoming RTCP Sender Reports.
+ virtual int GetRemoteRTCPData(
+ int channel, unsigned int& NTPHigh, unsigned int& NTPLow,
+ unsigned int& timestamp, unsigned int& playoutTimestamp,
+ unsigned int* jitter = NULL, unsigned short* fractionLost = NULL) = 0;
+
+ // Gets RTP statistics for a specific |channel|.
+ virtual int GetRTPStatistics(
+ int channel, unsigned int& averageJitterMs, unsigned int& maxJitterMs,
+ unsigned int& discardedPackets) = 0;
+
+ // Gets RTCP statistics for a specific |channel|.
+ virtual int GetRTCPStatistics(int channel, CallStatistics& stats) = 0;
+
+ // Gets the sender info part of the last received RTCP Sender Report (SR)
+ // on a specified |channel|.
+ virtual int GetRemoteRTCPSenderInfo(
+ int channel, SenderInfo* sender_info) = 0;
+
+ // Gets the report block parts of the last received RTCP Sender Report (SR),
+ // or RTCP Receiver Report (RR) on a specified |channel|. Each vector
+ // element also contains the SSRC of the sender in addition to a report
+ // block.
+ virtual int GetRemoteRTCPReportBlocks(
+ int channel, std::vector<ReportBlock>* receive_blocks) = 0;
+
+ // Sends an RTCP APP packet on a specific |channel|.
+ virtual int SendApplicationDefinedRTCPPacket(
+ int channel, const unsigned char subType, unsigned int name,
+ const char* data, unsigned short dataLengthInBytes) = 0;
+
+ // Sets the Forward Error Correction (FEC) status on a specific |channel|.
+ virtual int SetFECStatus(
+ int channel, bool enable, int redPayloadtype = -1) = 0;
+
+ // Gets the FEC status on a specific |channel|.
+ virtual int GetFECStatus(
+ int channel, bool& enabled, int& redPayloadtype) = 0;
+
+ // Enables capturing of RTP packets to a binary file on a specific
+ // |channel| and for a given |direction|. The file can later be replayed
+ // using e.g. RTP Tools rtpplay since the binary file format is
+ // compatible with the rtpdump format.
+ virtual int StartRTPDump(
+ int channel, const char fileNameUTF8[1024],
+ RTPDirections direction = kRtpIncoming) = 0;
+
+ // Disables capturing of RTP packets to a binary file on a specific
+ // |channel| and for a given |direction|.
+ virtual int StopRTPDump(
+ int channel, RTPDirections direction = kRtpIncoming) = 0;
+
+ // Gets the current RTP capturing state for the specified
+ // |channel| and |direction|.
+ virtual int RTPDumpIsActive(
+ int channel, RTPDirections direction = kRtpIncoming) = 0;
+
+ // Sends an extra RTP packet using an existing/active RTP session.
+ // It is possible to set the payload type, marker bit and payload
+ // of the extra RTP packet.
+ virtual int InsertExtraRTPPacket(
+ int channel, unsigned char payloadType, bool markerBit,
+ const char* payloadData, unsigned short payloadSize) = 0;
+
+protected:
+ VoERTP_RTCP() {}
+ virtual ~VoERTP_RTCP() {}
+};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_H
diff --git a/voice_engine/include/voe_video_sync.h b/voice_engine/include/voe_video_sync.h
new file mode 100644
index 0000000..ac3b84a
--- /dev/null
+++ b/voice_engine/include/voe_video_sync.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - RTP header modification (time stamp and sequence number fields).
+// - Playout delay tuning to synchronize the voice with video.
+// - Playout delay monitoring.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEVideoSync* vsync = VoEVideoSync::GetInterface(voe);
+// base->Init();
+// ...
+// int buffer_ms(0);
+// vsync->GetPlayoutBufferSize(buffer_ms);
+// ...
+// base->Terminate();
+// base->Release();
+// vsync->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
+#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class RtpRtcp;
+class VoiceEngine;
+
+// Audio/video synchronization sub-API. Pure-virtual interface; obtained and
+// released through the reference-counted GetInterface()/Release() pair.
+class WEBRTC_DLLEXPORT VoEVideoSync
+{
+public:
+ // Factory for the VoEVideoSync sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEVideoSync* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEVideoSync sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Gets the current sound card buffer size (playout delay).
+ virtual int GetPlayoutBufferSize(int& bufferMs) = 0;
+
+ // Sets an additional delay for the playout jitter buffer.
+ virtual int SetMinimumPlayoutDelay(int channel, int delayMs) = 0;
+
+ // Gets the sum of the algorithmic delay, jitter buffer delay, and the
+ // playout buffer delay for a specified |channel|.
+ virtual int GetDelayEstimate(int channel, int& delayMs) = 0;
+
+ // Manual initialization of the RTP timestamp.
+ virtual int SetInitTimestamp(int channel, unsigned int timestamp) = 0;
+
+ // Manual initialization of the RTP sequence number.
+ virtual int SetInitSequenceNumber(int channel, short sequenceNumber) = 0;
+
+ // Gets the most recently received RTP timestamp for |channel|.
+ virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp) = 0;
+
+ // Exposes the channel's RTP/RTCP module through |rtpRtcpModule|.
+ // NOTE(review): presumably a non-owning pointer whose lifetime is tied
+ // to the channel — confirm against the implementation before storing it.
+ virtual int GetRtpRtcp (int channel, RtpRtcp* &rtpRtcpModule) = 0;
+
+protected:
+ VoEVideoSync() { }
+ virtual ~VoEVideoSync() { }
+};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_H
diff --git a/voice_engine/include/voe_volume_control.h b/voice_engine/include/voe_volume_control.h
new file mode 100644
index 0000000..6d64e96
--- /dev/null
+++ b/voice_engine/include/voe_volume_control.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// This sub-API supports the following functionalities:
+//
+// - Speaker volume controls.
+// - Microphone volume control.
+// - Non-linear speech level control.
+// - Mute functions.
+// - Additional stereo scaling methods.
+//
+// Usage example, omitting error checking:
+//
+// using namespace webrtc;
+// VoiceEngine* voe = VoiceEngine::Create();
+// VoEBase* base = VoEBase::GetInterface(voe);
+// VoEVolumeControl* volume = VoEVolumeControl::GetInterface(voe);
+// base->Init();
+// int ch = base->CreateChannel();
+// ...
+// volume->SetInputMute(ch, true);
+// ...
+// base->DeleteChannel(ch);
+// base->Terminate();
+// base->Release();
+// volume->Release();
+// VoiceEngine::Delete(voe);
+//
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
+
+#include "common_types.h"
+
+namespace webrtc {
+
+class VoiceEngine;
+
+// Volume-control sub-API: speaker/microphone levels, OS-level and per-channel
+// mute, speech-level metering, and per-channel scaling/panning.
+class WEBRTC_DLLEXPORT VoEVolumeControl
+{
+public:
+ // Factory for the VoEVolumeControl sub-API. Increases an internal
+ // reference counter if successful. Returns NULL if the API is not
+ // supported or if construction fails.
+ static VoEVolumeControl* GetInterface(VoiceEngine* voiceEngine);
+
+ // Releases the VoEVolumeControl sub-API and decreases an internal
+ // reference counter. Returns the new reference count. This value should
+ // be zero for all sub-API:s before the VoiceEngine object can be safely
+ // deleted.
+ virtual int Release() = 0;
+
+ // Sets the speaker |volume| level. Valid range is [0,255].
+ virtual int SetSpeakerVolume(unsigned int volume) = 0;
+
+ // Gets the speaker |volume| level.
+ virtual int GetSpeakerVolume(unsigned int& volume) = 0;
+
+ // Mutes the speaker device completely in the operating system.
+ virtual int SetSystemOutputMute(bool enable) = 0;
+
+ // Gets the output device mute state in the operating system.
+ virtual int GetSystemOutputMute(bool &enabled) = 0;
+
+ // Sets the microphone volume level. Valid range is [0,255].
+ virtual int SetMicVolume(unsigned int volume) = 0;
+
+ // Gets the microphone volume level.
+ virtual int GetMicVolume(unsigned int& volume) = 0;
+
+ // Mutes the microphone input signal completely without affecting
+ // the audio device volume.
+ virtual int SetInputMute(int channel, bool enable) = 0;
+
+ // Gets the current microphone input mute state.
+ virtual int GetInputMute(int channel, bool& enabled) = 0;
+
+ // Mutes the microphone device completely in the operating system.
+ virtual int SetSystemInputMute(bool enable) = 0;
+
+ // Gets the mute state of the input device in the operating system.
+ virtual int GetSystemInputMute(bool& enabled) = 0;
+
+ // Gets the microphone speech |level|, mapped non-linearly to the range
+ // [0,9].
+ virtual int GetSpeechInputLevel(unsigned int& level) = 0;
+
+ // Gets the speaker speech |level|, mapped non-linearly to the range
+ // [0,9].
+ virtual int GetSpeechOutputLevel(int channel, unsigned int& level) = 0;
+
+ // Gets the microphone speech |level|, mapped linearly to the range
+ // [0,32768].
+ virtual int GetSpeechInputLevelFullRange(unsigned int& level) = 0;
+
+ // Gets the speaker speech |level|, mapped linearly to the range [0,32768].
+ virtual int GetSpeechOutputLevelFullRange(
+ int channel, unsigned int& level) = 0;
+
+ // Sets a volume |scaling| applied to the outgoing signal of a specific
+ // channel. Valid scale range is [0.0, 10.0].
+ virtual int SetChannelOutputVolumeScaling(int channel, float scaling) = 0;
+
+ // Gets the current volume scaling for a specified |channel|.
+ virtual int GetChannelOutputVolumeScaling(int channel, float& scaling) = 0;
+
+ // Scales volume of the |left| and |right| channels independently.
+ // Valid scale range is [0.0, 1.0].
+ virtual int SetOutputVolumePan(int channel, float left, float right) = 0;
+
+ // Gets the current left and right scaling factors.
+ virtual int GetOutputVolumePan(int channel, float& left, float& right) = 0;
+
+protected:
+ VoEVolumeControl() {};
+ virtual ~VoEVolumeControl() {};
+};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_H
diff --git a/voice_engine/level_indicator.cc b/voice_engine/level_indicator.cc
new file mode 100644
index 0000000..1b5cba5
--- /dev/null
+++ b/voice_engine/level_indicator.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "level_indicator.h"
+#include "module_common_types.h"
+#include "signal_processing_library.h"
+
+namespace webrtc {
+
+namespace voe {
+
+
+// Maps a coarse absolute-amplitude bucket (0-32, i.e. |_absMax|/1000) to a
+// level-indicator bar position in [0,9]. 33 elements so every index produced
+// by ComputeLevel() is in range.
+// NOTE(review): defined at namespace scope with external linkage — consider
+// marking it file-local (static / anonymous namespace) to avoid collisions.
+const WebRtc_Word8 permutation[33] =
+ {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
+
+
+// Starts with all level state zeroed; levels ramp up as frames are processed.
+AudioLevel::AudioLevel() :
+ _absMax(0),
+ _count(0),
+ _currentLevel(0),
+ _currentLevelFullRange(0)
+{
+}
+
+// No owned resources; destructor intentionally empty.
+AudioLevel::~AudioLevel()
+{
+}
+
+// Resets the running maximum, frame counter and both reported levels to zero.
+void
+AudioLevel::Clear()
+{
+ _absMax = 0;
+ _count = 0;
+ _currentLevel = 0;
+ _currentLevelFullRange = 0;
+}
+
+// Feeds one audio frame into the level meter. Tracks the absolute peak over
+// a window of frames and periodically folds it into the two reported levels
+// (full-range [0,32767] and the 0-9 bar scale), then decays the peak.
+void
+AudioLevel::ComputeLevel(const AudioFrame& audioFrame)
+{
+ WebRtc_Word16 absValue(0);
+
+ // Check speech level (works for 2 channels as well)
+ absValue = WebRtcSpl_MaxAbsValueW16(
+ audioFrame.data_,
+ audioFrame.samples_per_channel_*audioFrame.num_channels_);
+ if (absValue > _absMax)
+ _absMax = absValue;
+
+ // Update level approximately 10 times per second
+ // NOTE(review): the post-increment test means the update fires every
+ // kUpdateFrequency+1 (= 11) frames, not every 10 — with 10 ms frames
+ // that is ~9 updates/s; confirm whether that off-by-one is intended.
+ if (_count++ == kUpdateFrequency)
+ {
+ _currentLevelFullRange = _absMax;
+
+ _count = 0;
+
+ // Highest value for a WebRtc_Word16 is 0x7fff = 32767
+ // Divide with 1000 to get in the range of 0-32 which is the range of
+ // the permutation vector
+ WebRtc_Word32 position = _absMax/1000;
+
+ // Make it less likely that the bar stays at position 0. I.e. only if
+ // its in the range 0-250 (instead of 0-1000)
+ if ((position == 0) && (_absMax > 250))
+ {
+ position = 1;
+ }
+ _currentLevel = permutation[position];
+
+ // Decay the absolute maximum (divide by 4)
+ _absMax >>= 2;
+ }
+}
+
+// Returns the latest non-linear level on the 0-9 bar scale.
+WebRtc_Word8
+AudioLevel::Level() const
+{
+ return _currentLevel;
+}
+
+// Returns the latest linear level in [0,32767].
+WebRtc_Word16
+AudioLevel::LevelFullRange() const
+{
+ return _currentLevelFullRange;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/level_indicator.h b/voice_engine/level_indicator.h
new file mode 100644
index 0000000..564b068
--- /dev/null
+++ b/voice_engine/level_indicator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
+#define WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+
+class AudioFrame;
+namespace voe {
+
+// Running audio-level meter. Feed frames through ComputeLevel(); read back
+// either the 0-9 bar level (Level) or the 0-32767 level (LevelFullRange).
+// Not internally synchronized.
+class AudioLevel
+{
+public:
+ AudioLevel();
+ virtual ~AudioLevel();
+
+ // Folds one frame's peak amplitude into the meter state.
+ void ComputeLevel(const AudioFrame& audioFrame);
+
+ // Non-linear level on the 0-9 bar scale.
+ WebRtc_Word8 Level() const;
+
+ // Linear level in [0,32767].
+ WebRtc_Word16 LevelFullRange() const;
+
+ // Resets all meter state to zero.
+ void Clear();
+
+private:
+ // Peak is folded into the reported levels every kUpdateFrequency+1 frames.
+ enum { kUpdateFrequency = 10};
+
+ WebRtc_Word16 _absMax; // decaying absolute peak
+ WebRtc_Word16 _count; // frames since last level update
+ WebRtc_Word8 _currentLevel; // 0-9 bar value
+ WebRtc_Word16 _currentLevelFullRange; // 0-32767 value
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_LEVEL_INDICATOR_H
diff --git a/voice_engine/monitor_module.cc b/voice_engine/monitor_module.cc
new file mode 100644
index 0000000..07b17fb
--- /dev/null
+++ b/voice_engine/monitor_module.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "critical_section_wrapper.h"
+#include "monitor_module.h"
+
+namespace webrtc {
+
+namespace voe {
+
+// NOTE(review): the critical section is held as a reference created here and
+// freed in the destructor; CreateCriticalSection() is dereferenced without a
+// NULL check — confirm the factory cannot fail on supported platforms.
+MonitorModule::MonitorModule() :
+ _observerPtr(NULL),
+ _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _lastProcessTime(GET_TIME_IN_MS())
+{
+}
+
+// Releases the critical section allocated in the constructor (the member is
+// a reference, hence delete-through-address).
+MonitorModule::~MonitorModule()
+{
+ delete &_callbackCritSect;
+}
+
+// Installs the single observer notified from Process(). Fails (-1) if an
+// observer is already registered.
+WebRtc_Word32
+MonitorModule::RegisterObserver(MonitorObserver& observer)
+{
+ CriticalSectionScoped lock(&_callbackCritSect);
+ if (_observerPtr)
+ {
+ return -1;
+ }
+ _observerPtr = &observer;
+ return 0;
+}
+
+// Clears the observer. Succeeds (returns 0) even when none was registered.
+WebRtc_Word32
+MonitorModule::DeRegisterObserver()
+{
+ CriticalSectionScoped lock(&_callbackCritSect);
+ if (!_observerPtr)
+ {
+ return 0;
+ }
+ _observerPtr = NULL;
+ return 0;
+}
+
+// Module-interface stub. NOTE(review): returns 0 without writing |version|,
+// |remainingBufferInBytes| or |position| — callers must not rely on the
+// out-parameters being set.
+WebRtc_Word32
+MonitorModule::Version(char* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position) const
+{
+ return 0;
+}
+
+// Module-interface stub; the id is ignored.
+WebRtc_Word32
+MonitorModule::ChangeUniqueId(const WebRtc_Word32 id)
+{
+ return 0;
+}
+
+// Milliseconds until the next periodic Process() call is due (may be
+// negative when overdue).
+// NOTE(review): |now - _lastProcessTime| mixes an unsigned WebRtc_UWord32
+// with the signed WebRtc_Word32 member — verify wrap-around behavior at the
+// 32-bit millisecond clock boundary.
+WebRtc_Word32
+MonitorModule::TimeUntilNextProcess()
+{
+ WebRtc_UWord32 now = GET_TIME_IN_MS();
+ WebRtc_Word32 timeToNext =
+ kAverageProcessUpdateTimeMs - (now - _lastProcessTime);
+ return (timeToNext);
+}
+
+// Periodic module callback: notifies the registered observer, if any.
+// BUG(review): |_observerPtr| is null-checked BEFORE the critical section is
+// entered but dereferenced after; a concurrent DeRegisterObserver() between
+// the check and the lock acquisition leads to a NULL dereference. The check
+// should be repeated (or moved) inside the locked region.
+WebRtc_Word32
+MonitorModule::Process()
+{
+ _lastProcessTime = GET_TIME_IN_MS();
+ if (_observerPtr)
+ {
+ CriticalSectionScoped lock(&_callbackCritSect);
+ _observerPtr->OnPeriodicProcess();
+ }
+ return 0;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/monitor_module.h b/voice_engine/monitor_module.h
new file mode 100644
index 0000000..7612c3c
--- /dev/null
+++ b/voice_engine/monitor_module.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
+#define WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
+
+#include "module.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+// Callback interface invoked by MonitorModule::Process() roughly once per
+// kAverageProcessUpdateTimeMs.
+// NOTE(review): declared in the global namespace, unlike the rest of this
+// header which lives in webrtc::voe — confirm this is intentional.
+class MonitorObserver
+{
+public:
+ virtual void OnPeriodicProcess() = 0;
+protected:
+ virtual ~MonitorObserver() {}
+};
+
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
+// Process-thread module that fires MonitorObserver::OnPeriodicProcess()
+// about once per second. At most one observer at a time.
+class MonitorModule : public Module
+{
+public:
+ // Installs |observer|; fails if one is already registered.
+ WebRtc_Word32 RegisterObserver(MonitorObserver& observer);
+
+ // Removes the current observer (no-op if none).
+ WebRtc_Word32 DeRegisterObserver();
+
+ MonitorModule();
+
+ virtual ~MonitorModule();
+public: // module
+ // Module interface (stub implementations; see .cc).
+ WebRtc_Word32 Version(char* version,
+ WebRtc_UWord32& remainingBufferInBytes,
+ WebRtc_UWord32& position) const;
+
+ WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
+
+ WebRtc_Word32 TimeUntilNextProcess();
+
+ WebRtc_Word32 Process();
+private:
+ enum { kAverageProcessUpdateTimeMs = 1000 };
+ MonitorObserver* _observerPtr; // guarded by _callbackCritSect
+ CriticalSectionWrapper& _callbackCritSect; // owned; freed in dtor
+ WebRtc_Word32 _lastProcessTime; // ms timestamp of last Process()
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_MONITOR_MODULE_H
diff --git a/voice_engine/output_mixer.cc b/voice_engine/output_mixer.cc
new file mode 100644
index 0000000..daf0d4a
--- /dev/null
+++ b/voice_engine/output_mixer.cc
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "output_mixer.h"
+
+#include "audio_processing.h"
+#include "audio_frame_operations.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "output_mixer_internal.h"
+#include "statistics.h"
+#include "trace.h"
+#include "voe_external_media.h"
+
+namespace webrtc {
+
+namespace voe {
+
+// AudioMixerOutputReceiver callback: stores the mixed frame from the
+// conference mixer in |_audioFrame| and tags it with the module |id|.
+// |uniqueAudioFrames| and |size| are only traced, not used.
+// NOTE(review): this performs a full AudioFrame copy per 10 ms block.
+void
+OutputMixer::NewMixedAudio(const WebRtc_Word32 id,
+ const AudioFrame& generalAudioFrame,
+ const AudioFrame** uniqueAudioFrames,
+ const WebRtc_UWord32 size)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);
+
+ _audioFrame = generalAudioFrame;
+ _audioFrame.id_ = id;
+}
+
+// AudioMixerStatusReceiver callback; trace-only, statistics are discarded.
+void OutputMixer::MixedParticipants(
+ const WebRtc_Word32 id,
+ const ParticipantStatistics* participantStatistics,
+ const WebRtc_UWord32 size)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::MixedParticipants(id=%d, size=%u)", id, size);
+}
+
+// AudioMixerStatusReceiver callback; trace-only, statistics are discarded.
+void OutputMixer::VADPositiveParticipants(
+ const WebRtc_Word32 id,
+ const ParticipantStatistics* participantStatistics,
+ const WebRtc_UWord32 size)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::VADPositiveParticipants(id=%d, size=%u)",
+ id, size);
+}
+
+// AudioMixerStatusReceiver callback; trace-only, level is discarded.
+void OutputMixer::MixedAudioLevel(const WebRtc_Word32 id,
+ const WebRtc_UWord32 level)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level);
+}
+
+// FileCallback notification; trace-only stub.
+void OutputMixer::PlayNotification(const WebRtc_Word32 id,
+ const WebRtc_UWord32 durationMs)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::PlayNotification(id=%d, durationMs=%d)",
+ id, durationMs);
+ // Not implemented yet
+}
+
+// FileCallback notification; trace-only stub.
+void OutputMixer::RecordNotification(const WebRtc_Word32 id,
+ const WebRtc_UWord32 durationMs)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::RecordNotification(id=%d, durationMs=%d)",
+ id, durationMs);
+
+ // Not implemented yet
+}
+
+// FileCallback notification; OutputMixer never plays files, so nothing to do.
+void OutputMixer::PlayFileEnded(const WebRtc_Word32 id)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::PlayFileEnded(id=%d)", id);
+
+ // not needed
+}
+
+// FileCallback notification: the playout recorder reached its end condition;
+// mark recording as stopped. |id| is expected to be this mixer's instance id
+// (debug-asserted; not validated in release builds).
+void OutputMixer::RecordFileEnded(const WebRtc_Word32 id)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::RecordFileEnded(id=%d)", id);
+ assert(id == _instanceId);
+
+ CriticalSectionScoped cs(&_fileCritSect);
+ _outputFileRecording = false;
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::RecordFileEnded() =>"
+ "output file recorder module is shutdown");
+}
+
+// Factory: allocates an OutputMixer into |mixer|. Returns 0 on success.
+// NOTE(review): plain `new` throws on failure rather than returning NULL on
+// standard-conforming toolchains, so the NULL branch is effectively dead.
+WebRtc_Word32
+OutputMixer::Create(OutputMixer*& mixer, const WebRtc_UWord32 instanceId)
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+ "OutputMixer::Create(instanceId=%d)", instanceId);
+ mixer = new OutputMixer(instanceId);
+ if (mixer == NULL)
+ {
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
+ "OutputMixer::Create() unable to allocate memory for"
+ "mixer");
+ return -1;
+ }
+ return 0;
+}
+
+// Wires this object into the conference mixer as both stream and status
+// callback and initializes the in-band DTMF generator.
+// NOTE(review): the critical sections and the mixer module are held as
+// references whose factory results are dereferenced without NULL checks;
+// registration failures are traced but otherwise ignored.
+OutputMixer::OutputMixer(const WebRtc_UWord32 instanceId) :
+ _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _mixerModule(*AudioConferenceMixer::Create(instanceId)),
+ _audioLevel(),
+ _dtmfGenerator(instanceId),
+ _instanceId(instanceId),
+ _externalMediaCallbackPtr(NULL),
+ _externalMedia(false),
+ _panLeft(1.0f),
+ _panRight(1.0f),
+ _mixingFrequencyHz(8000),
+ _outputFileRecorderPtr(NULL),
+ _outputFileRecording(false)
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::OutputMixer() - ctor");
+
+ if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
+ (_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::OutputMixer() failed to register mixer"
+ "callbacks");
+ }
+
+ _dtmfGenerator.Init();
+}
+
+// Counterpart to Create(): deletes |mixer| and NULLs the caller's pointer.
+void
+OutputMixer::Destroy(OutputMixer*& mixer)
+{
+ if (mixer)
+ {
+ delete mixer;
+ mixer = NULL;
+ }
+}
+
+// Tear-down order: detach external media, stop/destroy any active file
+// recorder (under the file lock), unregister from the conference mixer,
+// then free the owned reference members created in the constructor.
+OutputMixer::~OutputMixer()
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::~OutputMixer() - dtor");
+ if (_externalMedia)
+ {
+ DeRegisterExternalMediaProcessing();
+ }
+ {
+ CriticalSectionScoped cs(&_fileCritSect);
+ if (_outputFileRecorderPtr)
+ {
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ _outputFileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ }
+ }
+ _mixerModule.UnRegisterMixerStatusCallback();
+ _mixerModule.UnRegisterMixedStreamCallback();
+ delete &_mixerModule;
+ delete &_callbackCritSect;
+ delete &_fileCritSect;
+}
+
+// Stores a non-owning pointer to the engine-wide statistics object; must be
+// called before any method that reports via SetLastError().
+WebRtc_Word32
+OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::SetEngineInformation()");
+ _engineStatisticsPtr = &engineStatistics;
+ return 0;
+}
+
+// Stores a non-owning pointer to the APM used by APMAnalyzeReverseStream().
+// NOTE(review): later dereferenced without a NULL check — callers must set a
+// valid module before mixing starts.
+WebRtc_Word32
+OutputMixer::SetAudioProcessingModule(
+ AudioProcessing* audioProcessingModule)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::SetAudioProcessingModule("
+ "audioProcessingModule=0x%x)", audioProcessingModule);
+ _audioProcessingModulePtr = audioProcessingModule;
+ return 0;
+}
+
+// Installs an external processing callback invoked on the mixed playout
+// signal; silently replaces any previously registered callback.
+int OutputMixer::RegisterExternalMediaProcessing(
+ VoEMediaProcess& proccess_object)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::RegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+ _externalMediaCallbackPtr = &proccess_object;
+ _externalMedia = true;
+
+ return 0;
+}
+
+// Removes the external processing callback (no-op if none is registered).
+int OutputMixer::DeRegisterExternalMediaProcessing()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::DeRegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+ _externalMedia = false;
+ _externalMediaCallbackPtr = NULL;
+
+ return 0;
+}
+
+// Queues a fixed-length in-band DTMF tone for insertion into the playout mix.
+// Fails with VE_STILL_PLAYING_PREV_DTMF if the generator is busy.
+int OutputMixer::PlayDtmfTone(WebRtc_UWord8 eventCode, int lengthMs,
+ int attenuationDb)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "OutputMixer::PlayDtmfTone()");
+ if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
+ kTraceError,
+ "OutputMixer::PlayDtmfTone()");
+ return -1;
+ }
+ return 0;
+}
+
+// Starts an open-ended in-band DTMF tone (stopped via StopPlayingDtmfTone).
+int OutputMixer::StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
+ int attenuationDb)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "OutputMixer::StartPlayingDtmfTone()");
+ if (_dtmfGenerator.StartTone(eventCode, attenuationDb) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STILL_PLAYING_PREV_DTMF,
+ kTraceError,
+ "OutputMixer::StartPlayingDtmfTone())");
+ return -1;
+ }
+ return 0;
+}
+
+// Stops an ongoing DTMF tone; forwards the generator's return value.
+int OutputMixer::StopPlayingDtmfTone()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "OutputMixer::StopPlayingDtmfTone()");
+ return (_dtmfGenerator.StopTone());
+}
+
+// Adds/removes |participant| from the set the conference mixer may mix.
+WebRtc_Word32
+OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
+ const bool mixable)
+{
+ return _mixerModule.SetMixabilityStatus(participant, mixable);
+}
+
+// Adds/removes |participant| from the mixer's anonymous (always-mixed) set.
+WebRtc_Word32
+OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant,
+ const bool mixable)
+{
+ return _mixerModule.SetAnonymousMixabilityStatus(participant,mixable);
+}
+
+// Runs one mixing pass; the result arrives via the NewMixedAudio() callback.
+WebRtc_Word32
+OutputMixer::MixActiveChannels()
+{
+ return _mixerModule.Process();
+}
+
+// Reports the mixed-signal speech level on the non-linear 0-9 scale.
+int
+OutputMixer::GetSpeechOutputLevel(WebRtc_UWord32& level)
+{
+ WebRtc_Word8 currentLevel = _audioLevel.Level();
+ level = static_cast<WebRtc_UWord32> (currentLevel);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "GetSpeechOutputLevel() => level=%u", level);
+ return 0;
+}
+
+// Reports the mixed-signal speech level on the linear 0-32767 scale.
+int
+OutputMixer::GetSpeechOutputLevelFullRange(WebRtc_UWord32& level)
+{
+ WebRtc_Word16 currentLevel = _audioLevel.LevelFullRange();
+ level = static_cast<WebRtc_UWord32> (currentLevel);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "GetSpeechOutputLevelFullRange() => level=%u", level);
+ return 0;
+}
+
+// Stores the per-side pan gains applied in DoOperationsOnCombinedSignal().
+// NOTE(review): values are stored unvalidated and without holding a lock —
+// presumably single-threaded with the mixing path; confirm.
+int
+OutputMixer::SetOutputVolumePan(float left, float right)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::SetOutputVolumePan()");
+ _panLeft = left;
+ _panRight = right;
+ return 0;
+}
+
+// Returns the pan gains last set via SetOutputVolumePan().
+int
+OutputMixer::GetOutputVolumePan(float& left, float& right)
+{
+ left = _panLeft;
+ right = _panRight;
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "GetOutputVolumePan() => left=%2.1f, right=%2.1f",
+ left, right);
+ return 0;
+}
+
+// Starts recording the mixed playout signal to |fileName|. A NULL
+// |codecInst| selects 16 kHz L16 PCM; L16/PCMU/PCMA record as WAV, anything
+// else as a compressed file. Returns 0 if already recording (idempotent)
+// and -1 on validation or recorder failures.
+// NOTE(review): this overload accepts 1 or 2 channels, while the OutStream
+// overload below rejects stereo — confirm which behavior is intended.
+int OutputMixer::StartRecordingPlayout(const char* fileName,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);
+
+ if (_outputFileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "StartRecordingPlayout() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0);
+ CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+ if ((codecInst != NULL) &&
+ ((codecInst->channels < 1) || (codecInst->channels > 2)))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() invalid compression");
+ return(-1);
+ }
+ if(codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst=&dummyCodec;
+ }
+ else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ }
+ else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ // Destroy the old instance
+ if (_outputFileRecorderPtr)
+ {
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ }
+
+ _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+ _instanceId,
+ (const FileFormats)format);
+ if (_outputFileRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_outputFileRecorderPtr->StartRecordingAudioFile(
+ fileName,
+ (const CodecInst&)*codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _outputFileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ return -1;
+ }
+ _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+ _outputFileRecording = true;
+
+ return 0;
+}
+
+// Stream variant of StartRecordingPlayout(): records the mixed playout
+// signal to a caller-provided OutStream. Format selection mirrors the file
+// variant; a NULL |codecInst| selects 16 kHz L16 PCM.
+// NOTE(review): rejects any |codecInst| with channels != 1, unlike the file
+// overload which allows stereo — likely an inconsistency. |stream| is also
+// dereferenced without a NULL check.
+int OutputMixer::StartRecordingPlayout(OutStream* stream,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::StartRecordingPlayout()");
+
+ if (_outputFileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "StartRecordingPlayout() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0);
+ CodecInst dummyCodec={100,"L16",16000,320,1,320000};
+
+ if (codecInst != NULL && codecInst->channels != 1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() invalid compression");
+ return(-1);
+ }
+ if(codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst=&dummyCodec;
+ }
+ else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ }
+ else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ // Destroy the old instance
+ if (_outputFileRecorderPtr)
+ {
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ }
+
+ _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
+ _instanceId,
+ (const FileFormats)format);
+ if (_outputFileRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingPlayout() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
+ *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _outputFileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ return -1;
+ }
+
+ _outputFileRecorderPtr->RegisterModuleFileCallback(this);
+ _outputFileRecording = true;
+
+ return 0;
+}
+
+// Stops an active playout recording and destroys the recorder.
+// NOTE(review): |_outputFileRecording| is read before the file lock is
+// taken, and |_outputFileRecorderPtr| is dereferenced assuming it is
+// non-NULL whenever the flag is set — a concurrent RecordFileEnded() clears
+// the flag but this path could still race; verify the threading model.
+int OutputMixer::StopRecordingPlayout()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::StopRecordingPlayout()");
+
+ if (!_outputFileRecording)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
+ "StopRecordingPlayout() file isnot recording");
+ return -1;
+ }
+
+ CriticalSectionScoped cs(&_fileCritSect);
+
+ if (_outputFileRecorderPtr->StopRecording() != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STOP_RECORDING_FAILED, kTraceError,
+ "StopRecording(), could not stop recording");
+ return -1;
+ }
+ _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
+ _outputFileRecorderPtr = NULL;
+ _outputFileRecording = false;
+
+ return 0;
+}
+
+// Delivers the current mixed frame into |frame|, remixed/resampled to the
+// requested |sample_rate_hz| and |num_channels|. Also feeds the frame to the
+// file recorder when playout recording is active. Returns the result of
+// RemixAndResample() (0 on success, -1 on failure).
+int OutputMixer::GetMixedAudio(int sample_rate_hz,
+ int num_channels,
+ AudioFrame* frame) {
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%d)",
+ sample_rate_hz, num_channels);
+
+ // --- Record playout if enabled
+ {
+ CriticalSectionScoped cs(&_fileCritSect);
+ if (_outputFileRecording && _outputFileRecorderPtr)
+ _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
+ }
+
+ frame->num_channels_ = num_channels;
+ frame->sample_rate_hz_ = sample_rate_hz;
+ // TODO(andrew): Ideally the downmixing would occur much earlier, in
+ // AudioCodingModule.
+ return RemixAndResample(_audioFrame, &_resampler, frame);
+}
+
+// Post-mix processing pipeline run on the combined playout signal:
+// in-band DTMF insertion, left/right panning, far-end APM analysis,
+// external media processing and level metering, in that order.
+WebRtc_Word32
+OutputMixer::DoOperationsOnCombinedSignal()
+{
+ if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
+ {
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+ "OutputMixer::DoOperationsOnCombinedSignal() => "
+ "mixing frequency = %d", _audioFrame.sample_rate_hz_);
+ _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
+ }
+
+ // --- Insert inband Dtmf tone
+ if (_dtmfGenerator.IsAddingTone())
+ {
+ InsertInbandDtmfTone();
+ }
+
+ // Scale left and/or right channel(s) if balance is active
+ // (exact float compare against the 1.0f defaults set in the ctor).
+ if (_panLeft != 1.0 || _panRight != 1.0)
+ {
+ if (_audioFrame.num_channels_ == 1)
+ {
+ // Panning requires stereo; upmix the mono frame first.
+ AudioFrameOperations::MonoToStereo(&_audioFrame);
+ }
+ else
+ {
+ // Pure stereo mode (we are receiving a stereo signal).
+ }
+
+ assert(_audioFrame.num_channels_ == 2);
+ AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
+ }
+
+ // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
+
+ APMAnalyzeReverseStream();
+
+ // --- External media processing
+
+ if (_externalMedia)
+ {
+ CriticalSectionScoped cs(&_callbackCritSect);
+ const bool isStereo = (_audioFrame.num_channels_ == 2);
+ if (_externalMediaCallbackPtr)
+ {
+ _externalMediaCallbackPtr->Process(
+ -1,
+ kPlaybackAllChannelsMixed,
+ (WebRtc_Word16*)_audioFrame.data_,
+ _audioFrame.samples_per_channel_,
+ _audioFrame.sample_rate_hz_,
+ isStereo);
+ }
+ }
+
+ // --- Measure audio level (0-9) for the combined signal
+ _audioLevel.ComputeLevel(_audioFrame);
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// Private methods
+// ----------------------------------------------------------------------------
+
+// Feeds a mono copy of the mixed signal, resampled to the APM's rate, into
+// AnalyzeReverseStream() (e.g. as the echo-canceller far-end reference).
+// Failures are traced and otherwise swallowed by design.
+// NOTE(review): |_audioProcessingModulePtr| is dereferenced without a NULL
+// check — SetAudioProcessingModule() must have been called first.
+void OutputMixer::APMAnalyzeReverseStream() {
+ // Convert from mixing to AudioProcessing sample rate, determined by the send
+ // side. Downmix to mono.
+ AudioFrame frame;
+ frame.num_channels_ = 1;
+ frame.sample_rate_hz_ = _audioProcessingModulePtr->sample_rate_hz();
+ if (RemixAndResample(_audioFrame, &_apmResampler, &frame) == -1)
+ return;
+
+ if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
+ "AudioProcessingModule::AnalyzeReverseStream() => error");
+ }
+}
+
+// Overwrites the mixed audio with the next 10 ms of generated DTMF tone,
+// re-tuning the generator if the mixing rate changed since the last call.
+// NOTE(review): in the stereo branch the tone is written to the left channel
+// only and the right channel is zeroed — confirm that silencing the right
+// side (rather than duplicating the tone) is intended. |toneBuffer| holds
+// 320 samples, i.e. 10 ms at up to 32 kHz.
+int
+OutputMixer::InsertInbandDtmfTone()
+{
+ WebRtc_UWord16 sampleRate(0);
+ _dtmfGenerator.GetSampleRate(sampleRate);
+ if (sampleRate != _audioFrame.sample_rate_hz_)
+ {
+ // Update sample rate of Dtmf tone since the mixing frequency changed.
+ _dtmfGenerator.SetSampleRate(
+ (WebRtc_UWord16)(_audioFrame.sample_rate_hz_));
+ // Reset the tone to be added taking the new sample rate into account.
+ _dtmfGenerator.ResetTone();
+ }
+
+ WebRtc_Word16 toneBuffer[320];
+ WebRtc_UWord16 toneSamples(0);
+ if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "OutputMixer::InsertInbandDtmfTone() inserting Dtmf"
+ "tone failed");
+ return -1;
+ }
+
+ // replace mixed audio with Dtmf tone
+ if (_audioFrame.num_channels_ == 1)
+ {
+ // mono
+ memcpy(_audioFrame.data_, toneBuffer, sizeof(WebRtc_Word16)
+ * toneSamples);
+ } else
+ {
+ // stereo: tone on the left channel, right channel zeroed
+ for (int i = 0; i < _audioFrame.samples_per_channel_; i++)
+ {
+ _audioFrame.data_[2 * i] = toneBuffer[i];
+ _audioFrame.data_[2 * i + 1] = 0;
+ }
+ }
+ assert(_audioFrame.samples_per_channel_ == toneSamples);
+
+ return 0;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/output_mixer.h b/voice_engine/output_mixer.h
new file mode 100644
index 0000000..29ca858
--- /dev/null
+++ b/voice_engine/output_mixer.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
+#define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
+
+#include "audio_conference_mixer.h"
+#include "audio_conference_mixer_defines.h"
+#include "common_types.h"
+#include "dtmf_inband.h"
+#include "file_recorder.h"
+#include "level_indicator.h"
+#include "resampler.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc {
+
+class AudioProcessing;
+class CriticalSectionWrapper;
+class FileWrapper;
+class VoEMediaProcess;
+
+namespace voe {
+
+class Statistics;
+
+// Mixes the audio of all participating channels into one stream and applies
+// the post-mix steps (APM reverse-stream analysis, level measurement,
+// volume/pan scaling, in-band DTMF insertion, external media processing and
+// optional file recording) before the result is fetched for playout.
+class OutputMixer : public AudioMixerOutputReceiver,
+                    public AudioMixerStatusReceiver,
+                    public FileCallback
+{
+public:
+    static WebRtc_Word32 Create(OutputMixer*& mixer,
+                                const WebRtc_UWord32 instanceId);
+
+    static void Destroy(OutputMixer*& mixer);
+
+    WebRtc_Word32 SetEngineInformation(Statistics& engineStatistics);
+
+    WebRtc_Word32 SetAudioProcessingModule(
+        AudioProcessing* audioProcessingModule);
+
+    // VoEExternalMedia
+    int RegisterExternalMediaProcessing(
+        VoEMediaProcess& proccess_object);
+
+    int DeRegisterExternalMediaProcessing();
+
+    // VoEDtmf
+    int PlayDtmfTone(WebRtc_UWord8 eventCode,
+                     int lengthMs,
+                     int attenuationDb);
+
+    int StartPlayingDtmfTone(WebRtc_UWord8 eventCode,
+                             int attenuationDb);
+
+    int StopPlayingDtmfTone();
+
+    WebRtc_Word32 MixActiveChannels();
+
+    WebRtc_Word32 DoOperationsOnCombinedSignal();
+
+    WebRtc_Word32 SetMixabilityStatus(MixerParticipant& participant,
+                                      const bool mixable);
+
+    WebRtc_Word32 SetAnonymousMixabilityStatus(MixerParticipant& participant,
+                                               const bool mixable);
+
+    // Copies the mixed audio, remixed/resampled to the requested format,
+    // into |audioFrame| (for delivery to the audio device).
+    int GetMixedAudio(int sample_rate_hz, int num_channels,
+                      AudioFrame* audioFrame);
+
+    // VoEVolumeControl
+    int GetSpeechOutputLevel(WebRtc_UWord32& level);
+
+    int GetSpeechOutputLevelFullRange(WebRtc_UWord32& level);
+
+    int SetOutputVolumePan(float left, float right);
+
+    int GetOutputVolumePan(float& left, float& right);
+
+    // VoEFile
+    int StartRecordingPlayout(const char* fileName,
+                              const CodecInst* codecInst);
+
+    int StartRecordingPlayout(OutStream* stream,
+                              const CodecInst* codecInst);
+    int StopRecordingPlayout();
+
+    virtual ~OutputMixer();
+
+    // from AudioMixerOutputReceiver
+    virtual void NewMixedAudio(
+        const WebRtc_Word32 id,
+        const AudioFrame& generalAudioFrame,
+        const AudioFrame** uniqueAudioFrames,
+        const WebRtc_UWord32 size);
+
+    // from AudioMixerStatusReceiver
+    virtual void MixedParticipants(
+        const WebRtc_Word32 id,
+        const ParticipantStatistics* participantStatistics,
+        const WebRtc_UWord32 size);
+
+    virtual void VADPositiveParticipants(
+        const WebRtc_Word32 id,
+        const ParticipantStatistics* participantStatistics,
+        const WebRtc_UWord32 size);
+
+    virtual void MixedAudioLevel(const WebRtc_Word32 id,
+                                 const WebRtc_UWord32 level);
+
+    // For file recording
+    void PlayNotification(const WebRtc_Word32 id,
+                          const WebRtc_UWord32 durationMs);
+
+    void RecordNotification(const WebRtc_Word32 id,
+                            const WebRtc_UWord32 durationMs);
+
+    void PlayFileEnded(const WebRtc_Word32 id);
+    void RecordFileEnded(const WebRtc_Word32 id);
+
+private:
+    OutputMixer(const WebRtc_UWord32 instanceId);
+    void APMAnalyzeReverseStream();
+    int InsertInbandDtmfTone();
+
+    // uses
+    Statistics* _engineStatisticsPtr;
+    AudioProcessing* _audioProcessingModulePtr;
+
+    // owns
+    CriticalSectionWrapper& _callbackCritSect;
+    // protect the _outputFileRecorderPtr and _outputFileRecording
+    CriticalSectionWrapper& _fileCritSect;
+    AudioConferenceMixer& _mixerModule;
+    AudioFrame _audioFrame;
+    Resampler _resampler; // converts mixed audio to fit ADM format
+    Resampler _apmResampler; // converts mixed audio to fit APM rate
+    AudioLevel _audioLevel; // measures audio level for the combined signal
+    DtmfInband _dtmfGenerator;
+    int _instanceId;
+    VoEMediaProcess* _externalMediaCallbackPtr;
+    bool _externalMedia;
+    float _panLeft;
+    float _panRight;
+    int _mixingFrequencyHz;
+    FileRecorder* _outputFileRecorderPtr;
+    bool _outputFileRecording;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_H_
diff --git a/voice_engine/output_mixer_internal.cc b/voice_engine/output_mixer_internal.cc
new file mode 100644
index 0000000..b78d8cd
--- /dev/null
+++ b/voice_engine/output_mixer_internal.cc
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "output_mixer_internal.h"
+
+#include "audio_frame_operations.h"
+#include "common_audio/resampler/include/resampler.h"
+#include "module_common_types.h"
+#include "trace.h"
+
+namespace webrtc {
+namespace voe {
+
+// Remixes (up/down) and resamples |src_frame| into |dst_frame|. |dst_frame|
+// must arrive with the desired |num_channels_| and |sample_rate_hz_| already
+// set; its |samples_per_channel_| is updated to match the produced audio.
+// On any failure, |src_frame| is copied verbatim into |dst_frame| and -1 is
+// returned.
+int RemixAndResample(const AudioFrame& src_frame,
+                     Resampler* resampler,
+                     AudioFrame* dst_frame) {
+  const int16_t* audio_ptr = src_frame.data_;
+  int audio_ptr_num_channels = src_frame.num_channels_;
+  int16_t mono_audio[AudioFrame::kMaxDataSizeSamples];
+
+  // Downmix before resampling.
+  if (src_frame.num_channels_ == 2 && dst_frame->num_channels_ == 1) {
+    AudioFrameOperations::StereoToMono(src_frame.data_,
+                                       src_frame.samples_per_channel_,
+                                       mono_audio);
+    audio_ptr = mono_audio;
+    audio_ptr_num_channels = 1;
+  }
+
+  // The channel count of the (possibly downmixed) input selects the mono or
+  // interleaved-stereo resampler variant.
+  const ResamplerType resampler_type = audio_ptr_num_channels == 1 ?
+      kResamplerSynchronous : kResamplerSynchronousStereo;
+  if (resampler->ResetIfNeeded(src_frame.sample_rate_hz_,
+                               dst_frame->sample_rate_hz_,
+                               resampler_type) == -1) {
+    *dst_frame = src_frame;
+    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
+                 "%s ResetIfNeeded failed", __FUNCTION__);
+    return -1;
+  }
+
+  // |out_length| receives the total number of interleaved samples produced.
+  int out_length = 0;
+  if (resampler->Push(audio_ptr,
+                      src_frame.samples_per_channel_* audio_ptr_num_channels,
+                      dst_frame->data_,
+                      AudioFrame::kMaxDataSizeSamples,
+                      out_length) == 0) {
+    dst_frame->samples_per_channel_ = out_length / audio_ptr_num_channels;
+  } else {
+    *dst_frame = src_frame;
+    WEBRTC_TRACE(kTraceError, kTraceVoice, -1,
+                 "%s resampling failed", __FUNCTION__);
+    return -1;
+  }
+
+  // Upmix after resampling.
+  if (src_frame.num_channels_ == 1 && dst_frame->num_channels_ == 2) {
+    // The audio in dst_frame really is mono at this point; MonoToStereo will
+    // set this back to stereo.
+    dst_frame->num_channels_ = 1;
+    AudioFrameOperations::MonoToStereo(dst_frame);
+  }
+  return 0;
+}
+
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/output_mixer_internal.h b/voice_engine/output_mixer_internal.h
new file mode 100644
index 0000000..8d23a14
--- /dev/null
+++ b/voice_engine/output_mixer_internal.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_INTERNAL_H_
+#define WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_INTERNAL_H_
+
+namespace webrtc {
+
+class AudioFrame;
+class Resampler;
+
+namespace voe {
+
+// Upmix or downmix and resample the audio in |src_frame| to |dst_frame|.
+// Expects |dst_frame| to have its |num_channels_| and |sample_rate_hz_| set to
+// the desired values. Updates |samples_per_channel_| accordingly.
+//
+// On failure, returns -1 and copies |src_frame| to |dst_frame|.
+int RemixAndResample(const AudioFrame& src_frame,
+                     Resampler* resampler,
+                     AudioFrame* dst_frame);
+
+} // namespace voe
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_OUTPUT_MIXER_INTERNAL_H_
diff --git a/voice_engine/output_mixer_unittest.cc b/voice_engine/output_mixer_unittest.cc
new file mode 100644
index 0000000..fe678a0
--- /dev/null
+++ b/voice_engine/output_mixer_unittest.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+
+#include "gtest/gtest.h"
+
+#include "output_mixer.h"
+#include "output_mixer_internal.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
+// Shared fixture: provides a resampler and three frames (source, destination
+// and expected "golden" output), all initialized to 10 ms of 16 kHz mono.
+class OutputMixerTest : public ::testing::Test {
+ protected:
+  OutputMixerTest() {
+    src_frame_.sample_rate_hz_ = 16000;
+    // 10 ms frames are used throughout (rate / 100 samples per channel).
+    src_frame_.samples_per_channel_ = src_frame_.sample_rate_hz_ / 100;
+    src_frame_.num_channels_ = 1;
+    dst_frame_ = src_frame_;
+    golden_frame_ = src_frame_;
+  }
+
+  // Remixes/resamples |src_channels|@|src_sample_rate_hz| into
+  // |dst_channels|@|dst_sample_rate_hz| and checks against a golden frame.
+  void RunResampleTest(int src_channels, int src_sample_rate_hz,
+                       int dst_channels, int dst_sample_rate_hz);
+
+  Resampler resampler_;
+  AudioFrame src_frame_;
+  AudioFrame dst_frame_;
+  AudioFrame golden_frame_;
+};
+
+// Sets the signal value to increase by |data| with every sample. Floats are
+// used so non-integer values result in rounding error, but not an accumulating
+// error.
+void SetMonoFrame(AudioFrame* frame, float data, int sample_rate_hz) {
+  frame->num_channels_ = 1;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  // All frames represent 10 ms of audio.
+  frame->samples_per_channel_ = sample_rate_hz / 100;
+  for (int i = 0; i < frame->samples_per_channel_; i++) {
+    frame->data_[i] = data * i;
+  }
+}
+
+// Keep the existing sample rate.
+void SetMonoFrame(AudioFrame* frame, float data) {
+  SetMonoFrame(frame, data, frame->sample_rate_hz_);
+}
+
+// Sets the signal value to increase by |left| and |right| with every sample in
+// each channel respectively.
+void SetStereoFrame(AudioFrame* frame, float left, float right,
+                    int sample_rate_hz) {
+  frame->num_channels_ = 2;
+  frame->sample_rate_hz_ = sample_rate_hz;
+  frame->samples_per_channel_ = sample_rate_hz / 100;
+  for (int i = 0; i < frame->samples_per_channel_; i++) {
+    // Interleaved layout: left sample followed by right sample.
+    frame->data_[i * 2] = left * i;
+    frame->data_[i * 2 + 1] = right * i;
+  }
+}
+
+// Keep the existing sample rate.
+void SetStereoFrame(AudioFrame* frame, float left, float right) {
+  SetStereoFrame(frame, left, right, frame->sample_rate_hz_);
+}
+
+// Checks that the frames agree on channel count, frame size and sample rate.
+void VerifyParams(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+  EXPECT_EQ(ref_frame.num_channels_, test_frame.num_channels_);
+  EXPECT_EQ(ref_frame.samples_per_channel_, test_frame.samples_per_channel_);
+  EXPECT_EQ(ref_frame.sample_rate_hz_, test_frame.sample_rate_hz_);
+}
+
+// Computes the best SNR based on the error between |ref_frame| and
+// |test_frame|. It allows for up to a 30 sample delay between the signals to
+// compensate for the resampling delay.
+// NOTE(review): for stereo frames the delay is applied to the interleaved
+// data, so odd delays compare left samples against right samples — confirm
+// this is acceptable for the signals under test.
+float ComputeSNR(const AudioFrame& ref_frame, const AudioFrame& test_frame) {
+  VerifyParams(ref_frame, test_frame);
+  float best_snr = 0;
+  int best_delay = 0;
+  for (int delay = 0; delay < 30; delay++) {
+    float mse = 0;
+    float variance = 0;
+    for (int i = 0; i < ref_frame.samples_per_channel_ *
+        ref_frame.num_channels_ - delay; i++) {
+      int error = ref_frame.data_[i] - test_frame.data_[i + delay];
+      mse += error * error;
+      variance += ref_frame.data_[i] * ref_frame.data_[i];
+    }
+    float snr = 100;  // We assign 100 dB to the zero-error case.
+    if (mse > 0)
+      snr = 10 * log10(variance / mse);
+    if (snr > best_snr) {
+      best_snr = snr;
+      best_delay = delay;
+    }
+  }
+  printf("SNR=%.1f dB at delay=%d\n", best_snr, best_delay);
+  return best_snr;
+}
+
+// Expects bit-exact equality of the two frames: parameters and every sample.
+void VerifyFramesAreEqual(const AudioFrame& ref_frame,
+                          const AudioFrame& test_frame) {
+  VerifyParams(ref_frame, test_frame);
+  for (int i = 0; i < ref_frame.samples_per_channel_ * ref_frame.num_channels_;
+      i++) {
+    EXPECT_EQ(ref_frame.data_[i], test_frame.data_[i]);
+  }
+}
+
+void OutputMixerTest::RunResampleTest(int src_channels,
+                                      int src_sample_rate_hz,
+                                      int dst_channels,
+                                      int dst_sample_rate_hz) {
+  Resampler resampler;  // Create a new one with every test.
+  const int16_t kSrcLeft = 60;  // Shouldn't overflow for any used sample rate.
+  const int16_t kSrcRight = 30;
+  // A ramp that rises by |kSrcLeft| per sample at the source rate rises by
+  // |kResamplingFactor| * |kSrcLeft| per sample at the destination rate, so
+  // scale the golden signal slopes accordingly.
+  const float kResamplingFactor = (1.0 * src_sample_rate_hz) /
+      dst_sample_rate_hz;
+  const float kDstLeft = kResamplingFactor * kSrcLeft;
+  const float kDstRight = kResamplingFactor * kSrcRight;
+  const float kDstMono = (kDstLeft + kDstRight) / 2;
+  if (src_channels == 1)
+    SetMonoFrame(&src_frame_, kSrcLeft, src_sample_rate_hz);
+  else
+    SetStereoFrame(&src_frame_, kSrcLeft, kSrcRight, src_sample_rate_hz);
+
+  if (dst_channels == 1) {
+    SetMonoFrame(&dst_frame_, 0, dst_sample_rate_hz);
+    if (src_channels == 1)
+      SetMonoFrame(&golden_frame_, kDstLeft, dst_sample_rate_hz);
+    else
+      SetMonoFrame(&golden_frame_, kDstMono, dst_sample_rate_hz);
+  } else {
+    SetStereoFrame(&dst_frame_, 0, 0, dst_sample_rate_hz);
+    if (src_channels == 1)
+      SetStereoFrame(&golden_frame_, kDstLeft, kDstLeft, dst_sample_rate_hz);
+    else
+      SetStereoFrame(&golden_frame_, kDstLeft, kDstRight, dst_sample_rate_hz);
+  }
+
+  printf("(%d, %d Hz) -> (%d, %d Hz) ",  // SNR reported on the same line later.
+         src_channels, src_sample_rate_hz, dst_channels, dst_sample_rate_hz);
+  EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler, &dst_frame_));
+  EXPECT_GT(ComputeSNR(golden_frame_, dst_frame_), 40.0f);
+}
+
+TEST_F(OutputMixerTest, RemixAndResampleFailsWithBadSampleRate) {
+  // Request a destination rate the resampler is expected to reject.
+  SetMonoFrame(&dst_frame_, 10, 44100);
+  EXPECT_EQ(-1, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
+  // On failure the source frame must be passed through unmodified.
+  VerifyFramesAreEqual(src_frame_, dst_frame_);
+}
+
+TEST_F(OutputMixerTest, RemixAndResampleCopyFrameSucceeds) {
+  // Stereo -> stereo.
+  SetStereoFrame(&src_frame_, 10, 10);
+  SetStereoFrame(&dst_frame_, 0, 0);
+  EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
+  VerifyFramesAreEqual(src_frame_, dst_frame_);
+
+  // Mono -> mono.
+  SetMonoFrame(&src_frame_, 20);
+  SetMonoFrame(&dst_frame_, 0);
+  EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
+  VerifyFramesAreEqual(src_frame_, dst_frame_);
+}
+
+TEST_F(OutputMixerTest, RemixAndResampleMixingOnlySucceeds) {
+  // Mono -> stereo (no resampling).
+  SetStereoFrame(&dst_frame_, 0, 0);
+  SetMonoFrame(&src_frame_, 10);
+  SetStereoFrame(&golden_frame_, 10, 10);
+  EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
+  VerifyFramesAreEqual(dst_frame_, golden_frame_);
+
+  // Stereo -> mono (no resampling); channels are averaged.
+  SetMonoFrame(&dst_frame_, 0);
+  SetStereoFrame(&src_frame_, 10, 20);
+  SetMonoFrame(&golden_frame_, 15);
+  EXPECT_EQ(0, RemixAndResample(src_frame_, &resampler_, &dst_frame_));
+  VerifyFramesAreEqual(golden_frame_, dst_frame_);
+}
+
+TEST_F(OutputMixerTest, RemixAndResampleSucceeds) {
+  // We don't attempt to be exhaustive here, but just get good coverage. Some
+  // combinations of rates will not be resampled, and some give an odd
+  // resampling factor which makes it more difficult to evaluate.
+  const int kSampleRates[] = {16000, 32000, 48000};
+  const int kSampleRatesSize = sizeof(kSampleRates) / sizeof(*kSampleRates);
+  const int kChannels[] = {1, 2};
+  const int kChannelsSize = sizeof(kChannels) / sizeof(*kChannels);
+  for (int src_rate = 0; src_rate < kSampleRatesSize; src_rate++) {
+    for (int dst_rate = 0; dst_rate < kSampleRatesSize; dst_rate++) {
+      for (int src_channel = 0; src_channel < kChannelsSize; src_channel++) {
+        for (int dst_channel = 0; dst_channel < kChannelsSize; dst_channel++) {
+          RunResampleTest(kChannels[src_channel], kSampleRates[src_rate],
+                          kChannels[dst_channel], kSampleRates[dst_rate]);
+        }
+      }
+    }
+  }
+}
+
+} // namespace
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/shared_data.cc b/voice_engine/shared_data.cc
new file mode 100644
index 0000000..7bea1e0
--- /dev/null
+++ b/voice_engine/shared_data.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "shared_data.h"
+
+#include "audio_processing.h"
+#include "critical_section_wrapper.h"
+#include "channel.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+
+namespace webrtc {
+
+namespace voe {
+
+// Counts SharedData instances; also used to derive per-instance ids.
+static WebRtc_Word32 _gInstanceCounter = 0;
+
+SharedData::SharedData() :
+    _instanceId(++_gInstanceCounter),
+    _apiCritPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _channelManager(_gInstanceCounter),
+    _engineStatistics(_gInstanceCounter),
+    _audioDevicePtr(NULL),
+    _audioProcessingModulePtr(NULL),
+    _moduleProcessThreadPtr(ProcessThread::CreateProcessThread()),
+    _externalRecording(false),
+    _externalPlayout(false)
+{
+    Trace::CreateTrace();
+    Trace::SetLevelFilter(WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER);
+    // NOTE(review): _outputMixerPtr/_transmitMixerPtr are only assigned via
+    // the Create() out-parameters — confirm Create() nulls them on failure
+    // before the destructor hands them to Destroy().
+    if (OutputMixer::Create(_outputMixerPtr, _gInstanceCounter) == 0)
+    {
+        _outputMixerPtr->SetEngineInformation(_engineStatistics);
+    }
+    if (TransmitMixer::Create(_transmitMixerPtr, _gInstanceCounter) == 0)
+    {
+        _transmitMixerPtr->SetEngineInformation(*_moduleProcessThreadPtr,
+                                                _engineStatistics,
+                                                _channelManager);
+    }
+    _audioDeviceLayer = AudioDeviceModule::kPlatformDefaultAudio;
+}
+
+SharedData::~SharedData()
+{
+    // Tear down owned modules: mixers first, then release the audio device
+    // reference, destroy APM, the API lock, the process thread and trace.
+    OutputMixer::Destroy(_outputMixerPtr);
+    TransmitMixer::Destroy(_transmitMixerPtr);
+    if (_audioDevicePtr) {
+        _audioDevicePtr->Release();
+    }
+    AudioProcessing::Destroy(_audioProcessingModulePtr);
+    delete _apiCritPtr;
+    ProcessThread::DestroyProcessThread(_moduleProcessThreadPtr);
+    Trace::ReturnTrace();
+}
+
+// Replaces the audio device module, adjusting reference counts.
+void SharedData::set_audio_device(AudioDeviceModule* audio_device)
+{
+    // AddRef first in case the pointers are equal.
+    if (audio_device)
+        audio_device->AddRef();
+    if (_audioDevicePtr)
+        _audioDevicePtr->Release();
+    _audioDevicePtr = audio_device;
+}
+
+// Takes ownership of |audio_processing|, destroying any previous instance.
+void SharedData::set_audio_processing(AudioProcessing* audio_processing) {
+  if (_audioProcessingModulePtr)
+    AudioProcessing::Destroy(_audioProcessingModulePtr);
+  _audioProcessingModulePtr = audio_processing;
+}
+
+// Returns how many channels are currently in the sending state.
+WebRtc_UWord16 SharedData::NumOfSendingChannels()
+{
+    WebRtc_Word32 numOfChannels = _channelManager.NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+
+    WebRtc_UWord16 nChannelsSending(0);
+    // Snapshot the current channel ids; each id is re-validated through
+    // ScopedChannel below since channels may disappear concurrently.
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+
+    _channelManager.GetChannelIds(channelsArray, numOfChannels);
+    for (int i = 0; i < numOfChannels; i++)
+    {
+        voe::ScopedChannel sc(_channelManager, channelsArray[i]);
+        Channel* chPtr = sc.ChannelPtr();
+        if (chPtr)
+        {
+            if (chPtr->Sending())
+            {
+                nChannelsSending++;
+            }
+        }
+    }
+    delete [] channelsArray;
+    return nChannelsSending;
+}
+
+// Convenience forwarders to Statistics::SetLastError().
+void SharedData::SetLastError(const WebRtc_Word32 error) const {
+  _engineStatistics.SetLastError(error);
+}
+
+void SharedData::SetLastError(const WebRtc_Word32 error,
+                              const TraceLevel level) const {
+  _engineStatistics.SetLastError(error, level);
+}
+
+void SharedData::SetLastError(const WebRtc_Word32 error, const TraceLevel level,
+                              const char* msg) const {
+  _engineStatistics.SetLastError(error, level, msg);
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/shared_data.h b/voice_engine/shared_data.h
new file mode 100644
index 0000000..191e369
--- /dev/null
+++ b/voice_engine/shared_data.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_SHARED_DATA_H
+#define WEBRTC_VOICE_ENGINE_SHARED_DATA_H
+
+#include "voice_engine_defines.h"
+
+#include "channel_manager.h"
+#include "statistics.h"
+#include "process_thread.h"
+
+#include "audio_device.h"
+#include "audio_processing.h"
+
+// NOTE(review): this forward declaration is in the global namespace, while
+// the ProcessThread used below resolves to webrtc::ProcessThread (declared
+// in process_thread.h) — this line looks like a stray left-over; confirm
+// and remove.
+class ProcessThread;
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
+class TransmitMixer;
+class OutputMixer;
+
+// State shared by all VoiceEngine sub-APIs: engine-wide modules (audio
+// device, audio processing, process thread), the channel manager, the two
+// mixers and error statistics.
+class SharedData
+{
+public:
+    // Public accessors.
+    WebRtc_UWord32 instance_id() const { return _instanceId; }
+    Statistics& statistics() { return _engineStatistics; }
+    ChannelManager& channel_manager() { return _channelManager; }
+    AudioDeviceModule* audio_device() { return _audioDevicePtr; }
+    void set_audio_device(AudioDeviceModule* audio_device);
+    AudioProcessing* audio_processing() { return _audioProcessingModulePtr; }
+    void set_audio_processing(AudioProcessing* audio_processing);
+    TransmitMixer* transmit_mixer() { return _transmitMixerPtr; }
+    OutputMixer* output_mixer() { return _outputMixerPtr; }
+    CriticalSectionWrapper* crit_sec() { return _apiCritPtr; }
+    bool ext_recording() const { return _externalRecording; }
+    void set_ext_recording(bool value) { _externalRecording = value; }
+    bool ext_playout() const { return _externalPlayout; }
+    void set_ext_playout(bool value) { _externalPlayout = value; }
+    ProcessThread* process_thread() { return _moduleProcessThreadPtr; }
+    AudioDeviceModule::AudioLayer audio_device_layer() const {
+        return _audioDeviceLayer;
+    }
+    void set_audio_device_layer(AudioDeviceModule::AudioLayer layer) {
+        _audioDeviceLayer = layer;
+    }
+
+    WebRtc_UWord16 NumOfSendingChannels();
+
+    // Convenience methods for calling statistics().SetLastError().
+    void SetLastError(const WebRtc_Word32 error) const;
+    void SetLastError(const WebRtc_Word32 error, const TraceLevel level) const;
+    void SetLastError(const WebRtc_Word32 error, const TraceLevel level,
+                      const char* msg) const;
+
+protected:
+    const WebRtc_UWord32 _instanceId;
+    CriticalSectionWrapper* _apiCritPtr;
+    ChannelManager _channelManager;
+    Statistics _engineStatistics;
+    AudioDeviceModule* _audioDevicePtr;
+    OutputMixer* _outputMixerPtr;
+    TransmitMixer* _transmitMixerPtr;
+    AudioProcessing* _audioProcessingModulePtr;
+    ProcessThread* _moduleProcessThreadPtr;
+
+    bool _externalRecording;
+    bool _externalPlayout;
+
+    AudioDeviceModule::AudioLayer _audioDeviceLayer;
+
+    SharedData();
+    virtual ~SharedData();
+};
+
+} // namespace voe
+
+} // namespace webrtc
+#endif // WEBRTC_VOICE_ENGINE_SHARED_DATA_H
diff --git a/voice_engine/statistics.cc b/voice_engine/statistics.cc
new file mode 100644
index 0000000..4f1bc79
--- /dev/null
+++ b/voice_engine/statistics.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cassert>
+#include <stdio.h>
+#include <string.h>
+
+#include "statistics.h"
+
+#include "trace.h"
+#include "critical_section_wrapper.h"
+
+namespace webrtc {
+
+namespace voe {
+
+Statistics::Statistics(const WebRtc_UWord32 instanceId) :
+    _critPtr(CriticalSectionWrapper::CreateCriticalSection()),
+    _instanceId(instanceId),
+    _lastError(0),
+    _isInitialized(false)
+{
+}
+
+Statistics::~Statistics()
+{
+    if (_critPtr)
+    {
+        delete _critPtr;
+        _critPtr = NULL;
+    }
+}
+
+// Marks the owning engine as initialized.
+WebRtc_Word32 Statistics::SetInitialized()
+{
+    _isInitialized = true;
+    return 0;
+}
+
+// Marks the owning engine as no longer initialized.
+WebRtc_Word32 Statistics::SetUnInitialized()
+{
+    _isInitialized = false;
+    return 0;
+}
+
+// True between SetInitialized() and SetUnInitialized().
+bool Statistics::Initialized() const
+{
+    return _isInitialized;
+}
+
+// Records |error| as the last error code (thread-safe).
+WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error) const
+{
+    CriticalSectionScoped cs(_critPtr);
+    _lastError = error;
+    return 0;
+}
+
+// Records |error| and traces it at the given |level|.
+WebRtc_Word32 Statistics::SetLastError(const WebRtc_Word32 error,
+                                       const TraceLevel level) const
+{
+    CriticalSectionScoped cs(_critPtr);
+    _lastError = error;
+    WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1),
+                 "error code is set to %d",
+                 _lastError);
+    return 0;
+}
+
+WebRtc_Word32 Statistics::SetLastError(
+ const WebRtc_Word32 error,
+ const TraceLevel level, const char* msg) const
+{
+ CriticalSectionScoped cs(_critPtr);
+ char traceMessage[KTraceMaxMessageSize];
+ assert(strlen(msg) < KTraceMaxMessageSize);
+ _lastError = error;
+ sprintf(traceMessage, "%s (error=%d)", msg, error);
+ WEBRTC_TRACE(level, kTraceVoice, VoEId(_instanceId,-1), "%s",
+ traceMessage);
+ return 0;
+}
+
+// Returns (and traces) the most recently stored error code.
+WebRtc_Word32 Statistics::LastError() const
+{
+    CriticalSectionScoped cs(_critPtr);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "LastError() => %d", _lastError);
+    return _lastError;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/statistics.h b/voice_engine/statistics.h
new file mode 100644
index 0000000..fc0bf8c
--- /dev/null
+++ b/voice_engine/statistics.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_STATISTICS_H
+#define WEBRTC_VOICE_ENGINE_STATISTICS_H
+
+#include "common_types.h"
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+#include "voe_errors.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+
+namespace voe {
+
+// Tracks the engine-wide initialization state and the last error code, with
+// optional tracing; guarded by an internal critical section.
+class Statistics
+{
+ public:
+    // Maximum length (including terminator) of a traced error message.
+    enum {KTraceMaxMessageSize = 256};
+ public:
+    Statistics(const WebRtc_UWord32 instanceId);
+    ~Statistics();
+
+    WebRtc_Word32 SetInitialized();
+    WebRtc_Word32 SetUnInitialized();
+    bool Initialized() const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error) const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
+                               const TraceLevel level) const;
+    WebRtc_Word32 SetLastError(const WebRtc_Word32 error,
+                               const TraceLevel level,
+                               const char* msg) const;
+    WebRtc_Word32 LastError() const;
+
+ private:
+    CriticalSectionWrapper* _critPtr;
+    const WebRtc_UWord32 _instanceId;
+    // mutable so the const SetLastError() overloads can update it.
+    mutable WebRtc_Word32 _lastError;
+    bool _isInitialized;
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_STATISTICS_H
diff --git a/voice_engine/test/android/android_test/.classpath b/voice_engine/test/android/android_test/.classpath
new file mode 100644
index 0000000..86a15c9
--- /dev/null
+++ b/voice_engine/test/android/android_test/.classpath
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<classpath>
+ <classpathentry kind="con" path="com.android.ide.eclipse.adt.ANDROID_FRAMEWORK"/>
+ <classpathentry kind="src" path="src"/>
+ <classpathentry kind="src" path="gen"/>
+ <classpathentry kind="output" path="bin"/>
+</classpath>
diff --git a/voice_engine/test/android/android_test/.project b/voice_engine/test/android/android_test/.project
new file mode 100644
index 0000000..990e2f5
--- /dev/null
+++ b/voice_engine/test/android/android_test/.project
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>AndroidTest</name>
+ <comment></comment>
+ <projects>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>org.eclipse.jdt.core.javabuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ <buildCommand>
+ <name>com.android.ide.eclipse.adt.ApkBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>com.android.ide.eclipse.adt.AndroidNature</nature>
+ <nature>org.eclipse.jdt.core.javanature</nature>
+ </natures>
+</projectDescription>
diff --git a/voice_engine/test/android/android_test/Android.mk b/voice_engine/test/android/android_test/Android.mk
new file mode 100644
index 0000000..a3f5ce6
--- /dev/null
+++ b/voice_engine/test/android/android_test/Android.mk
@@ -0,0 +1,25 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+ src/org/webrtc/voiceengine/test/AndroidTest.java
+
+LOCAL_PACKAGE_NAME := webrtc-voice-demo
+LOCAL_CERTIFICATE := platform
+
+LOCAL_JNI_SHARED_LIBRARIES := libwebrtc-voice-demo-jni
+
+include $(BUILD_PACKAGE)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/voice_engine/test/android/android_test/AndroidManifest.xml b/voice_engine/test/android/android_test/AndroidManifest.xml
new file mode 100644
index 0000000..a614f8d
--- /dev/null
+++ b/voice_engine/test/android/android_test/AndroidManifest.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!-- -->
+<!-- Use of this source code is governed by a BSD-style license -->
+<!-- that can be found in the LICENSE file in the root of the source -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS. All contributing project authors may -->
+<!-- be found in the AUTHORS file in the root of the source tree. -->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ android:versionCode="1"
+ android:versionName="1.0" package="org.webrtc.voiceengine.test">
+ <application android:icon="@drawable/icon"
+ android:label="@string/app_name"
+ android:debuggable="true">
+ <activity android:name=".AndroidTest"
+ android:label="@string/app_name"
+ android:screenOrientation="portrait">
+ <intent-filter>
+ <action android:name="android.intent.action.MAIN" />
+ <category android:name="android.intent.category.LAUNCHER" />
+ </intent-filter>
+ </activity>
+
+ </application>
+ <uses-sdk android:minSdkVersion="3" />
+
+ <uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
+ <uses-permission android:name="android.permission.RECORD_AUDIO" />
+ <uses-permission android:name="android.permission.INTERNET" />
+
+</manifest>
diff --git a/voice_engine/test/android/android_test/default.properties b/voice_engine/test/android/android_test/default.properties
new file mode 100644
index 0000000..6ed608e
--- /dev/null
+++ b/voice_engine/test/android/android_test/default.properties
@@ -0,0 +1,11 @@
+# This file is automatically generated by Android Tools.
+# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
+#
+# This file must be checked in Version Control Systems.
+#
+# To customize properties used by the Ant build system use,
+# "build.properties", and override values to adapt the script to your
+# project structure.
+
+# Project target, OpenSL ES requires API level 9
+target=android-9
diff --git a/voice_engine/test/android/android_test/gen/org/webrtc/voiceengine/test/R.java b/voice_engine/test/android/android_test/gen/org/webrtc/voiceengine/test/R.java
new file mode 100644
index 0000000..ec8f5b4
--- /dev/null
+++ b/voice_engine/test/android/android_test/gen/org/webrtc/voiceengine/test/R.java
@@ -0,0 +1,30 @@
+/* AUTO-GENERATED FILE. DO NOT MODIFY.
+ *
+ * This class was automatically generated by the
+ * aapt tool from the resource data it found. It
+ * should not be modified by hand.
+ */
+
+package org.webrtc.voiceengine.test;
+
+public final class R {
+ public static final class attr {
+ }
+ public static final class drawable {
+ public static final int icon=0x7f020000;
+ }
+ public static final class id {
+ public static final int Button01=0x7f050002;
+ public static final int Button02=0x7f050005;
+ public static final int EditText01=0x7f050001;
+ public static final int Spinner01=0x7f050003;
+ public static final int Spinner02=0x7f050004;
+ public static final int TextView01=0x7f050000;
+ }
+ public static final class layout {
+ public static final int main=0x7f030000;
+ }
+ public static final class string {
+ public static final int app_name=0x7f040000;
+ }
+}
diff --git a/voice_engine/test/android/android_test/jni/Android.mk b/voice_engine/test/android/android_test/jni/Android.mk
new file mode 100644
index 0000000..d7e04b5
--- /dev/null
+++ b/voice_engine/test/android/android_test/jni/Android.mk
@@ -0,0 +1,152 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+ifdef NDK_ROOT
+
+MY_WEBRTC_ROOT_PATH := $(call my-dir)
+
+MY_WEBRTC_SRC_PATH := ../../../../../../..
+
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/common_audio/resampler/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/common_audio/signal_processing/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/common_audio/vad/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/neteq/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/cng/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/g711/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/g722/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/pcm16b/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/ilbc/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/iSAC/fix/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/codecs/iSAC/main/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_coding/main/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_conference_mixer/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_device/main/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/aec/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/aecm/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/agc/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/ns/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/audio_processing/utility/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/media_file/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/rtp_rtcp/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/udp_transport/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/modules/utility/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/system_wrappers/source/Android.mk
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/voice_engine/Android.mk
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc_audio_preprocessing
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libwebrtc_spl \
+ libwebrtc_resampler \
+ libwebrtc_apm \
+ libwebrtc_apm_utility \
+ libwebrtc_vad \
+ libwebrtc_ns \
+ libwebrtc_agc \
+ libwebrtc_aec \
+ libwebrtc_aecm \
+ libwebrtc_system_wrappers \
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libstlport_shared
+
+LOCAL_LDLIBS := \
+ -lgcc \
+ -llog
+
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_SHARED_LIBRARY)
+
+###
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := libwebrtc-voice-jni
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_WHOLE_STATIC_LIBRARIES := \
+ libwebrtc_system_wrappers \
+ libwebrtc_audio_device \
+ libwebrtc_pcm16b \
+ libwebrtc_cng \
+ libwebrtc_audio_coding \
+ libwebrtc_rtp_rtcp \
+ libwebrtc_media_file \
+ libwebrtc_udp_transport \
+ libwebrtc_utility \
+ libwebrtc_neteq \
+ libwebrtc_audio_conference_mixer \
+ libwebrtc_isac \
+ libwebrtc_ilbc \
+ libwebrtc_isacfix \
+ libwebrtc_g722 \
+ libwebrtc_g711 \
+ libwebrtc_voe_core
+
+LOCAL_SHARED_LIBRARIES := \
+ libcutils \
+ libstlport_shared \
+ libwebrtc_audio_preprocessing
+
+LOCAL_LDLIBS := \
+ -lgcc \
+ -llog \
+ -lOpenSLES
+
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_SHARED_LIBRARY)
+
+###
+
+include $(MY_WEBRTC_ROOT_PATH)/$(MY_WEBRTC_SRC_PATH)/src/voice_engine/test/cmd_test/Android.mk
+
+else
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_MODULE := libwebrtc-voice-demo-jni
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES := android_test.cc
+LOCAL_CFLAGS := \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_ANDROID'
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/../../../auto_test \
+ $(LOCAL_PATH)/../../../../interface \
+ $(LOCAL_PATH)/../../../../../.. \
+ $(LOCAL_PATH)/../../../../../../system_wrappers/interface
+
+LOCAL_PRELINK_MODULE := false
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libstlport \
+ libandroid \
+ libwebrtc \
+ libGLESv2
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif
diff --git a/voice_engine/test/android/android_test/jni/Application.mk b/voice_engine/test/android/android_test/jni/Application.mk
new file mode 100644
index 0000000..03c35ac
--- /dev/null
+++ b/voice_engine/test/android/android_test/jni/Application.mk
@@ -0,0 +1,11 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+# Build ARMv5TE, ARMv7-A and x86 machine code.
+APP_ABI := armeabi armeabi-v7a x86
+APP_STL := stlport_shared
diff --git a/voice_engine/test/android/android_test/jni/android_test.cc b/voice_engine/test/android/android_test/jni/android_test.cc
new file mode 100644
index 0000000..8c5fdff
--- /dev/null
+++ b/voice_engine/test/android/android_test/jni/android_test.cc
@@ -0,0 +1,1554 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <android/log.h>
+
+#include "org_webrtc_voiceengine_test_AndroidTest.h"
+
+#include "thread_wrapper.h"
+
+#include "voe_base.h"
+#include "voe_codec.h"
+#include "voe_file.h"
+#include "voe_network.h"
+#include "voe_audio_processing.h"
+#include "voe_volume_control.h"
+#include "voe_hardware.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_encryption.h"
+
+#include "voe_test_interface.h"
+
+//#define USE_SRTP
+//#define INIT_FROM_THREAD
+//#define START_CALL_FROM_THREAD
+
+#define WEBRTC_LOG_TAG "*WEBRTCN*" // As in WEBRTC Native...
+#define VALIDATE_BASE_POINTER \
+ if (!veData1.base) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Base pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_CODEC_POINTER \
+ if (!veData1.codec) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Codec pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_FILE_POINTER \
+ if (!veData1.file) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "File pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_NETWORK_POINTER \
+ if (!veData1.netw) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Network pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_APM_POINTER \
+ if (!veData1.apm) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Apm pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_VOLUME_POINTER \
+ if (!veData1.volume) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Volume pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_HARDWARE_POINTER \
+ if (!veData1.hardware) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Hardware pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_RTP_RTCP_POINTER \
+ if (!veData1.rtp_rtcp) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "RTP / RTCP pointer doesn't exist"); \
+ return -1; \
+ }
+#define VALIDATE_ENCRYPT_POINTER \
+ if (!veData1.encrypt) \
+ { \
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, \
+ "Encrypt pointer doesn't exist"); \
+ return -1; \
+ }
+
+// Register functions in JNI_OnLoad()
+// How do we ensure that VoE is deleted? JNI_OnUnload?
+// What happens if class is unloaded? When loaded again, NativeInit will be
+// called again. Keep what we have?
+// Should we do something in JNI_OnUnload?
+// General design: create a class or keep global struct with "C" functions?
+// Otherwise make sure symbols are as unique as possible.
+
+// TestType enumerator
+enum TestType
+{
+ Invalid = -1,
+ Standard = 0,
+ Extended = 1,
+ Stress = 2,
+ Unit = 3,
+ CPU = 4
+};
+
+// ExtendedSelection enumerator
+enum ExtendedSelection
+{
+ XSEL_Invalid = -1,
+ XSEL_None = 0,
+ XSEL_All,
+ XSEL_Base,
+ XSEL_CallReport,
+ XSEL_Codec,
+ XSEL_DTMF,
+ XSEL_Encryption,
+ XSEL_ExternalMedia,
+ XSEL_File,
+ XSEL_Hardware,
+ XSEL_NetEqStats,
+ XSEL_Network,
+ XSEL_PTT,
+ XSEL_RTP_RTCP,
+ XSEL_VideoSync,
+ XSEL_VideoSyncExtended,
+ XSEL_VolumeControl,
+ XSEL_VQE,
+ XSEL_APM,
+ XSEL_VQMon
+};
+
+using namespace webrtc;
+
+class my_transportation;
+
+// VoiceEngine data struct
+typedef struct
+{
+ // VoiceEngine
+ VoiceEngine* ve;
+ // Sub-APIs
+ VoEBase* base;
+ VoECodec* codec;
+ VoEFile* file;
+ VoENetwork* netw;
+ VoEAudioProcessing* apm;
+ VoEVolumeControl* volume;
+ VoEHardware* hardware;
+ VoERTP_RTCP* rtp_rtcp;
+ VoEEncryption* encrypt;
+ // Other
+ my_transportation* extTrans;
+ JavaVM* jvm;
+} VoiceEngineData;
+
+// my_transportation is used when useExtTrans is enabled
+class my_transportation : public Transport
+{
+ public:
+ my_transportation(VoENetwork * network) :
+ netw(network) {
+ }
+
+ int SendPacket(int channel,const void *data,int len);
+ int SendRTCPPacket(int channel, const void *data, int len);
+ private:
+ VoENetwork * netw;
+};
+
+int my_transportation::SendPacket(int channel,const void *data,int len)
+{
+ netw->ReceivedRTPPacket(channel, data, len);
+ return len;
+}
+
+int my_transportation::SendRTCPPacket(int channel, const void *data, int len)
+{
+ netw->ReceivedRTCPPacket(channel, data, len);
+ return len;
+}
+
+//Global variables visible in this file
+static VoiceEngineData veData1;
+static VoiceEngineData veData2;
+
+// "Local" functions (i.e. not Java accessible)
+static bool GetSubApis(VoiceEngineData &veData);
+static bool ReleaseSubApis(VoiceEngineData &veData);
+
+class ThreadTest
+{
+public:
+ ThreadTest();
+ ~ThreadTest();
+ int RunTest();
+ int CloseTest();
+private:
+ static bool Run(void* ptr);
+ bool Process();
+private:
+ ThreadWrapper* _thread;
+};
+
+ThreadTest::~ThreadTest()
+{
+ if (_thread)
+ {
+ _thread->SetNotAlive();
+ if (_thread->Stop())
+ {
+ delete _thread;
+ _thread = NULL;
+ }
+ }
+}
+
+ThreadTest::ThreadTest() :
+ _thread(NULL)
+{
+ _thread = ThreadWrapper::CreateThread(Run, this, kNormalPriority,
+ "ThreadTest thread");
+}
+
+bool ThreadTest::Run(void* ptr)
+{
+ return static_cast<ThreadTest*> (ptr)->Process();
+}
+
+bool ThreadTest::Process()
+{
+ // Attach this thread to JVM
+ /*JNIEnv* env = NULL;
+ jint res = veData1.jvm->AttachCurrentThread(&env, NULL);
+ char msg[32];
+ sprintf(msg, "res=%d, env=%d", res, env);
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG, msg);*/
+
+#ifdef INIT_FROM_THREAD
+ VALIDATE_BASE_POINTER;
+ veData1.base->Init();
+#endif
+
+#ifdef START_CALL_FROM_THREAD
+ // receiving instance
+ veData2.ve = VoiceEngine::Create();
+ GetSubApis(veData2);
+ veData2.base->Init();
+ veData2.base->CreateChannel();
+ if(veData2.base->SetLocalReceiver(0, 1234) < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "set local receiver 2 failed");
+ }
+ veData2.hardware->SetLoudspeakerStatus(false);
+ veData2.volume->SetSpeakerVolume(204);
+ veData2.base->StartReceive(0);
+ if(veData2.base->StartPlayout(0) < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "start playout failed");
+ }
+
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "receiving instance started from thread");
+
+ // sending instance
+ veData1.ve = VoiceEngine::Create();
+ GetSubApis(veData1);
+ veData1.base->Init();
+ if(veData1.base->CreateChannel() < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "create channel failed");
+ }
+ if(veData1.base->SetLocalReceiver(0, 1256) < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "set local receiver failed");
+ }
+ if(veData1.base->SetSendDestination(0, 1234, "127.0.0.1") < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "set send destination failed");
+ }
+ if(veData1.base->StartSend(0) < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "start send failed");
+ }
+
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "sending instance started from thread");
+#endif
+
+ _thread->SetNotAlive();
+ _thread->Stop();
+
+ //res = veData1.jvm->DetachCurrentThread();
+
+ return true;
+}
+
+int ThreadTest::RunTest()
+{
+ if (_thread)
+ {
+ unsigned int id;
+ _thread->Start(id);
+ }
+ return 0;
+}
+
+int ThreadTest::CloseTest()
+{
+ VALIDATE_BASE_POINTER
+
+ veData1.base->DeleteChannel(0);
+ veData2.base->DeleteChannel(0);
+ veData1.base->Terminate();
+ veData2.base->Terminate();
+
+ // Release sub-APIs
+ ReleaseSubApis(veData1);
+ ReleaseSubApis(veData2);
+
+ // Delete
+ VoiceEngine::Delete(veData1.ve);
+ VoiceEngine::Delete(veData2.ve);
+ veData1.ve = NULL;
+ veData2.ve = NULL;
+
+ return 0;
+}
+
+ThreadTest threadTest;
+
+//////////////////////////////////////////////////////////////////
+// General functions
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// JNI_OnLoad
+//
+jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/)
+{
+ if (!vm)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "JNI_OnLoad did not receive a valid VM pointer");
+ return -1;
+ }
+
+ // Get JNI
+ JNIEnv* env;
+ if (JNI_OK != vm->GetEnv(reinterpret_cast<void**> (&env),
+ JNI_VERSION_1_4))
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "JNI_OnLoad could not get JNI env");
+ return -1;
+ }
+
+ // Get class to register the native functions with
+ // jclass regClass = env->FindClass("webrtc/android/AndroidTest");
+ // if (!regClass) {
+ // return -1; // Exception thrown
+ // }
+
+ // Register native functions
+ // JNINativeMethod methods[1];
+ // methods[0].name = NULL;
+ // methods[0].signature = NULL;
+ // methods[0].fnPtr = NULL;
+ // if (JNI_OK != env->RegisterNatives(regClass, methods, 1))
+ // {
+ // return -1;
+ // }
+
+ // Init VoiceEngine data
+ memset(&veData1, 0, sizeof(veData1));
+ memset(&veData2, 0, sizeof(veData2));
+
+ // Store the JVM
+ veData1.jvm = vm;
+ veData2.jvm = vm;
+
+ return JNI_VERSION_1_4;
+}
+
+/////////////////////////////////////////////
+// Native initialization
+//
+JNIEXPORT jboolean JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_NativeInit(
+ JNIEnv * env,
+ jclass)
+{
+ // Look up and cache any interesting class, field and method IDs for
+ // any used java class here
+
+ return true;
+}
+
+/////////////////////////////////////////////
+// Run auto standard test
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_RunAutoTest(
+ JNIEnv *env,
+ jobject context,
+ jint testType,
+ jint extendedSel)
+{
+ TestType tType(Invalid);
+
+ switch (testType)
+ {
+ case 0:
+ return 0;
+ case 1:
+ tType = Standard;
+ break;
+ case 2:
+ tType = Extended;
+ break;
+ case 3:
+ tType = Stress;
+ break;
+ case 4:
+ tType = Unit;
+ break;
+ default:
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "RunAutoTest - Invalid TestType");
+ return -1;
+ }
+
+ ExtendedSelection xsel(XSEL_Invalid);
+
+ switch (extendedSel)
+ {
+ case 0:
+ xsel = XSEL_None;
+ break;
+ case 1:
+ xsel = XSEL_All;
+ break;
+ case 2:
+ xsel = XSEL_Base;
+ break;
+ case 3:
+ xsel = XSEL_CallReport;
+ break;
+ case 4:
+ xsel = XSEL_Codec;
+ break;
+ case 5:
+ xsel = XSEL_DTMF;
+ break;
+ case 6:
+ xsel = XSEL_Encryption;
+ break;
+ case 7:
+ xsel = XSEL_ExternalMedia;
+ break;
+ case 8:
+ xsel = XSEL_File;
+ break;
+ case 9:
+ xsel = XSEL_Hardware;
+ break;
+ case 10:
+ xsel = XSEL_NetEqStats;
+ break;
+ case 11:
+ xsel = XSEL_Network;
+ break;
+ case 12:
+ xsel = XSEL_PTT;
+ break;
+ case 13:
+ xsel = XSEL_RTP_RTCP;
+ break;
+ case 14:
+ xsel = XSEL_VideoSync;
+ break;
+ case 15:
+ xsel = XSEL_VideoSyncExtended;
+ break;
+ case 16:
+ xsel = XSEL_VolumeControl;
+ break;
+ case 17:
+ xsel = XSEL_APM;
+ break;
+ case 18:
+ xsel = XSEL_VQMon;
+ break;
+ default:
+ xsel = XSEL_Invalid;
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "RunAutoTest - Invalid extendedType");
+ return -1;
+ }
+
+ // Set instance independent Java objects
+ VoiceEngine::SetAndroidObjects(veData1.jvm, env, context);
+
+ // Call voe test interface function
+ // TODO(leozwang) add autotest setAndroidObjects(veData1.jvm, context);
+ // jint retVal = runAutoTest(tType, xsel);
+
+ // Clear instance independent Java objects
+ VoiceEngine::SetAndroidObjects(NULL, NULL, NULL);
+
+ return 0;
+}
+
+//////////////////////////////////////////////////////////////////
+// VoiceEngine API wrapper functions
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// Create VoiceEngine instance
+//
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create(
+ JNIEnv *env,
+ jobject context)
+{
+ // Check if already created
+ if (veData1.ve)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "VoE already created");
+ return false;
+ }
+
+ // Set instance independent Java objects
+ VoiceEngine::SetAndroidObjects(veData1.jvm, env, context);
+
+#ifdef START_CALL_FROM_THREAD
+ threadTest.RunTest();
+#else
+ // Create
+ veData1.ve = VoiceEngine::Create();
+ if (!veData1.ve)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Create VoE failed");
+ return false;
+ }
+
+ // Get sub-APIs
+ if (!GetSubApis(veData1))
+ {
+ // If not OK, release all sub-APIs and delete VoE
+ ReleaseSubApis(veData1);
+ if (!VoiceEngine::Delete(veData1.ve))
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Delete VoE failed");
+ }
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+/////////////////////////////////////////////
+// Delete VoiceEngine instance
+//
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Delete(
+ JNIEnv *,
+ jobject)
+{
+#ifdef START_CALL_FROM_THREAD
+ threadTest.CloseTest();
+#else
+ // Check if exists
+ if (!veData1.ve)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "VoE does not exist");
+ return false;
+ }
+
+ // Release sub-APIs
+ ReleaseSubApis(veData1);
+
+ // Delete
+ if (!VoiceEngine::Delete(veData1.ve))
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Delete VoE failed");
+ return false;
+ }
+
+ veData1.ve = NULL;
+#endif
+
+ // Clear instance independent Java objects
+ VoiceEngine::SetAndroidObjects(NULL, NULL, NULL);
+
+ return true;
+}
+
+/////////////////////////////////////////////
+// [Base] Initialize VoiceEngine
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Init(
+ JNIEnv *,
+ jobject,
+ jboolean enableTrace,
+ jboolean useExtTrans)
+{
+ VALIDATE_BASE_POINTER;
+
+ if (enableTrace)
+ {
+ if (0 != VoiceEngine::SetTraceFile("/sdcard/trace.txt"))
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not enable trace");
+ }
+ if (0 != VoiceEngine::SetTraceFilter(kTraceAll))
+ {
+ __android_log_write(ANDROID_LOG_WARN, WEBRTC_LOG_TAG,
+ "Could not set trace filter");
+ }
+ }
+
+ if (useExtTrans)
+ {
+ VALIDATE_NETWORK_POINTER;
+ veData1.extTrans = new my_transportation(veData1.netw);
+ }
+
+ int retVal = 0;
+#ifdef INIT_FROM_THREAD
+ threadTest.RunTest();
+ usleep(200000);
+#else
+ retVal = veData1.base->Init();
+#endif
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Terminate VoiceEngine
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Terminate(
+ JNIEnv *,
+ jobject)
+{
+ VALIDATE_BASE_POINTER;
+
+ jint retVal = veData1.base->Terminate();
+
+ delete veData1.extTrans;
+ veData1.extTrans = NULL;
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Create channel
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_CreateChannel(
+ JNIEnv *,
+ jobject)
+{
+ VALIDATE_BASE_POINTER;
+ jint channel = veData1.base->CreateChannel();
+
+ if (veData1.extTrans)
+ {
+ VALIDATE_NETWORK_POINTER;
+ __android_log_print(ANDROID_LOG_DEBUG, WEBRTC_LOG_TAG,
+ "Enabling external transport on channel %d",
+ channel);
+ if (veData1.netw->RegisterExternalTransport(channel, *veData1.extTrans)
+ < 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not set external transport");
+ return -1;
+ }
+ }
+
+ return channel;
+}
+
+/////////////////////////////////////////////
+// [Base] Delete channel
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_DeleteChannel(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_BASE_POINTER;
+ return veData1.base->DeleteChannel(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] SetLocalReceiver
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetLocalReceiver(
+ JNIEnv *,
+ jobject,
+ jint channel,
+ jint port)
+{
+ VALIDATE_BASE_POINTER;
+ return veData1.base->SetLocalReceiver(channel, port);
+}
+
+/////////////////////////////////////////////
+// [Base] SetSendDestination
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSendDestination(
+ JNIEnv *env,
+ jobject,
+ jint channel,
+ jint port,
+ jstring ipaddr)
+{
+ VALIDATE_BASE_POINTER;
+
+ const char* ipaddrNative = env->GetStringUTFChars(ipaddr, NULL);
+ if (!ipaddrNative)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not get UTF string");
+ return -1;
+ }
+
+ jint retVal = veData1.base->SetSendDestination(channel, port, ipaddrNative);
+
+ env->ReleaseStringUTFChars(ipaddr, ipaddrNative);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] StartListen
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartListen(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+#ifdef USE_SRTP
+ VALIDATE_ENCRYPT_POINTER;
+ bool useForRTCP = false;
+ if (veData1.encrypt->EnableSRTPReceive(
+ channel,CIPHER_AES_128_COUNTER_MODE,30,AUTH_HMAC_SHA1,
+ 16,4, ENCRYPTION_AND_AUTHENTICATION,
+ (unsigned char*)nikkey, useForRTCP) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to enable SRTP receive");
+ return -1;
+ }
+#endif
+
+ VALIDATE_BASE_POINTER;
+ int retVal = veData1.base->StartReceive(channel);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Start playout
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayout(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_BASE_POINTER;
+ int retVal = veData1.base->StartPlayout(channel);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Start send
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartSend(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ /* int dscp(0), serviceType(-1), overrideDscp(0), res(0);
+ bool gqosEnabled(false), useSetSockOpt(false);
+
+ if (veData1.netw->SetSendTOS(channel, 13, useSetSockOpt) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to set TOS");
+ return -1;
+ }
+
+ res = veData1.netw->GetSendTOS(channel, dscp, useSetSockOpt);
+ if (res != 0 || dscp != 13 || useSetSockOpt != true)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to get TOS");
+ return -1;
+ } */
+
+ /* if (veData1.rtp_rtcp->SetFECStatus(channel, 1) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to enable FEC");
+ return -1;
+ } */
+#ifdef USE_SRTP
+ VALIDATE_ENCRYPT_POINTER;
+ bool useForRTCP = false;
+ if (veData1.encrypt->EnableSRTPSend(
+ channel,CIPHER_AES_128_COUNTER_MODE,30,AUTH_HMAC_SHA1,
+ 16,4, ENCRYPTION_AND_AUTHENTICATION,
+ (unsigned char*)nikkey, useForRTCP) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to enable SRTP send");
+ return -1;
+ }
+#endif
+
+ VALIDATE_BASE_POINTER;
+ int retVal = veData1.base->StartSend(channel);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [Base] Stop listen
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopListen(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+#ifdef USE_SRTP
+ VALIDATE_ENCRYPT_POINTER;
+ if (veData1.encrypt->DisableSRTPReceive(channel) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to disable SRTP receive");
+ return -1;
+ }
+#endif
+
+ VALIDATE_BASE_POINTER;
+ return veData1.base->StopReceive(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] Stop playout
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayout(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_BASE_POINTER;
+ return veData1.base->StopPlayout(channel);
+}
+
+/////////////////////////////////////////////
+// [Base] Stop send
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopSend(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ /* if (veData1.rtp_rtcp->SetFECStatus(channel, 0) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to disable FEC");
+ return -1;
+ } */
+
+#ifdef USE_SRTP
+ VALIDATE_ENCRYPT_POINTER;
+ if (veData1.encrypt->DisableSRTPSend(channel) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to disable SRTP send");
+ return -1;
+ }
+#endif
+
+ VALIDATE_BASE_POINTER;
+ return veData1.base->StopSend(channel);
+}
+
+/////////////////////////////////////////////
+// [codec] Number of codecs
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NumOfCodecs(
+ JNIEnv *,
+ jobject)
+{
+ VALIDATE_CODEC_POINTER;
+ return veData1.codec->NumOfCodecs();
+}
+
+/////////////////////////////////////////////
+// [codec] Set send codec
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSendCodec(
+ JNIEnv *,
+ jobject,
+ jint channel,
+ jint index)
+{
+ VALIDATE_CODEC_POINTER;
+
+ CodecInst codec;
+
+ if (veData1.codec->GetCodec(index, codec) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to get codec");
+ return -1;
+ }
+
+ return veData1.codec->SetSendCodec(channel, codec);
+}
+
+/////////////////////////////////////////////
+// [codec] Set VAD status
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetVADStatus(
+ JNIEnv *,
+ jobject,
+ jint channel,
+ jboolean enable,
+ jint mode)
+{
+ VALIDATE_CODEC_POINTER;
+
+ VadModes VADmode = kVadConventional;
+
+ switch (mode)
+ {
+ case 0:
+ break; // already set
+ case 1:
+ VADmode = kVadAggressiveLow;
+ break;
+ case 2:
+ VADmode = kVadAggressiveMid;
+ break;
+ case 3:
+ VADmode = kVadAggressiveHigh;
+ break;
+ default:
+ VADmode = (VadModes) 17; // force error
+ break;
+ }
+
+ return veData1.codec->SetVADStatus(channel, enable, VADmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetNSStatus
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetNSStatus(
+ JNIEnv *,
+ jobject,
+ jboolean enable,
+ jint mode)
+{
+ VALIDATE_APM_POINTER;
+
+ NsModes NSmode = kNsDefault;
+
+ switch (mode)
+ {
+ case 0:
+ NSmode = kNsUnchanged;
+ break;
+ case 1:
+ break; // already set
+ case 2:
+ NSmode = kNsConference;
+ break;
+ case 3:
+ NSmode = kNsLowSuppression;
+ break;
+ case 4:
+ NSmode = kNsModerateSuppression;
+ break;
+ case 5:
+ NSmode = kNsHighSuppression;
+ break;
+ case 6:
+ NSmode = kNsVeryHighSuppression;
+ break;
+ default:
+ NSmode = (NsModes) 17; // force error
+ break;
+ }
+
+ return veData1.apm->SetNsStatus(enable, NSmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetAGCStatus
+//
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetAGCStatus(
+ JNIEnv *,
+ jobject,
+ jboolean enable,
+ jint mode)
+{
+ VALIDATE_APM_POINTER;
+
+ AgcModes AGCmode = kAgcDefault;
+
+ switch (mode)
+ {
+ case 0:
+ AGCmode = kAgcUnchanged;
+ break;
+ case 1:
+ break; // already set
+ case 2:
+ AGCmode = kAgcAdaptiveAnalog;
+ break;
+ case 3:
+ AGCmode = kAgcAdaptiveDigital;
+ break;
+ case 4:
+ AGCmode = kAgcFixedDigital;
+ break;
+ default:
+ AGCmode = (AgcModes) 17; // force error
+ break;
+ }
+
+ /* AgcConfig agcConfig;
+ agcConfig.targetLeveldBOv = 3;
+ agcConfig.digitalCompressionGaindB = 50;
+ agcConfig.limiterEnable = 0;
+
+ if (veData1.apm->SetAGCConfig(agcConfig) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Failed to set AGC config");
+ return -1;
+ } */
+
+ return veData1.apm->SetAgcStatus(enable, AGCmode);
+}
+
+/////////////////////////////////////////////
+// [apm] SetECStatus
+//
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetECStatus(
+ JNIEnv *,
+ jobject,
+ jboolean enable,
+ jint mode)
+{
+ VALIDATE_APM_POINTER;
+
+ EcModes ECmode = kEcDefault;
+
+ switch (mode)
+ {
+ case 0:
+ ECmode = kEcDefault;
+ break;
+ case 1:
+ break; // already set
+ case 2:
+ ECmode = kEcConference;
+ break;
+ case 3:
+ ECmode = kEcAec;
+ break;
+ case 4:
+ ECmode = kEcAecm;
+ break;
+ default:
+ ECmode = (EcModes) 17; // force error
+ break;
+ }
+
+ return veData1.apm->SetEcStatus(enable, ECmode);
+}
+
+/////////////////////////////////////////////
+// [File] Start play file locally
+//
+// Starts local playback of 'fileName' on 'channel'; 'loop' requests repeated
+// playback. The JNI UTF string is always released before returning.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileLocally(
+ JNIEnv * env,
+ jobject,
+ jint channel,
+ jstring fileName,
+ jboolean loop)
+{
+ VALIDATE_FILE_POINTER;
+
+ const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+ if (!fileNameNative)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not get UTF string");
+ return -1;
+ }
+
+ jint retVal = veData1.file->StartPlayingFileLocally(channel,
+ fileNameNative, loop);
+
+ env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop play file locally
+//
+// Stops any local file playback on 'channel'; forwards the sub-API result.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileLocally(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_FILE_POINTER;
+ return veData1.file->StopPlayingFileLocally(channel);
+}
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartRecordingPlayout
+ * Signature: (ILjava/lang/String;Z)I
+ */
+// Records the playout signal of 'channel' to 'fileName'. The trailing
+// jboolean parameter from the Java signature is intentionally unnamed and
+// ignored; the third argument to the sub-API call is 0 (presumably "no
+// compression codec" -- confirm against VoEFile::StartRecordingPlayout).
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartRecordingPlayout(
+ JNIEnv * env,
+ jobject,
+ jint channel,
+ jstring fileName,
+ jboolean)
+{
+ VALIDATE_FILE_POINTER;
+
+ const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+ if (!fileNameNative)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not get UTF string");
+ return -1;
+ }
+
+ jint retVal = veData1.file->StartRecordingPlayout(channel, fileNameNative,
+ 0);
+
+ env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop Recording Playout
+//
+// Stops playout recording on 'channel'; forwards the sub-API result.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopRecordingPlayout(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_FILE_POINTER;
+ return veData1.file->StopRecordingPlayout(channel);
+}
+
+/////////////////////////////////////////////
+// [File] Start playing file as microphone
+//
+// Feeds 'fileName' into 'channel' as if it were microphone input; 'loop'
+// requests repeated playback. The JNI UTF string is always released.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileAsMicrophone(
+ JNIEnv *env,
+ jobject,
+ jint channel,
+ jstring fileName,
+ jboolean loop)
+{
+ VALIDATE_FILE_POINTER;
+
+ const char* fileNameNative = env->GetStringUTFChars(fileName, NULL);
+ if (!fileNameNative)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not get UTF string");
+ return -1;
+ }
+
+ jint retVal = veData1.file->StartPlayingFileAsMicrophone(channel,
+ fileNameNative,
+ loop);
+
+ env->ReleaseStringUTFChars(fileName, fileNameNative);
+
+ return retVal;
+}
+
+/////////////////////////////////////////////
+// [File] Stop playing file as microphone
+//
+// Stops the file-as-microphone feed on 'channel'; forwards the sub-API result.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileAsMicrophone(
+ JNIEnv *,
+ jobject,
+ jint channel)
+{
+ VALIDATE_FILE_POINTER;
+ return veData1.file->StopPlayingFileAsMicrophone(channel);
+}
+
+/////////////////////////////////////////////
+// [Volume] Set speaker volume
+//
+// Sets the speaker volume to 'level' and then reads it back, returning -1
+// unless the stored value matches exactly.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetSpeakerVolume(
+ JNIEnv *,
+ jobject,
+ jint level)
+{
+ VALIDATE_VOLUME_POINTER;
+ if (veData1.volume->SetSpeakerVolume(level) != 0)
+ {
+ return -1;
+ }
+
+ unsigned int storedVolume = 0;
+ if (veData1.volume->GetSpeakerVolume(storedVolume) != 0)
+ {
+ return -1;
+ }
+
+ // NOTE(review): unsigned 'storedVolume' is compared against the signed
+ // jint 'level'; this also assumes the engine stores the value unmodified
+ // (no clamping/rounding) -- confirm against VoEVolumeControl docs.
+ if (storedVolume != level)
+ {
+ return -1;
+ }
+
+ return 0;
+}
+
+/////////////////////////////////////////////
+// [Hardware] Set loudspeaker status
+//
+// Routes audio output to the loudspeaker (enable == true) or the earpiece
+// (enable == false). Returns 0 on success, -1 on failure.
+JNIEXPORT jint JNICALL
+Java_org_webrtc_voiceengine_test_AndroidTest_SetLoudspeakerStatus(
+ JNIEnv *,
+ jobject,
+ jboolean enable)
+{
+ VALIDATE_HARDWARE_POINTER;
+ if (veData1.hardware->SetLoudspeakerStatus(enable) != 0)
+ {
+ return -1;
+ }
+
+ // Intentionally disabled experiment that toggled FEC together with the
+ // loudspeaker state.
+ /*VALIDATE_RTP_RTCP_POINTER;
+
+ if (veData1.rtp_rtcp->SetFECStatus(0, enable, -1) != 0)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could not set FEC");
+ return -1;
+ }
+ else if(enable)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could enable FEC");
+ }
+ else
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Could disable FEC");
+ }*/
+
+ return 0;
+}
+
+//////////////////////////////////////////////////////////////////
+// "Local" functions (i.e. not Java accessible)
+//////////////////////////////////////////////////////////////////
+
+/////////////////////////////////////////////
+// Get all sub-APIs
+//
+// Fetches every VoiceEngine sub-API interface pointer into veData.
+// Returns true only if all interfaces were obtained; on partial failure the
+// successfully fetched pointers remain set (ReleaseSubApis handles cleanup).
+bool GetSubApis(VoiceEngineData &veData)
+{
+ bool getOK = true;
+
+ // Base
+ veData.base = VoEBase::GetInterface(veData.ve);
+ if (!veData.base)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get base sub-API failed");
+ getOK = false;
+ }
+
+ // Codec
+ veData.codec = VoECodec::GetInterface(veData.ve);
+ if (!veData.codec)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get codec sub-API failed");
+ getOK = false;
+ }
+
+ // File
+ veData.file = VoEFile::GetInterface(veData.ve);
+ if (!veData.file)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get file sub-API failed");
+ getOK = false;
+ }
+
+ // Network
+ veData.netw = VoENetwork::GetInterface(veData.ve);
+ if (!veData.netw)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get network sub-API failed");
+ getOK = false;
+ }
+
+ // AudioProcessing module
+ veData.apm = VoEAudioProcessing::GetInterface(veData.ve);
+ if (!veData.apm)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get apm sub-API failed");
+ getOK = false;
+ }
+
+ // Volume
+ veData.volume = VoEVolumeControl::GetInterface(veData.ve);
+ if (!veData.volume)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get volume sub-API failed");
+ getOK = false;
+ }
+
+ // Hardware
+ veData.hardware = VoEHardware::GetInterface(veData.ve);
+ if (!veData.hardware)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get hardware sub-API failed");
+ getOK = false;
+ }
+
+ // RTP / RTCP
+ veData.rtp_rtcp = VoERTP_RTCP::GetInterface(veData.ve);
+ if (!veData.rtp_rtcp)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get rtp_rtcp sub-API failed");
+ getOK = false;
+ }
+
+ // Encrypt
+ veData.encrypt = VoEEncryption::GetInterface(veData.ve);
+ if (!veData.encrypt)
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Get encrypt sub-API failed");
+ getOK = false;
+ }
+
+ return getOK;
+}
+
+/////////////////////////////////////////////
+// Release all sub-APIs
+//
+// Releases every non-NULL sub-API pointer obtained by GetSubApis(). A
+// pointer is cleared (set to NULL) only when its Release() call succeeds;
+// returns false if any release failed.
+bool ReleaseSubApis(VoiceEngineData &veData)
+{
+ bool releaseOK = true;
+
+ // Base
+ if (veData.base)
+ {
+ if (0 != veData.base->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release base sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.base = NULL;
+ }
+ }
+
+ // Codec
+ if (veData.codec)
+ {
+ if (0 != veData.codec->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release codec sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.codec = NULL;
+ }
+ }
+
+ // File
+ if (veData.file)
+ {
+ if (0 != veData.file->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release file sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.file = NULL;
+ }
+ }
+
+ // Network
+ if (veData.netw)
+ {
+ if (0 != veData.netw->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release network sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.netw = NULL;
+ }
+ }
+
+ // apm
+ if (veData.apm)
+ {
+ if (0 != veData.apm->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release apm sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.apm = NULL;
+ }
+ }
+
+ // Volume
+ if (veData.volume)
+ {
+ if (0 != veData.volume->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release volume sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.volume = NULL;
+ }
+ }
+
+ // Hardware
+ if (veData.hardware)
+ {
+ if (0 != veData.hardware->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release hardware sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.hardware = NULL;
+ }
+ }
+
+ // RTP RTCP
+ if (veData.rtp_rtcp)
+ {
+ if (0 != veData.rtp_rtcp->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release rtp_rtcp sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.rtp_rtcp = NULL;
+ }
+ }
+
+ // Encrypt
+ if (veData.encrypt)
+ {
+ if (0 != veData.encrypt->Release())
+ {
+ __android_log_write(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
+ "Release encrypt sub-API failed");
+ releaseOK = false;
+ }
+ else
+ {
+ veData.encrypt = NULL;
+ }
+ }
+
+ return releaseOK;
+}
diff --git a/voice_engine/test/android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h b/voice_engine/test/android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h
new file mode 100644
index 0000000..60fe839
--- /dev/null
+++ b/voice_engine/test/android/android_test/jni/org_webrtc_voiceengine_test_AndroidTest.h
@@ -0,0 +1,253 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class org_webrtc_voiceengine_test_AndroidTest */
+
+#ifndef _Included_org_webrtc_voiceengine_test_AndroidTest
+#define _Included_org_webrtc_voiceengine_test_AndroidTest
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: NativeInit
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NativeInit
+ (JNIEnv *, jclass);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: RunAutoTest
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_RunAutoTest
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: Create
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Create
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: Delete
+ * Signature: ()Z
+ */
+JNIEXPORT jboolean JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Delete
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: Init
+ * Signature: (IIIZZ)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Init
+ (JNIEnv *, jobject, jboolean, jboolean);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: Terminate
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_Terminate
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: CreateChannel
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_CreateChannel
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: DeleteChannel
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_DeleteChannel
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetLocalReceiver
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLocalReceiver
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetSendDestination
+ * Signature: (IILjava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendDestination
+ (JNIEnv *, jobject, jint, jint, jstring);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartListen
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartListen
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayout
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartSend
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartSend
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopListen
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopListen
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayout
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopSend
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopSend
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartPlayingFileLocally
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileLocally
+ (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopPlayingFileLocally
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileLocally
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartRecordingPlayout
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartRecordingPlayout
+ (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopRecordingPlayout
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopRecordingPlayout
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StartPlayingFileAsMicrophone
+ * Signature: (ILjava/lang/String;Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StartPlayingFileAsMicrophone
+ (JNIEnv *, jobject, jint, jstring, jboolean);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: StopPlayingFileAsMicrophone
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_StopPlayingFileAsMicrophone
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: NumOfCodecs
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_NumOfCodecs
+ (JNIEnv *, jobject);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetSendCodec
+ * Signature: (II)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSendCodec
+ (JNIEnv *, jobject, jint, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetVADStatus
+ * Signature: (IZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetVADStatus
+ (JNIEnv *, jobject, jint, jboolean, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetNSStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetNSStatus
+ (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetAGCStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetAGCStatus
+ (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetECStatus
+ * Signature: (ZI)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetECStatus
+ (JNIEnv *, jobject, jboolean, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetSpeakerVolume
+ * Signature: (I)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetSpeakerVolume
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: org_webrtc_voiceengine_test_AndroidTest
+ * Method: SetLoudspeakerStatus
+ * Signature: (Z)I
+ */
+JNIEXPORT jint JNICALL Java_org_webrtc_voiceengine_test_AndroidTest_SetLoudspeakerStatus
+ (JNIEnv *, jobject, jboolean);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/voice_engine/test/android/android_test/res/drawable/icon.png b/voice_engine/test/android/android_test/res/drawable/icon.png
new file mode 100644
index 0000000..a07c69f
--- /dev/null
+++ b/voice_engine/test/android/android_test/res/drawable/icon.png
Binary files differ
diff --git a/voice_engine/test/android/android_test/res/layout/main.xml b/voice_engine/test/android/android_test/res/layout/main.xml
new file mode 100644
index 0000000..4165a07
--- /dev/null
+++ b/voice_engine/test/android/android_test/res/layout/main.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!-- -->
+<!-- Use of this source code is governed by a BSD-style license -->
+<!-- that can be found in the LICENSE file in the root of the source -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS. All contributing project authors may -->
+<!-- be found in the AUTHORS file in the root of the source tree. -->
+
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+ android:orientation="vertical"
+ android:layout_width="fill_parent"
+ android:layout_height="fill_parent">
+
+ <TextView android:text="@+id/TextView01"
+ android:id="@+id/TextView01"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </TextView>
+ <EditText android:text="@+id/EditText01"
+ android:id="@+id/EditText01"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </EditText>
+ <Button android:text="@+id/Button01"
+ android:id="@+id/Button01"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </Button>
+ <Spinner android:id="@+id/Spinner01"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </Spinner>
+ <Spinner android:id="@+id/Spinner02"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </Spinner>
+ <Button android:text="@+id/Button02"
+ android:id="@+id/Button02"
+ android:layout_width="wrap_content"
+ android:layout_height="wrap_content">
+ </Button>
+</LinearLayout>
diff --git a/voice_engine/test/android/android_test/res/values/strings.xml b/voice_engine/test/android/android_test/res/values/strings.xml
new file mode 100644
index 0000000..29ec4ee
--- /dev/null
+++ b/voice_engine/test/android/android_test/res/values/strings.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (c) 2011 The WebRTC project authors. All Rights Reserved. -->
+<!-- -->
+<!-- Use of this source code is governed by a BSD-style license -->
+<!-- that can be found in the LICENSE file in the root of the source -->
+<!-- tree. An additional intellectual property rights grant can be found -->
+<!-- in the file PATENTS. All contributing project authors may -->
+<!-- be found in the AUTHORS file in the root of the source tree. -->
+
+<resources>
+
+ <string name="app_name">WebRtc VoE</string>
+</resources>
diff --git a/voice_engine/test/android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java b/voice_engine/test/android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java
new file mode 100644
index 0000000..71b22b0
--- /dev/null
+++ b/voice_engine/test/android/android_test/src/org/webrtc/voiceengine/test/AndroidTest.java
@@ -0,0 +1,1190 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license that can be found
+ * in the LICENSE file in the root of the source tree. An additional
+ * intellectual property rights grant can be found in the file PATENTS. All
+ * contributing project authors may be found in the AUTHORS file in the root of
+ * the source tree.
+ */
+
+/*
+ * VoiceEngine Android test application. It starts either auto test or acts like
+ * a GUI test.
+ */
+
+package org.webrtc.voiceengine.test;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+
+import android.app.Activity;
+import android.content.Context;
+import android.media.AudioFormat;
+import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.media.MediaRecorder;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.View;
+import android.widget.AdapterView;
+import android.widget.ArrayAdapter;
+import android.widget.Button;
+import android.widget.EditText;
+import android.widget.Spinner;
+import android.widget.TextView;
+
+public class AndroidTest extends Activity {
+ private byte[] _playBuffer = null;
+ private short[] _circBuffer = new short[8000]; // can hold 50 frames
+
+ private int _recIndex = 0;
+ private int _playIndex = 0;
+ // private int _streamVolume = 4;
+ private int _maxVolume = 0; // Android max level (commonly 5)
+ // VoE level (0-255), corresponds to level 4 out of 5
+ private int _volumeLevel = 204;
+
+ private Thread _playThread;
+ private Thread _recThread;
+ private Thread _autotestThread;
+
+ private static AudioTrack _at;
+ private static AudioRecord _ar;
+
+ private File _fr = null;
+ private FileInputStream _in = null;
+
+ private boolean _isRunningPlay = false;
+ private boolean _isRunningRec = false;
+ private boolean _settingSet = true;
+ private boolean _isCallActive = false;
+ private boolean _runAutotest = false; // ENABLE AUTOTEST HERE!
+
+ private int _channel = -1;
+ private int _codecIndex = 0;
+ private int _ecIndex = 0;
+ private int _nsIndex = 0;
+ private int _agcIndex = 0;
+ private int _vadIndex = 0;
+ private int _audioIndex = 3;
+ private int _settingMenu = 0;
+ private int _receivePort = 1234;
+ private int _destinationPort = 1234;
+ private String _destinationIP = "127.0.0.1";
+
+ // "Build" settings
+ private final boolean _playFromFile = false;
+ // Set to true to send data to native code and back
+ private final boolean _runThroughNativeLayer = true;
+ private final boolean enableSend = true;
+ private final boolean enableReceive = true;
+ private final boolean useNativeThread = false;
+
+ /** Called when the activity is first created. */
+ // Builds the test UI (start/stop buttons, destination-IP field, two
+ // setting spinners), wires the spinner callbacks to the native JNI
+ // wrappers, optionally creates the VoiceEngine, and snaps the default
+ // VoE volume (0-255) to the nearest Android volume step.
+ public void onCreate(Bundle savedInstanceState) {
+ super.onCreate(savedInstanceState);
+ setContentView(R.layout.main);
+
+ TextView tv = (TextView) findViewById(R.id.TextView01);
+ tv.setText("");
+
+ final EditText ed = (EditText) findViewById(R.id.EditText01);
+ ed.setWidth(200);
+ ed.setText(_destinationIP);
+
+ final Button buttonStart = (Button) findViewById(R.id.Button01);
+ buttonStart.setWidth(200);
+ if (_runAutotest) {
+ buttonStart.setText("Run test");
+ } else {
+ buttonStart.setText("Start Call");
+ }
+ // button.layout(50, 50, 100, 40);
+ // Button01 either launches the autotest or toggles a call.
+ buttonStart.setOnClickListener(new View.OnClickListener() {
+ public void onClick(View v) {
+
+ if (_runAutotest) {
+ startAutoTest();
+ } else {
+ if (_isCallActive) {
+
+ if (stopCall() != -1) {
+ _isCallActive = false;
+ buttonStart.setText("Start Call");
+ }
+ } else {
+
+ _destinationIP = ed.getText().toString();
+ if (startCall() != -1) {
+ _isCallActive = true;
+ buttonStart.setText("Stop Call");
+ }
+ }
+ }
+
+ // displayTextFromFile();
+ // recordAudioToFile();
+ // if(!_playFromFile)
+ // {
+ // recAudioInThread();
+ // }
+ // playAudioInThread();
+ }
+ });
+
+ // Button02 shuts down the engine (unless autotest owns it) and exits.
+ final Button buttonStop = (Button) findViewById(R.id.Button02);
+ buttonStop.setWidth(200);
+ buttonStop.setText("Close app");
+ buttonStop.setOnClickListener(new View.OnClickListener() {
+ public void onClick(View v) {
+
+ if (!_runAutotest) {
+ ShutdownVoE();
+ }
+
+ // This call terminates and should close the activity
+ finish();
+
+ // playAudioFromFile();
+ // if(!_playFromFile)
+ // {
+ // stopRecAudio();
+ // }
+ // stopPlayAudio();
+ }
+ });
+
+
+ // Value adapters for the second spinner, one per setting category.
+ String ap1[] = {"EC off", "AECM"};
+ final ArrayAdapter<String> adapterAp1 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ ap1);
+ String ap2[] =
+ {"NS off", "NS low", "NS moderate", "NS high",
+ "NS very high"};
+ final ArrayAdapter<String> adapterAp2 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ ap2);
+ String ap3[] = {"AGC off", "AGC adaptive", "AGC fixed"};
+ final ArrayAdapter<String> adapterAp3 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ ap3);
+ String ap4[] =
+ {"VAD off", "VAD conventional", "VAD high rate",
+ "VAD mid rate", "VAD low rate"};
+ final ArrayAdapter<String> adapterAp4 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ ap4);
+ String codecs[] = {"iSAC", "PCMU", "PCMA", "iLBC"};
+ final ArrayAdapter<String> adapterCodecs = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ codecs);
+
+ // Spinner01 picks the setting category; Spinner02 picks its value.
+ final Spinner spinnerSettings1 = (Spinner) findViewById(R.id.Spinner01);
+ final Spinner spinnerSettings2 = (Spinner) findViewById(R.id.Spinner02);
+ spinnerSettings1.setMinimumWidth(200);
+ String settings[] =
+ {"Codec", "Echo Control", "Noise Suppression",
+ "Automatic Gain Control",
+ "Voice Activity Detection"};
+ ArrayAdapter<String> adapterSettings1 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ settings);
+ spinnerSettings1.setAdapter(adapterSettings1);
+ spinnerSettings1.setOnItemSelectedListener(
+ new AdapterView.OnItemSelectedListener() {
+ public void onItemSelected(AdapterView adapterView, View view,
+ int position, long id) {
+
+ _settingMenu = position;
+ _settingSet = false;
+ if (position == 0) {
+ spinnerSettings2.setAdapter(adapterCodecs);
+ spinnerSettings2.setSelection(_codecIndex);
+ }
+ if (position == 1) {
+ spinnerSettings2.setAdapter(adapterAp1);
+ spinnerSettings2.setSelection(_ecIndex);
+ }
+ if (position == 2) {
+ spinnerSettings2.setAdapter(adapterAp2);
+ spinnerSettings2.setSelection(_nsIndex);
+ }
+ if (position == 3) {
+ spinnerSettings2.setAdapter(adapterAp3);
+ spinnerSettings2.setSelection(_agcIndex);
+ }
+ if (position == 4) {
+ spinnerSettings2.setAdapter(adapterAp4);
+ spinnerSettings2.setSelection(_vadIndex);
+ }
+ }
+
+ public void onNothingSelected(AdapterView adapterView) {
+ WebrtcLog("No setting1 selected");
+ }
+ });
+
+ spinnerSettings2.setMinimumWidth(200);
+ ArrayAdapter<String> adapterSettings2 = new ArrayAdapter<String>(
+ this,
+ android.R.layout.simple_spinner_dropdown_item,
+ codecs);
+ spinnerSettings2.setAdapter(adapterSettings2);
+ // NOTE(review): the _settingMenu values handled below (0=volume,
+ // 1=codec, 2=EC, 3=NS, 4=AGC, 5=VAD) do not line up with the
+ // 5-entry settings[] array above (0=Codec ... 4=VAD); this looks
+ // off by one -- confirm intended mapping.
+ spinnerSettings2.setOnItemSelectedListener(
+ new AdapterView.OnItemSelectedListener() {
+ public void onItemSelected(AdapterView adapterView, View view,
+ int position, long id) {
+
+ // avoid unintentional setting
+ if (_settingSet == false) {
+ _settingSet = true;
+ return;
+ }
+
+ // Change volume
+ if (_settingMenu == 0) {
+ WebrtcLog("Selected audio " + position);
+ setAudioProperties(position);
+ spinnerSettings2.setSelection(_audioIndex);
+ }
+
+ // Change codec
+ if (_settingMenu == 1) {
+ _codecIndex = position;
+ WebrtcLog("Selected codec " + position);
+ if (0 != SetSendCodec(_channel, _codecIndex)) {
+ WebrtcLog("VoE set send codec failed");
+ }
+ }
+
+ // Change EC
+ if (_settingMenu == 2) {
+ boolean enable = true;
+ int ECmode = 5; // AECM
+ int AESmode = 0;
+
+ _ecIndex = position;
+ WebrtcLog("Selected EC " + position);
+
+ if (position == 0) {
+ enable = false;
+ }
+ if (position > 1) {
+ ECmode = 4; // AES
+ AESmode = position - 1;
+ }
+
+ if (0 != SetECStatus(enable, ECmode)) {
+ WebrtcLog("VoE set EC status failed");
+ }
+ }
+
+ // Change NS
+ if (_settingMenu == 3) {
+ boolean enable = true;
+
+ _nsIndex = position;
+ WebrtcLog("Selected NS " + position);
+
+ if (position == 0) {
+ enable = false;
+ }
+ if (0 != SetNSStatus(enable, position + 2)) {
+ WebrtcLog("VoE set NS status failed");
+ }
+ }
+
+ // Change AGC
+ if (_settingMenu == 4) {
+ boolean enable = true;
+
+ _agcIndex = position;
+ WebrtcLog("Selected AGC " + position);
+
+ if (position == 0) {
+ enable = false;
+ position = 1; // default
+ }
+ if (0 != SetAGCStatus(enable, position + 2)) {
+ WebrtcLog("VoE set AGC status failed");
+ }
+ }
+
+ // Change VAD
+ if (_settingMenu == 5) {
+ boolean enable = true;
+
+ _vadIndex = position;
+ WebrtcLog("Selected VAD " + position);
+
+ if (position == 0) {
+ enable = false;
+ position++;
+ }
+ if (0 != SetVADStatus(_channel, enable, position - 1)) {
+ WebrtcLog("VoE set VAD status failed");
+ }
+ }
+ }
+
+ public void onNothingSelected(AdapterView adapterView) {
+ }
+ });
+
+ // Setup VoiceEngine
+ if (!_runAutotest && !useNativeThread) SetupVoE();
+
+ // Suggest to use the voice call audio stream for hardware volume
+ // controls
+ setVolumeControlStream(AudioManager.STREAM_VOICE_CALL);
+
+ // Get max Android volume and adjust default volume to map exactly to an
+ // Android level
+ AudioManager am =
+ (AudioManager) getSystemService(Context.AUDIO_SERVICE);
+ _maxVolume = am.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
+ if (_maxVolume <= 0) {
+ WebrtcLog("Could not get max volume!");
+ } else {
+ int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;
+ _volumeLevel = (androidVolumeLevel * 255) / _maxVolume;
+ }
+
+ WebrtcLog("Started Webrtc Android Test");
+ }
+
+ // Will be called when activity is shutdown.
+ // NOTE: Activity may be killed without this function being called,
+ // but then we should not need to clean up.
+ // Engine shutdown is deliberately left to the "Close app" button
+ // (which calls ShutdownVoE()), not to the activity lifecycle.
+ protected void onDestroy() {
+ super.onDestroy();
+ // ShutdownVoE();
+ }
+
+ // Creates and initializes the VoiceEngine via the native wrappers and
+ // creates the single channel used by this test, storing its id in
+ // _channel. Failures are logged but not propagated.
+ private void SetupVoE() {
+ // Create VoiceEngine
+ Create(); // Error logging is done in native API wrapper
+
+ // Initialize
+ if (0 != Init(false, false)) {
+ WebrtcLog("VoE init failed");
+ }
+
+ // Create channel
+ // NOTE(review): treats any non-zero id as failure; this only holds
+ // because the first channel created gets id 0. A "< 0" check would
+ // be more robust -- confirm CreateChannel's error convention.
+ _channel = CreateChannel();
+ if (0 != _channel) {
+ WebrtcLog("VoE create channel failed");
+ }
+
+ }
+
+ // Tears down the VoiceEngine in reverse order of SetupVoE(): deletes
+ // the channel, terminates the engine, then deletes it. Failures are
+ // logged but not propagated.
+ private void ShutdownVoE() {
+ // Delete channel
+ if (0 != DeleteChannel(_channel)) {
+ WebrtcLog("VoE delete channel failed");
+ }
+
+ // Terminate
+ if (0 != Terminate()) {
+ WebrtcLog("VoE terminate failed");
+ }
+
+ // Delete VoiceEngine
+ Delete(); // Error logging is done in native API wrapper
+ }
+
+ // Starts a full-duplex call on _channel: sets up the receive side
+ // (local receiver, listen, earpiece routing, playout) and the send side
+ // (destination, codec, send), gated by the enableReceive/enableSend
+ // build flags. Returns 0 on success, -1 on the first failure.
+ int startCall() {
+
+ // In native-thread mode the native layer drives the call itself.
+ if (useNativeThread == true) {
+
+ Create();
+ return 0;
+ }
+
+ if (enableReceive == true) {
+ // Set local receiver
+ if (0 != SetLocalReceiver(_channel, _receivePort)) {
+ WebrtcLog("VoE set local receiver failed");
+ }
+
+ if (0 != StartListen(_channel)) {
+ WebrtcLog("VoE start listen failed");
+ return -1;
+ }
+
+ // Route audio to earpiece
+ if (0 != SetLoudspeakerStatus(false)) {
+ WebrtcLog("VoE set louspeaker status failed");
+ return -1;
+ }
+
+ /*
+ * WebrtcLog("VoE start record now"); if (0 !=
+ * StartRecordingPlayout(_channel, "/sdcard/singleUserDemoOut.pcm",
+ * false)) { WebrtcLog("VoE Recording Playout failed"); }
+ * WebrtcLog("VoE start Recording Playout end");
+ */
+ // Start playout
+ if (0 != StartPlayout(_channel)) {
+ WebrtcLog("VoE start playout failed");
+ return -1;
+ }
+
+ // Start playout file
+ // if (0 != StartPlayingFileLocally(_channel,
+ // "/sdcard/singleUserDemo.pcm", true)) {
+ // WebrtcLog("VoE start playout file failed");
+ // return -1;
+ // }
+ }
+
+ if (enableSend == true) {
+ if (0 != SetSendDestination(_channel, _destinationPort,
+ _destinationIP)) {
+ WebrtcLog("VoE set send destination failed");
+ return -1;
+ }
+
+ if (0 != SetSendCodec(_channel, _codecIndex)) {
+ WebrtcLog("VoE set send codec failed");
+ return -1;
+ }
+
+ /*
+ * if (0 != StartPlayingFileAsMicrophone(_channel,
+ * "/sdcard/singleUserDemo.pcm", true)) {
+ * WebrtcLog("VoE start playing file as microphone failed"); }
+ */
+ if (0 != StartSend(_channel)) {
+ WebrtcLog("VoE start send failed");
+ return -1;
+ }
+
+ // if (0 != StartPlayingFileAsMicrophone(_channel,
+ // "/sdcard/singleUserDemo.pcm", true)) {
+ // WebrtcLog("VoE start playing file as microphone failed");
+ // return -1;
+ // }
+ }
+
+ return 0;
+ }
+
+ // Stops the call started by startCall(), in reverse order: stop send,
+ // stop listen, stop playout, then restore loudspeaker routing.
+ // Returns 0 on success, -1 on the first failure.
+ int stopCall() {
+
+ // In native-thread mode just tear down the native layer.
+ if (useNativeThread == true) {
+
+ Delete();
+ return 0;
+ }
+
+ if (enableSend == true) {
+ // Stop playing file as microphone
+ /*
+ * if (0 != StopPlayingFileAsMicrophone(_channel)) {
+ * WebrtcLog("VoE stop playing file as microphone failed"); return
+ * -1; }
+ */
+ // Stop send
+ if (0 != StopSend(_channel)) {
+ WebrtcLog("VoE stop send failed");
+ return -1;
+ }
+ }
+
+ if (enableReceive == true) {
+ // if (0 != StopRecordingPlayout(_channel)) {
+ // WebrtcLog("VoE stop Recording Playout failed");
+ // }
+ // WebrtcLog("VoE stop Recording Playout ended");
+
+ // Stop listen
+ if (0 != StopListen(_channel)) {
+ WebrtcLog("VoE stop listen failed");
+ return -1;
+ }
+
+ // Stop playout file
+ // if (0 != StopPlayingFileLocally(_channel)) {
+ // WebrtcLog("VoE stop playout file failed");
+ // return -1;
+ // }
+
+ // Stop playout
+ if (0 != StopPlayout(_channel)) {
+ WebrtcLog("VoE stop playout failed");
+ return -1;
+ }
+
+ // Route audio to loudspeaker
+ if (0 != SetLoudspeakerStatus(true)) {
+ WebrtcLog("VoE set louspeaker status failed");
+ return -1;
+ }
+ }
+
+ return 0;
+ }
+
+ int startAutoTest() {
+
+ _autotestThread = new Thread(_autotestProc);
+ _autotestThread.start();
+
+ return 0;
+ }
+
+ private Runnable _autotestProc = new Runnable() {
+ public void run() {
+ // TODO(xians): choose test from GUI
+ // 1 = standard, not used
 + // 2 = extended, 2 = base (sic — one value is likely wrong; confirm)
+ RunAutoTest(1, 2);
+ }
+ };
+
+ int setAudioProperties(int val) {
+
+ // AudioManager am = (AudioManager)
+ // getSystemService(Context.AUDIO_SERVICE);
+
+ if (val == 0) {
+ // _streamVolume =
+ // am.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+ // am.setStreamVolume(AudioManager.STREAM_VOICE_CALL,
+ // (_streamVolume+1), 0);
+
+ int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;
+ if (androidVolumeLevel < _maxVolume) {
+ _volumeLevel = ((androidVolumeLevel + 1) * 255) / _maxVolume;
+ if (0 != SetSpeakerVolume(_volumeLevel)) {
+ WebrtcLog("VoE set speaker volume failed");
+ }
+ }
+ } else if (val == 1) {
+ // _streamVolume =
+ // am.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
+ // am.setStreamVolume(AudioManager.STREAM_VOICE_CALL,
+ // (_streamVolume-1), 0);
+
+ int androidVolumeLevel = (_volumeLevel * _maxVolume) / 255;
+ if (androidVolumeLevel > 0) {
+ _volumeLevel = ((androidVolumeLevel - 1) * 255) / _maxVolume;
+ if (0 != SetSpeakerVolume(_volumeLevel)) {
+ WebrtcLog("VoE set speaker volume failed");
+ }
+ }
+ } else if (val == 2) {
+ // route audio to back speaker
+ if (0 != SetLoudspeakerStatus(true)) {
+ WebrtcLog("VoE set loudspeaker status failed");
+ }
+ _audioIndex = 2;
+ } else if (val == 3) {
+ // route audio to earpiece
+ if (0 != SetLoudspeakerStatus(false)) {
+ WebrtcLog("VoE set loudspeaker status failed");
+ }
+ _audioIndex = 3;
+ }
+
+ return 0;
+ }
+
+ int displayTextFromFile() {
+
+ TextView tv = (TextView) findViewById(R.id.TextView01);
+ FileReader fr = null;
+ char[] fileBuffer = new char[64];
+
+ try {
+ fr = new FileReader("/sdcard/test.txt");
+ } catch (FileNotFoundException e) {
+ e.printStackTrace();
+ tv.setText("File not found!");
+ }
+
+ try {
+ fr.read(fileBuffer);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ String readString = new String(fileBuffer);
+ tv.setText(readString);
+ // setContentView(tv);
+
+ return 0;
+ }
+
+ int recordAudioToFile() {
+ File fr = null;
 + // 10 ms recording buffer (320 bytes = 160 samples, 16 kHz mono PCM)
+ byte[] recBuffer = new byte[320];
+
+ int recBufSize =
+ AudioRecord.getMinBufferSize(16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT);
+ AudioRecord rec =
+ new AudioRecord(MediaRecorder.AudioSource.MIC, 16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT,
+ recBufSize);
+
+ fr = new File("/sdcard/record.pcm");
+ FileOutputStream out = null;
+ try {
+ out = new FileOutputStream(fr);
+ } catch (FileNotFoundException e1) {
+ e1.printStackTrace();
+ }
+
+ // start recording
+ try {
+ rec.startRecording();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ for (int i = 0; i < 550; i++) {
 + // note, there is a short[] version of read as well!
+ int wrBytes = rec.read(recBuffer, 0, 320);
+
+ try {
+ out.write(recBuffer);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
 + // stop recording
+ try {
+ rec.stop();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ return 0;
+ }
+
+ int playAudioFromFile() {
+
+ File fr = null;
+ // final to be reachable within onPeriodicNotification
+ // final byte[] playBuffer = new byte [320000];
+ // final to be reachable within onPeriodicNotification
+ final byte[] playBuffer = new byte[320];
+
+ final int playBufSize =
+ AudioTrack.getMinBufferSize(16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT);
+ // final int playBufSize = 1920; // 100 ms buffer
+ // byte[] playBuffer = new byte [playBufSize];
+ final AudioTrack play =
+ new AudioTrack(AudioManager.STREAM_VOICE_CALL, 16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT,
+ playBufSize, AudioTrack.MODE_STREAM);
+
+ // implementation of the playpos callback functions
+ play.setPlaybackPositionUpdateListener(
+ new AudioTrack.OnPlaybackPositionUpdateListener() {
+
+ int count = 0;
+
+ public void onPeriodicNotification(AudioTrack track) {
+ // int wrBytes = play.write(playBuffer, count, 320);
+ count += 320;
+ }
+
+ public void onMarkerReached(AudioTrack track) {
+
+ }
+ });
+
+ // set the notification period = 160 samples
+ // int ret = play.setPositionNotificationPeriod(160);
+
+ fr = new File("/sdcard/record.pcm");
+ FileInputStream in = null;
+ try {
+ in = new FileInputStream(fr);
+ } catch (FileNotFoundException e1) {
+ e1.printStackTrace();
+ }
+
+ // try {
+ // in.read(playBuffer);
+ // } catch (IOException e) {
+ // e.printStackTrace();
+ // }
+
+ // play all at once
+ // int wrBytes = play.write(playBuffer, 0, 320000);
+
+
+ // start playout
+ try {
+ play.play();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ // returns the number of samples that has been written
+ // int headPos = play.getPlaybackHeadPosition();
+
+ // play with multiple writes
+ for (int i = 0; i < 500; i++) {
+ try {
+ in.read(playBuffer);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+
+ // note, there is a short version of write as well!
+ int wrBytes = play.write(playBuffer, 0, 320);
+
+ Log.d("testWrite", "wrote");
+ }
+
+ // stop playout
+ try {
+ play.stop();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ return 0;
+ }
+
+ int playAudioInThread() {
+
+ if (_isRunningPlay) {
+ return 0;
+ }
+
+ // File fr = null;
+ // final byte[] playBuffer = new byte[320];
+ if (_playFromFile) {
+ _playBuffer = new byte[320];
+ } else {
+ // reset index
+ _playIndex = 0;
+ }
 + // NOTE(review): leftover comment fragment; playback runs in its own
 + // thread rather than via onPeriodicNotification callbacks.
+
+ // Log some info (static)
+ WebrtcLog("Creating AudioTrack object");
+ final int minPlayBufSize =
+ AudioTrack.getMinBufferSize(16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT);
+ WebrtcLog("Min play buf size = " + minPlayBufSize);
+ WebrtcLog("Min volume = " + AudioTrack.getMinVolume());
+ WebrtcLog("Max volume = " + AudioTrack.getMaxVolume());
+ WebrtcLog("Native sample rate = "
+ + AudioTrack.getNativeOutputSampleRate(
+ AudioManager.STREAM_VOICE_CALL));
+
+ final int playBufSize = minPlayBufSize; // 3200; // 100 ms buffer
+ // byte[] playBuffer = new byte [playBufSize];
+ try {
+ _at = new AudioTrack(
+ AudioManager.STREAM_VOICE_CALL,
+ 16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT,
+ playBufSize, AudioTrack.MODE_STREAM);
+ } catch (Exception e) {
+ WebrtcLog(e.getMessage());
+ }
+
+ // Log some info (non-static)
+ WebrtcLog("Notification marker pos = "
+ + _at.getNotificationMarkerPosition());
+ WebrtcLog("Play head pos = " + _at.getPlaybackHeadPosition());
+ WebrtcLog("Pos notification dt = "
+ + _at.getPositionNotificationPeriod());
+ WebrtcLog("Playback rate = " + _at.getPlaybackRate());
+ WebrtcLog("Sample rate = " + _at.getSampleRate());
+
+ // implementation of the playpos callback functions
+ // _at.setPlaybackPositionUpdateListener(
+ // new AudioTrack.OnPlaybackPositionUpdateListener() {
+ //
+ // int count = 3200;
+ //
+ // public void onPeriodicNotification(AudioTrack track) {
+ // // int wrBytes = play.write(playBuffer, count, 320);
+ // count += 320;
+ // }
+ //
+ // public void onMarkerReached(AudioTrack track) {
+ // }
+ // });
+
+ // set the notification period = 160 samples
+ // int ret = _at.setPositionNotificationPeriod(160);
+
+ if (_playFromFile) {
+ _fr = new File("/sdcard/singleUserDemo.pcm");
+ try {
+ _in = new FileInputStream(_fr);
+ } catch (FileNotFoundException e1) {
+ e1.printStackTrace();
+ }
+ }
+
+ // try {
+ // in.read(playBuffer);
+ // } catch (IOException e) {
+ // e.printStackTrace();
+ // }
+
+ _isRunningPlay = true;
+
+ // buffer = new byte[3200];
+ _playThread = new Thread(_playProc);
+ // ar.startRecording();
+ // bytesRead = 3200;
+ // recording = true;
+ _playThread.start();
+
+ return 0;
+ }
+
+ int stopPlayAudio() {
+ if (!_isRunningPlay) {
+ return 0;
+ }
+
+ _isRunningPlay = false;
+
+ return 0;
+ }
+
+ private Runnable _playProc = new Runnable() {
+ public void run() {
+
+ // set high thread priority
+ android.os.Process.setThreadPriority(
+ android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
+
+ // play all at once
+ // int wrBytes = play.write(playBuffer, 0, 320000);
+
+ // fill the buffer
+ // play.write(playBuffer, 0, 3200);
+
+ // play.flush();
+
+ // start playout
+ try {
+ _at.play();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ // play with multiple writes
+ int i = 0;
+ for (; i < 3000 && _isRunningPlay; i++) {
+
+ if (_playFromFile) {
+ try {
+ _in.read(_playBuffer);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ int wrBytes = _at.write(_playBuffer, 0 /* i * 320 */, 320);
+ } else {
+ int wrSamples =
+ _at.write(_circBuffer, _playIndex * 160,
+ 160);
+
+ // WebrtcLog("Played 10 ms from buffer, _playIndex = " +
+ // _playIndex);
+ // WebrtcLog("Diff = " + (_recIndex - _playIndex));
+
+ if (_playIndex == 49) {
+ _playIndex = 0;
+ } else {
+ _playIndex += 1;
+ }
+ }
+
+ // WebrtcLog("Wrote 10 ms to buffer, head = "
+ // + _at.getPlaybackHeadPosition());
+ }
+
+ // stop playout
+ try {
+ _at.stop();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ // returns the number of samples that has been written
+ WebrtcLog("Test stopped, i = " + i + ", head = "
+ + _at.getPlaybackHeadPosition());
+ int headPos = _at.getPlaybackHeadPosition();
+
+ // flush the buffers
+ _at.flush();
+
+ // release the object
+ _at.release();
+ _at = null;
+
+ // try {
+ // Thread.sleep() must be within a try - catch block
+ // Thread.sleep(3000);
+ // }catch (Exception e){
+ // System.out.println(e.getMessage());
+ // }
+
+ _isRunningPlay = false;
+
+ }
+ };
+
+ int recAudioInThread() {
+
+ if (_isRunningRec) {
+ return 0;
+ }
+
 + // NOTE(review): leftover comment fragment; recording runs in its own
 + // thread rather than via onPeriodicNotification callbacks.
+
+ // reset index
+ _recIndex = 20;
+
+ // Log some info (static)
+ WebrtcLog("Creating AudioRecord object");
+ final int minRecBufSize = AudioRecord.getMinBufferSize(16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT);
+ WebrtcLog("Min rec buf size = " + minRecBufSize);
+ // WebrtcLog("Min volume = " + AudioTrack.getMinVolume());
+ // WebrtcLog("Max volume = " + AudioTrack.getMaxVolume());
+ // WebrtcLog("Native sample rate = "
+ // + AudioRecord
+ // .getNativeInputSampleRate(AudioManager.STREAM_VOICE_CALL));
+
+ final int recBufSize = minRecBufSize; // 3200; // 100 ms buffer
+ try {
+ _ar = new AudioRecord(
+ MediaRecorder.AudioSource.MIC,
+ 16000,
+ AudioFormat.CHANNEL_CONFIGURATION_MONO,
+ AudioFormat.ENCODING_PCM_16BIT,
+ recBufSize);
+ } catch (Exception e) {
+ WebrtcLog(e.getMessage());
+ }
+
+ // Log some info (non-static)
+ WebrtcLog("Notification marker pos = "
+ + _ar.getNotificationMarkerPosition());
+ // WebrtcLog("Play head pos = " + _ar.getRecordHeadPosition());
+ WebrtcLog("Pos notification dt rec= "
+ + _ar.getPositionNotificationPeriod());
+ // WebrtcLog("Playback rate = " + _ar.getRecordRate());
+ // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());
+ WebrtcLog("Sample rate = " + _ar.getSampleRate());
+ // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());
+ // WebrtcLog("Playback rate = " + _ar.getPlaybackRate());
+
+ _isRunningRec = true;
+
+ _recThread = new Thread(_recProc);
+
+ _recThread.start();
+
+ return 0;
+ }
+
+ int stopRecAudio() {
+ if (!_isRunningRec) {
+ return 0;
+ }
+
+ _isRunningRec = false;
+
+ return 0;
+ }
+
+ private Runnable _recProc = new Runnable() {
+ public void run() {
+
+ // set high thread priority
+ android.os.Process.setThreadPriority(
+ android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
+
+ // start recording
+ try {
+ _ar.startRecording();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ // keep recording to circular buffer
+ // for a while
+ int i = 0;
+ int rdSamples = 0;
+ short[] tempBuffer = new short[160]; // Only used for native case
+
+ for (; i < 3000 && _isRunningRec; i++) {
+ if (_runThroughNativeLayer) {
+ rdSamples = _ar.read(tempBuffer, 0, 160);
+ // audioLoop(tempBuffer, 160); // Insert into native layer
+ } else {
+ rdSamples = _ar.read(_circBuffer, _recIndex * 160, 160);
+
+ // WebrtcLog("Recorded 10 ms to buffer, _recIndex = " +
+ // _recIndex);
+ // WebrtcLog("rdSamples = " + rdSamples);
+
+ if (_recIndex == 49) {
+ _recIndex = 0;
+ } else {
+ _recIndex += 1;
+ }
+ }
+ }
+
+ // stop recording
+ try {
+ _ar.stop();
+ } catch (IllegalStateException e) {
+ e.printStackTrace();
+ }
+
+ // release the object
+ _ar.release();
+ _ar = null;
+
+ // try {
+ // Thread.sleep() must be within a try - catch block
+ // Thread.sleep(3000);
+ // }catch (Exception e){
+ // System.out.println(e.getMessage());
+ // }
+
+ _isRunningRec = false;
+
+ // returns the number of samples that has been written
+ // WebrtcLog("Test stopped, i = " + i + ", head = "
+ // + _at.getPlaybackHeadPosition());
+ // int headPos = _at.getPlaybackHeadPosition();
+ }
+ };
+
+ private void WebrtcLog(String msg) {
+ Log.d("*Webrtc*", msg);
+ }
+
+ // //////////////// Native function prototypes ////////////////////
+
+ private native static boolean NativeInit();
+
+ private native int RunAutoTest(int testType, int extendedSel);
+
+ private native boolean Create();
+
+ private native boolean Delete();
+
+ private native int Init(boolean enableTrace, boolean useExtTrans);
+
+ private native int Terminate();
+
+ private native int CreateChannel();
+
+ private native int DeleteChannel(int channel);
+
+ private native int SetLocalReceiver(int channel, int port);
+
+ private native int SetSendDestination(int channel, int port,
+ String ipaddr);
+
+ private native int StartListen(int channel);
+
+ private native int StartPlayout(int channel);
+
+ private native int StartSend(int channel);
+
+ private native int StopListen(int channel);
+
+ private native int StopPlayout(int channel);
+
+ private native int StopSend(int channel);
+
+ private native int StartPlayingFileLocally(int channel, String fileName,
+ boolean loop);
+
+ private native int StopPlayingFileLocally(int channel);
+
+ private native int StartRecordingPlayout(int channel, String fileName,
+ boolean loop);
+
+ private native int StopRecordingPlayout(int channel);
+
+ private native int StartPlayingFileAsMicrophone(int channel,
+ String fileName, boolean loop);
+
+ private native int StopPlayingFileAsMicrophone(int channel);
+
+ private native int NumOfCodecs();
+
+ private native int SetSendCodec(int channel, int index);
+
+ private native int SetVADStatus(int channel, boolean enable, int mode);
+
+ private native int SetNSStatus(boolean enable, int mode);
+
+ private native int SetAGCStatus(boolean enable, int mode);
+
+ private native int SetECStatus(boolean enable, int mode);
+
+ private native int SetSpeakerVolume(int volume);
+
+ private native int SetLoudspeakerStatus(boolean enable);
+
+ /*
+ * this is used to load the 'webrtc-voice-demo-jni'
+ * library on application startup.
+ * The library has already been unpacked into
+ * /data/data/webrtc.android.AndroidTest/lib/libwebrtc-voice-demo-jni.so
+ * at installation time by the package manager.
+ */
+ static {
+ Log.d("*Webrtc*", "Loading webrtc-voice-demo-jni...");
+ System.loadLibrary("webrtc-voice-demo-jni");
+
+ Log.d("*Webrtc*", "Calling native init...");
+ if (!NativeInit()) {
+ Log.e("*Webrtc*", "Native init failed");
+ throw new RuntimeException("Native init failed");
+ } else {
+ Log.d("*Webrtc*", "Native init successful");
+ }
+ }
+}
diff --git a/voice_engine/test/auto_test/Android.mk b/voice_engine/test/auto_test/Android.mk
new file mode 100644
index 0000000..cfb9d67
--- /dev/null
+++ b/voice_engine/test/auto_test/Android.mk
@@ -0,0 +1,56 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH:= $(call my-dir)
+
+# voice engine test app
+
+include $(CLEAR_VARS)
+
+include $(LOCAL_PATH)/../../../../../android-webrtc.mk
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+ automated_mode.cc \
+ voe_cpu_test.cc \
+ voe_standard_test.cc \
+ voe_stress_test.cc \
+ voe_unit_test.cc \
+ voe_extended_test.cc \
+ voe_standard_integration_test.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_LINUX' \
+ '-DWEBRTC_ANDROID'
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/../../interface \
+ $(LOCAL_PATH)/../../../.. \
+ $(LOCAL_PATH)/../../../../modules/audio_device/main/interface \
+ $(LOCAL_PATH)/../../../../modules/interface \
+ $(LOCAL_PATH)/../../../../system_wrappers/interface \
+ $(LOCAL_PATH)/../../../../../test \
+ external/gtest/include \
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libstlport \
+ libwebrtc
+
+LOCAL_MODULE:= webrtc_voe_autotest
+
+ifdef NDK_ROOT
+include $(BUILD_EXECUTABLE)
+else
+include external/stlport/libstlport.mk
+include $(BUILD_NATIVE_TEST)
+endif
+
diff --git a/voice_engine/test/auto_test/automated_mode.cc b/voice_engine/test/auto_test/automated_mode.cc
new file mode 100644
index 0000000..a8b6244
--- /dev/null
+++ b/voice_engine/test/auto_test/automated_mode.cc
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "gtest/gtest.h"
+
+void InitializeGoogleTest(int* argc, char** argv) {
+ testing::InitGoogleTest(argc, argv);
+}
+
+int RunInAutomatedMode() {
+ return RUN_ALL_TESTS();
+}
diff --git a/voice_engine/test/auto_test/automated_mode.h b/voice_engine/test/auto_test/automated_mode.h
new file mode 100644
index 0000000..599f021
--- /dev/null
+++ b/voice_engine/test/auto_test/automated_mode.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_AUTOMATED_MODE_H_
+#define SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_AUTOMATED_MODE_H_
+
+void InitializeGoogleTest(int* argc, char** argv);
+int RunInAutomatedMode();
+
+#endif // SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_AUTOMATED_MODE_H_
diff --git a/voice_engine/test/auto_test/extended/agc_config_test.cc b/voice_engine/test/auto_test/extended/agc_config_test.cc
new file mode 100644
index 0000000..ee7e062
--- /dev/null
+++ b/voice_engine/test/auto_test/extended/agc_config_test.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+class AgcConfigTest : public AfterStreamingFixture {
+ protected:
+ void SetUp() {
+ // These should be defaults for the AGC config.
+ default_agc_config_.digitalCompressionGaindB = 9;
+ default_agc_config_.limiterEnable = true;
+ default_agc_config_.targetLeveldBOv = 3;
+ }
+
+ webrtc::AgcConfig default_agc_config_;
+};
+
+TEST_F(AgcConfigTest, HasCorrectDefaultConfiguration) {
+ webrtc::AgcConfig agc_config;
+
+ EXPECT_EQ(0, voe_apm_->GetAgcConfig(agc_config));
+
+ EXPECT_EQ(default_agc_config_.targetLeveldBOv, agc_config.targetLeveldBOv);
+ EXPECT_EQ(default_agc_config_.digitalCompressionGaindB,
+ agc_config.digitalCompressionGaindB);
+ EXPECT_EQ(default_agc_config_.limiterEnable, agc_config.limiterEnable);
+}
+
+TEST_F(AgcConfigTest, DealsWithInvalidParameters) {
+ webrtc::AgcConfig agc_config = default_agc_config_;
+ agc_config.digitalCompressionGaindB = 91;
+ EXPECT_EQ(-1, voe_apm_->SetAgcConfig(agc_config)) << "Should not be able "
+ "to set gain to more than 90 dB.";
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+
+ agc_config = default_agc_config_;
+ agc_config.targetLeveldBOv = 32;
+ EXPECT_EQ(-1, voe_apm_->SetAgcConfig(agc_config)) << "Should not be able "
+ "to set target level to more than 31.";
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+}
+
+TEST_F(AgcConfigTest, CanGetAndSetAgcStatus) {
+ webrtc::AgcConfig agc_config;
+ agc_config.digitalCompressionGaindB = 17;
+ agc_config.targetLeveldBOv = 11;
+ agc_config.limiterEnable = false;
+
+ webrtc::AgcConfig actual_config;
+ EXPECT_EQ(0, voe_apm_->SetAgcConfig(agc_config));
+ EXPECT_EQ(0, voe_apm_->GetAgcConfig(actual_config));
+
+ EXPECT_EQ(agc_config.digitalCompressionGaindB,
+ actual_config.digitalCompressionGaindB);
+ EXPECT_EQ(agc_config.limiterEnable,
+ actual_config.limiterEnable);
+ EXPECT_EQ(agc_config.targetLeveldBOv,
+ actual_config.targetLeveldBOv);
+}
+
+TEST_F(AgcConfigTest, HasCorrectDefaultRxConfiguration) {
+ webrtc::AgcConfig agc_config;
+
+ EXPECT_EQ(0, voe_apm_->GetRxAgcConfig(channel_, agc_config));
+
+ EXPECT_EQ(default_agc_config_.targetLeveldBOv, agc_config.targetLeveldBOv);
+ EXPECT_EQ(default_agc_config_.digitalCompressionGaindB,
+ agc_config.digitalCompressionGaindB);
+ EXPECT_EQ(default_agc_config_.limiterEnable, agc_config.limiterEnable);
+}
+
+TEST_F(AgcConfigTest, DealsWithInvalidRxParameters) {
+ webrtc::AgcConfig agc_config = default_agc_config_;
+ agc_config.digitalCompressionGaindB = 91;
+ EXPECT_EQ(-1, voe_apm_->SetRxAgcConfig(channel_, agc_config)) <<
+ "Should not be able to set RX gain to more than 90 dB.";
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+
+ agc_config = default_agc_config_;
+ agc_config.targetLeveldBOv = 32;
+ EXPECT_EQ(-1, voe_apm_->SetRxAgcConfig(channel_, agc_config)) <<
+ "Should not be able to set target level to more than 31.";
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+}
+
+TEST_F(AgcConfigTest, CanGetAndSetRxAgcStatus) {
+ webrtc::AgcConfig agc_config;
+ agc_config.digitalCompressionGaindB = 17;
+ agc_config.targetLeveldBOv = 11;
+ agc_config.limiterEnable = false;
+
+ webrtc::AgcConfig actual_config;
+ EXPECT_EQ(0, voe_apm_->SetRxAgcConfig(channel_, agc_config));
+ EXPECT_EQ(0, voe_apm_->GetRxAgcConfig(channel_, actual_config));
+
+ EXPECT_EQ(agc_config.digitalCompressionGaindB,
+ actual_config.digitalCompressionGaindB);
+ EXPECT_EQ(agc_config.limiterEnable,
+ actual_config.limiterEnable);
+ EXPECT_EQ(agc_config.targetLeveldBOv,
+ actual_config.targetLeveldBOv);
+}
diff --git a/voice_engine/test/auto_test/extended/ec_metrics_test.cc b/voice_engine/test/auto_test/extended/ec_metrics_test.cc
new file mode 100644
index 0000000..2d60d0d
--- /dev/null
+++ b/voice_engine/test/auto_test/extended/ec_metrics_test.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+class EcMetricsTest : public AfterStreamingFixture {
+};
+
+TEST_F(EcMetricsTest, EcMetricsAreOffByDefault) {
+ bool enabled = true;
+ EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(enabled));
+ EXPECT_FALSE(enabled);
+}
+
+TEST_F(EcMetricsTest, CanEnableAndDisableEcMetrics) {
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+ bool ec_on = false;
+ EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(ec_on));
+ ASSERT_TRUE(ec_on);
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(false));
+ EXPECT_EQ(0, voe_apm_->GetEcMetricsStatus(ec_on));
+ ASSERT_FALSE(ec_on);
+}
+
+TEST_F(EcMetricsTest, ManualTestEcMetrics) {
+ SwitchToManualMicrophone();
+
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+
+ // Must enable AEC to get valid echo metrics.
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAec));
+
+ TEST_LOG("Speak into microphone and check metrics for 5 seconds...\n");
+ int erl, erle, rerl, a_nlp;
+ int delay_median = 0;
+ int delay_std = 0;
+
+ for (int i = 0; i < 5; i++) {
+ Sleep(1000);
+ EXPECT_EQ(0, voe_apm_->GetEchoMetrics(erl, erle, rerl, a_nlp));
+ EXPECT_EQ(0, voe_apm_->GetEcDelayMetrics(delay_median, delay_std));
+ TEST_LOG(" Echo : ERL=%5d, ERLE=%5d, RERL=%5d, A_NLP=%5d [dB], "
+ " delay median=%3d, delay std=%3d [ms]\n", erl, erle, rerl, a_nlp,
+ delay_median, delay_std);
+ }
+
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(false));
+}
+
+TEST_F(EcMetricsTest, GetEcMetricsFailsIfEcNotEnabled) {
+ int dummy = 0;
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+ EXPECT_EQ(-1, voe_apm_->GetEchoMetrics(dummy, dummy, dummy, dummy));
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+}
+
+TEST_F(EcMetricsTest, GetEcDelayMetricsFailsIfEcNotEnabled) {
+ int dummy = 0;
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+ EXPECT_EQ(-1, voe_apm_->GetEcDelayMetrics(dummy, dummy));
+ EXPECT_EQ(VE_APM_ERROR, voe_base_->LastError());
+}
+
+TEST_F(EcMetricsTest, ManualVerifyEcDelayMetrics) {
+ SwitchToManualMicrophone();
+ TEST_LOG("Verify EC Delay metrics:");
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(true));
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+
+ for (int i = 0; i < 5; i++) {
+ int delay, delay_std;
+ EXPECT_EQ(0, voe_apm_->GetEcDelayMetrics(delay, delay_std));
+ TEST_LOG("Delay = %d, Delay Std = %d\n", delay, delay_std);
+ Sleep(1000);
+ }
+}
diff --git a/voice_engine/test/auto_test/fakes/fake_external_transport.cc b/voice_engine/test/auto_test/fakes/fake_external_transport.cc
new file mode 100644
index 0000000..1fd4a25
--- /dev/null
+++ b/voice_engine/test/auto_test/fakes/fake_external_transport.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "system_wrappers/interface/critical_section_wrapper.h"
+#include "system_wrappers/interface/event_wrapper.h"
+#include "system_wrappers/interface/thread_wrapper.h"
+#include "voice_engine/include/voe_network.h"
+#include "voice_engine/voice_engine_defines.h"
+#include "voice_engine/test/auto_test/fakes/fake_external_transport.h"
+
+FakeExternalTransport::FakeExternalTransport(webrtc::VoENetwork* ptr)
+ : my_network_(ptr),
+ thread_(NULL),
+ lock_(NULL),
+ event_(NULL),
+ length_(0),
+ channel_(0),
+ delay_is_enabled_(0),
+ delay_time_in_ms_(0) {
+ const char* thread_name = "external_thread";
+ lock_ = webrtc::CriticalSectionWrapper::CreateCriticalSection();
+ event_ = webrtc::EventWrapper::Create();
+ thread_ = webrtc::ThreadWrapper::CreateThread(
+ Run, this, webrtc::kHighPriority, thread_name);
+ if (thread_) {
+ unsigned int id;
+ thread_->Start(id);
+ }
+}
+
+FakeExternalTransport::~FakeExternalTransport() {
+ if (thread_) {
+ thread_->SetNotAlive();
+ event_->Set();
+ if (thread_->Stop()) {
+ delete thread_;
+ thread_ = NULL;
+ delete event_;
+ event_ = NULL;
+ delete lock_;
+ lock_ = NULL;
+ }
+ }
+}
+
+bool FakeExternalTransport::Run(void* ptr) {
+ return static_cast<FakeExternalTransport*> (ptr)->Process();
+}
+
+bool FakeExternalTransport::Process() {
+ switch (event_->Wait(500)) {
+ case webrtc::kEventSignaled:
+ lock_->Enter();
+ my_network_->ReceivedRTPPacket(channel_, packet_buffer_, length_);
+ lock_->Leave();
+ return true;
+ case webrtc::kEventTimeout:
+ return true;
+ case webrtc::kEventError:
+ break;
+ }
+ return true;
+}
+
+int FakeExternalTransport::SendPacket(int channel, const void *data, int len) {
+ lock_->Enter();
+ if (len < 1612) {
+ memcpy(packet_buffer_, (const unsigned char*) data, len);
+ length_ = len;
+ channel_ = channel;
+ }
+ lock_->Leave();
+ event_->Set(); // Triggers ReceivedRTPPacket() from worker thread.
+ return len;
+}
+
+int FakeExternalTransport::SendRTCPPacket(int channel,
+ const void *data,
+ int len) {
+ if (delay_is_enabled_) {
+ Sleep(delay_time_in_ms_);
+ }
+ my_network_->ReceivedRTCPPacket(channel, data, len);
+ return len;
+}
+
+void FakeExternalTransport::SetDelayStatus(bool enable,
+ unsigned int delayInMs) {
+ delay_is_enabled_ = enable;
+ delay_time_in_ms_ = delayInMs;
+}
diff --git a/voice_engine/test/auto_test/fakes/fake_external_transport.h b/voice_engine/test/auto_test/fakes/fake_external_transport.h
new file mode 100644
index 0000000..25d34c7
--- /dev/null
+++ b/voice_engine/test/auto_test/fakes/fake_external_transport.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKES_FAKE_EXTERNAL_TRANSPORT_H_
+#define VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKES_FAKE_EXTERNAL_TRANSPORT_H_
+
+#include "common_types.h"
+
+namespace webrtc {
+class CriticalSectionWrapper;
+class EventWrapper;
+class ThreadWrapper;
+class VoENetwork;
+}
+
+class FakeExternalTransport : public webrtc::Transport {
+ public:
+ explicit FakeExternalTransport(webrtc::VoENetwork* ptr);
+ virtual ~FakeExternalTransport();
+ int SendPacket(int channel, const void *data, int len);
+ int SendRTCPPacket(int channel, const void *data, int len);
+ void SetDelayStatus(bool enabled, unsigned int delayInMs = 100);
+
+ webrtc::VoENetwork* my_network_;
+ private:
+ static bool Run(void* ptr);
+ bool Process();
+ private:
+ webrtc::ThreadWrapper* thread_;
+ webrtc::CriticalSectionWrapper* lock_;
+ webrtc::EventWrapper* event_;
+ private:
+ unsigned char packet_buffer_[1612];
+ int length_;
+ int channel_;
+ bool delay_is_enabled_;
+ int delay_time_in_ms_;
+};
+
+#endif // VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKES_FAKE_EXTERNAL_TRANSPORT_H_
diff --git a/voice_engine/test/auto_test/fakes/fake_media_process.h b/voice_engine/test/auto_test/fakes/fake_media_process.h
new file mode 100644
index 0000000..9c45129
--- /dev/null
+++ b/voice_engine/test/auto_test/fakes/fake_media_process.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKE_MEDIA_PROCESS_H_
+#define VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKE_MEDIA_PROCESS_H_
+
+#include <cmath>
+
+class FakeMediaProcess : public webrtc::VoEMediaProcess {
+ public:
+ virtual void Process(const int channel,
+ const webrtc::ProcessingTypes type,
+ WebRtc_Word16 audio_10ms[],
+ const int length,
+ const int sampling_freq_hz,
+ const bool stereo) {
+ for (int i = 0; i < length; i++) {
+ if (!stereo) {
+ audio_10ms[i] = static_cast<WebRtc_Word16>(audio_10ms[i] *
+ sin(2.0 * 3.14 * frequency * 400.0 / sampling_freq_hz));
+ } else {
+ // Interleaved stereo.
+ audio_10ms[2 * i] = static_cast<WebRtc_Word16> (
+ audio_10ms[2 * i] * sin(2.0 * 3.14 *
+ frequency * 400.0 / sampling_freq_hz));
+ audio_10ms[2 * i + 1] = static_cast<WebRtc_Word16> (
+ audio_10ms[2 * i + 1] * sin(2.0 * 3.14 *
+ frequency * 400.0 / sampling_freq_hz));
+ }
+ frequency++;
+ }
+ }
+
+ private:
+ int frequency;
+};
+
+#endif // VOICE_ENGINE_MAIN_TEST_AUTO_TEST_FAKE_MEDIA_PROCESS_H_
diff --git a/voice_engine/test/auto_test/fixtures/after_initialization_fixture.cc b/voice_engine/test/auto_test/fixtures/after_initialization_fixture.cc
new file mode 100644
index 0000000..f0e665b
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/after_initialization_fixture.cc
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_initialization_fixture.h"
+
+class TestErrorObserver : public webrtc::VoiceEngineObserver {
+ public:
+ TestErrorObserver() {}
+ virtual ~TestErrorObserver() {}
+ void CallbackOnError(const int channel, const int error_code) {
+ ADD_FAILURE() << "Unexpected error on channel " << channel <<
+ ": error code " << error_code;
+ }
+};
+
+AfterInitializationFixture::AfterInitializationFixture()
+ : error_observer_(new TestErrorObserver()) {
+ EXPECT_EQ(0, voe_base_->Init());
+
+#if defined(WEBRTC_ANDROID)
+ EXPECT_EQ(0, voe_hardware_->SetLoudspeakerStatus(false));
+#endif
+
+ EXPECT_EQ(0, voe_base_->RegisterVoiceEngineObserver(*error_observer_));
+}
+
+AfterInitializationFixture::~AfterInitializationFixture() {
+ EXPECT_EQ(0, voe_base_->DeRegisterVoiceEngineObserver());
+}
diff --git a/voice_engine/test/auto_test/fixtures/after_initialization_fixture.h b/voice_engine/test/auto_test/fixtures/after_initialization_fixture.h
new file mode 100644
index 0000000..bbdd64d
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/after_initialization_fixture.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_AFTER_INIT_H_
+#define SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_AFTER_INIT_H_
+
+#include "before_initialization_fixture.h"
+#include "scoped_ptr.h"
+
+class TestErrorObserver;
+
+// This fixture initializes the voice engine in addition to the work
+// done by the before-initialization fixture. It also registers an error
+// observer which will fail tests on error callbacks. This fixture is
+// useful to tests that want to run before we have started any form of
+// streaming through the voice engine.
+class AfterInitializationFixture : public BeforeInitializationFixture {
+ public:
+ AfterInitializationFixture();
+ virtual ~AfterInitializationFixture();
+ protected:
+ webrtc::scoped_ptr<TestErrorObserver> error_observer_;
+};
+
+#endif // SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_AFTER_INIT_H_
diff --git a/voice_engine/test/auto_test/fixtures/after_streaming_fixture.cc b/voice_engine/test/auto_test/fixtures/after_streaming_fixture.cc
new file mode 100644
index 0000000..d1e6039
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/after_streaming_fixture.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+
+#include <cstring>
+
+static const char* kLoopbackIp = "127.0.0.1";
+
+AfterStreamingFixture::AfterStreamingFixture()
+ : channel_(voe_base_->CreateChannel()) {
+ EXPECT_GE(channel_, 0);
+
+ fake_microphone_input_file_ = resource_manager_.long_audio_file_path();
+ EXPECT_FALSE(fake_microphone_input_file_.empty());
+
+ SetUpLocalPlayback();
+ ResumePlaying();
+ RestartFakeMicrophone();
+}
+
+AfterStreamingFixture::~AfterStreamingFixture() {
+ voe_file_->StopPlayingFileAsMicrophone(channel_);
+ PausePlaying();
+
+ voe_base_->DeleteChannel(channel_);
+}
+
+void AfterStreamingFixture::SwitchToManualMicrophone() {
+ EXPECT_EQ(0, voe_file_->StopPlayingFileAsMicrophone(channel_));
+
+ TEST_LOG("You need to speak manually into the microphone for this test.\n");
+ TEST_LOG("Please start speaking now.\n");
+ Sleep(1000);
+}
+
+void AfterStreamingFixture::RestartFakeMicrophone() {
+ EXPECT_EQ(0, voe_file_->StartPlayingFileAsMicrophone(
+ channel_, fake_microphone_input_file_.c_str(), true, true));
+}
+
+void AfterStreamingFixture::PausePlaying() {
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StopReceive(channel_));
+}
+
+void AfterStreamingFixture::ResumePlaying() {
+ EXPECT_EQ(0, voe_base_->StartReceive(channel_));
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+}
+
+void AfterStreamingFixture::SetUpLocalPlayback() {
+ EXPECT_EQ(0, voe_base_->SetSendDestination(channel_, 8000, kLoopbackIp));
+ EXPECT_EQ(0, voe_base_->SetLocalReceiver(0, 8000));
+
+ webrtc::CodecInst codec;
+ codec.channels = 1;
+ codec.pacsize = 160;
+ codec.plfreq = 8000;
+ codec.pltype = 0;
+ codec.rate = 64000;
+ strcpy(codec.plname, "PCMU");
+
+ voe_codec_->SetSendCodec(channel_, codec);
+}
diff --git a/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h b/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h
new file mode 100644
index 0000000..6b0a61f
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/after_streaming_fixture.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_AFTER_STREAMING_H_
+#define SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_AFTER_STREAMING_H_
+
+#include "after_initialization_fixture.h"
+#include "resource_manager.h"
+
+// This fixture will, in addition to the work done by its superclasses,
+// create a channel and start playing a file through the fake microphone
+// to simulate microphone input. The purpose is to make it convenient
+// to write tests that require microphone input.
+class AfterStreamingFixture : public AfterInitializationFixture {
+ public:
+ AfterStreamingFixture();
+ virtual ~AfterStreamingFixture();
+
+ protected:
+ int channel_;
+ ResourceManager resource_manager_;
+ std::string fake_microphone_input_file_;
+
+ // Shuts off the fake microphone for this test.
+ void SwitchToManualMicrophone();
+
+ // Restarts the fake microphone if it's been shut off earlier.
+ void RestartFakeMicrophone();
+
+ // Stops all sending and playout.
+ void PausePlaying();
+
+ // Resumes all sending and playout.
+ void ResumePlaying();
+
+ private:
+ void SetUpLocalPlayback();
+};
+
+
+#endif // SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_AFTER_STREAMING_H_
diff --git a/voice_engine/test/auto_test/fixtures/before_initialization_fixture.cc b/voice_engine/test/auto_test/fixtures/before_initialization_fixture.cc
new file mode 100644
index 0000000..407e5b3
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/before_initialization_fixture.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "before_initialization_fixture.h"
+
+#include "voice_engine_defines.h"
+
+BeforeInitializationFixture::BeforeInitializationFixture()
+ : voice_engine_(webrtc::VoiceEngine::Create()) {
+ EXPECT_TRUE(voice_engine_ != NULL);
+
+ voe_base_ = webrtc::VoEBase::GetInterface(voice_engine_);
+ voe_codec_ = webrtc::VoECodec::GetInterface(voice_engine_);
+ voe_volume_control_ = webrtc::VoEVolumeControl::GetInterface(voice_engine_);
+ voe_dtmf_ = webrtc::VoEDtmf::GetInterface(voice_engine_);
+ voe_rtp_rtcp_ = webrtc::VoERTP_RTCP::GetInterface(voice_engine_);
+ voe_apm_ = webrtc::VoEAudioProcessing::GetInterface(voice_engine_);
+ voe_network_ = webrtc::VoENetwork::GetInterface(voice_engine_);
+ voe_file_ = webrtc::VoEFile::GetInterface(voice_engine_);
+ voe_vsync_ = webrtc::VoEVideoSync::GetInterface(voice_engine_);
+ voe_encrypt_ = webrtc::VoEEncryption::GetInterface(voice_engine_);
+ voe_hardware_ = webrtc::VoEHardware::GetInterface(voice_engine_);
+ voe_xmedia_ = webrtc::VoEExternalMedia::GetInterface(voice_engine_);
+ voe_call_report_ = webrtc::VoECallReport::GetInterface(voice_engine_);
+ voe_neteq_stats_ = webrtc::VoENetEqStats::GetInterface(voice_engine_);
+}
+
+BeforeInitializationFixture::~BeforeInitializationFixture() {
+ voe_base_->Release();
+ voe_codec_->Release();
+ voe_volume_control_->Release();
+ voe_dtmf_->Release();
+ voe_rtp_rtcp_->Release();
+ voe_apm_->Release();
+ voe_network_->Release();
+ voe_file_->Release();
+ voe_vsync_->Release();
+ voe_encrypt_->Release();
+ voe_hardware_->Release();
+ voe_xmedia_->Release();
+ voe_call_report_->Release();
+ voe_neteq_stats_->Release();
+
+ EXPECT_TRUE(webrtc::VoiceEngine::Delete(voice_engine_));
+}
+
+void BeforeInitializationFixture::Sleep(long milliseconds) {
+ // Implementation note: This method is used to reduce usage of the macro and
+ // avoid ugly errors in Eclipse (its parser can't deal with the sleep macro).
+ SLEEP(milliseconds);
+}
diff --git a/voice_engine/test/auto_test/fixtures/before_initialization_fixture.h b/voice_engine/test/auto_test/fixtures/before_initialization_fixture.h
new file mode 100644
index 0000000..cd1520d
--- /dev/null
+++ b/voice_engine/test/auto_test/fixtures/before_initialization_fixture.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_H_
+#define SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_H_
+
+#include <assert.h>
+
+#include "common_types.h"
+#include "engine_configurations.h"
+#include "voice_engine/include/voe_audio_processing.h"
+#include "voice_engine/include/voe_base.h"
+#include "voice_engine/include/voe_call_report.h"
+#include "voice_engine/include/voe_codec.h"
+#include "voice_engine/include/voe_dtmf.h"
+#include "voice_engine/include/voe_encryption.h"
+#include "voice_engine/include/voe_errors.h"
+#include "voice_engine/include/voe_external_media.h"
+#include "voice_engine/include/voe_file.h"
+#include "voice_engine/include/voe_hardware.h"
+#include "voice_engine/include/voe_neteq_stats.h"
+#include "voice_engine/include/voe_network.h"
+#include "voice_engine/include/voe_rtp_rtcp.h"
+#include "voice_engine/include/voe_video_sync.h"
+#include "voice_engine/include/voe_volume_control.h"
+#include "voice_engine/test/auto_test/voe_test_defines.h"
+
+// TODO(qhogpat): Remove these undefs once the clashing macros are gone.
+#undef TEST
+#undef ASSERT_TRUE
+#undef ASSERT_FALSE
+#include "gtest/gtest.h"
+#include "gmock/gmock.h"
+
+// This convenient fixture sets up all voice engine interfaces automatically for
+// use by testing subclasses. It allocates each interface and releases it once
+// which means that if a tests allocates additional interfaces from the voice
+// engine and forgets to release it, this test will fail in the destructor.
+// It will not call any init methods.
+//
+// Implementation note:
+// The interface fetching is done in the constructor and not SetUp() since
+// this relieves our subclasses from calling SetUp in the superclass if they
+// choose to override SetUp() themselves. This is fine as googletest will
+// construct new test objects for each method.
+class BeforeInitializationFixture : public testing::Test {
+ public:
+ BeforeInitializationFixture();
+ virtual ~BeforeInitializationFixture();
+
+ protected:
+ // Use this sleep function to sleep in test (avoid sleep macro).
+ void Sleep(long milliseconds);
+
+ webrtc::VoiceEngine* voice_engine_;
+ webrtc::VoEBase* voe_base_;
+ webrtc::VoECodec* voe_codec_;
+ webrtc::VoEVolumeControl* voe_volume_control_;
+ webrtc::VoEDtmf* voe_dtmf_;
+ webrtc::VoERTP_RTCP* voe_rtp_rtcp_;
+ webrtc::VoEAudioProcessing* voe_apm_;
+ webrtc::VoENetwork* voe_network_;
+ webrtc::VoEFile* voe_file_;
+ webrtc::VoEVideoSync* voe_vsync_;
+ webrtc::VoEEncryption* voe_encrypt_;
+ webrtc::VoEHardware* voe_hardware_;
+ webrtc::VoEExternalMedia* voe_xmedia_;
+ webrtc::VoECallReport* voe_call_report_;
+ webrtc::VoENetEqStats* voe_neteq_stats_;
+};
+
+#endif // SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_STANDARD_TEST_BASE_H_
diff --git a/voice_engine/test/auto_test/fuzz/rtp_fuzz_test.cc b/voice_engine/test/auto_test/fuzz/rtp_fuzz_test.cc
new file mode 100644
index 0000000..f18d5e1
--- /dev/null
+++ b/voice_engine/test/auto_test/fuzz/rtp_fuzz_test.cc
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <ctime>
+
+#include "test/libtest/include/bit_flip_encryption.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+class RtpFuzzTest : public AfterStreamingFixture {
+ protected:
+ void BitFlipFuzzTest(float flip_probability) {
+ BitFlipEncryption bit_flip_encryption(std::time(NULL), flip_probability);
+
+ TEST_LOG("Starting to flip bits in RTP/RTCP packets.\n");
+ voe_encrypt_->RegisterExternalEncryption(channel_, bit_flip_encryption);
+
+ Sleep(5000);
+
+ voe_encrypt_->DeRegisterExternalEncryption(channel_);
+
+ TEST_LOG("Flipped %d bits. Back to normal.\n",
+ static_cast<int>(bit_flip_encryption.flip_count()));
+ Sleep(2000);
+ }
+};
+
+TEST_F(RtpFuzzTest, VoiceEngineDealsWithASmallNumberOfTamperedRtpPackets) {
+ BitFlipFuzzTest(0.00005f);
+}
+
+TEST_F(RtpFuzzTest, VoiceEngineDealsWithAMediumNumberOfTamperedRtpPackets) {
+ BitFlipFuzzTest(0.0005f);
+}
+
+TEST_F(RtpFuzzTest, VoiceEngineDealsWithALargeNumberOfTamperedRtpPackets) {
+ BitFlipFuzzTest(0.05f);
+}
+
+TEST_F(RtpFuzzTest, VoiceEngineDealsWithAHugeNumberOfTamperedRtpPackets) {
+ BitFlipFuzzTest(0.5f);
+}
diff --git a/voice_engine/test/auto_test/resource_manager.cc b/voice_engine/test/auto_test/resource_manager.cc
new file mode 100644
index 0000000..18213f9
--- /dev/null
+++ b/voice_engine/test/auto_test/resource_manager.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "resource_manager.h"
+
+#include "testsupport/fileutils.h"
+
+ResourceManager::ResourceManager() {
+ std::string filename = "audio_long16.pcm";
+#if defined(WEBRTC_ANDROID)
+ long_audio_file_path_ = "/sdcard/" + filename;
+#else
+ std::string resource_path = webrtc::test::ProjectRootPath();
+ if (resource_path == webrtc::test::kCannotFindProjectRootDir) {
+ long_audio_file_path_ = "";
+ } else {
+ long_audio_file_path_ =
+ resource_path + "data/voice_engine/" + filename;
+ }
+#endif
+}
+
diff --git a/voice_engine/test/auto_test/resource_manager.h b/voice_engine/test/auto_test/resource_manager.h
new file mode 100644
index 0000000..1bb91cf
--- /dev/null
+++ b/voice_engine/test/auto_test/resource_manager.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_RESOURCE_MANAGER_H_
+#define SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_RESOURCE_MANAGER_H_
+
+#include <string>
+
+class ResourceManager {
+ public:
+ ResourceManager();
+
+ // Returns the full path to a long audio file.
+ // Returns the empty string on failure.
+ const std::string& long_audio_file_path() const {
+ return long_audio_file_path_;
+ }
+
+ private:
+ std::string long_audio_file_path_;
+};
+
+#endif // SRC_VOICE_ENGINE_MAIN_TEST_AUTO_TEST_RESOURCE_MANAGER_H_
diff --git a/voice_engine/test/auto_test/standard/audio_processing_test.cc b/voice_engine/test/auto_test/standard/audio_processing_test.cc
new file mode 100644
index 0000000..fc759b0
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/audio_processing_test.cc
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testsupport/fileutils.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+
+class RxCallback : public webrtc::VoERxVadCallback {
+ public:
+ RxCallback() :
+ vad_decision(-1) {
+ }
+
+ virtual void OnRxVad(int, int vadDecision) {
+ char msg[128];
+ sprintf(msg, "RX VAD detected decision %d \n", vadDecision);
+ TEST_LOG("%s", msg);
+ vad_decision = vadDecision;
+ }
+
+ int vad_decision;
+};
+
+class AudioProcessingTest : public AfterStreamingFixture {
+ protected:
+ // Note: Be careful with this one, it is used in the
+ // Android / iPhone part too.
+ void TryEnablingAgcWithMode(webrtc::AgcModes agc_mode_to_set) {
+ EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, agc_mode_to_set));
+
+ bool agc_enabled = false;
+ webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_EQ(agc_mode_to_set, agc_mode);
+ }
+
+ void TryEnablingRxAgcWithMode(webrtc::AgcModes agc_mode_to_set) {
+ EXPECT_EQ(0, voe_apm_->SetRxAgcStatus(channel_, true, agc_mode_to_set));
+
+ bool rx_agc_enabled = false;
+ webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetRxAgcStatus(channel_, rx_agc_enabled, agc_mode));
+ EXPECT_TRUE(rx_agc_enabled);
+ EXPECT_EQ(agc_mode_to_set, agc_mode);
+ }
+
+ // EC modes can map to other EC modes, so we have a separate parameter
+ // for what we expect the EC mode to be set to.
+ void TryEnablingEcWithMode(webrtc::EcModes ec_mode_to_set,
+ webrtc::EcModes expected_mode) {
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(true, ec_mode_to_set));
+
+ bool ec_enabled = true;
+ webrtc::EcModes ec_mode = webrtc::kEcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
+
+ EXPECT_EQ(expected_mode, ec_mode);
+ }
+
+ // Here, the CNG mode will be expected to be on or off depending on the mode.
+ void TryEnablingAecmWithMode(webrtc::AecmModes aecm_mode_to_set,
+ bool cng_enabled_to_set) {
+ EXPECT_EQ(0, voe_apm_->SetAecmMode(aecm_mode_to_set, cng_enabled_to_set));
+
+ bool cng_enabled = false;
+ webrtc::AecmModes aecm_mode = webrtc::kAecmEarpiece;
+
+ voe_apm_->GetAecmMode(aecm_mode, cng_enabled);
+
+ EXPECT_EQ(cng_enabled_to_set, cng_enabled);
+ EXPECT_EQ(aecm_mode_to_set, aecm_mode);
+ }
+
+ void TryEnablingNsWithMode(webrtc::NsModes ns_mode_to_set,
+ webrtc::NsModes expected_ns_mode) {
+ EXPECT_EQ(0, voe_apm_->SetNsStatus(true, ns_mode_to_set));
+
+ bool ns_status = true;
+ webrtc::NsModes ns_mode = webrtc::kNsDefault;
+ EXPECT_EQ(0, voe_apm_->GetNsStatus(ns_status, ns_mode));
+
+ EXPECT_TRUE(ns_status);
+ EXPECT_EQ(expected_ns_mode, ns_mode);
+ }
+
+ void TryEnablingRxNsWithMode(webrtc::NsModes ns_mode_to_set,
+ webrtc::NsModes expected_ns_mode) {
+ EXPECT_EQ(0, voe_apm_->SetRxNsStatus(channel_, true, ns_mode_to_set));
+
+ bool ns_status = true;
+ webrtc::NsModes ns_mode = webrtc::kNsDefault;
+ EXPECT_EQ(0, voe_apm_->GetRxNsStatus(channel_, ns_status, ns_mode));
+
+ EXPECT_TRUE(ns_status);
+ EXPECT_EQ(expected_ns_mode, ns_mode);
+ }
+
+ void TryDetectingSilence() {
+ // Here, speech is running. Shut down speech.
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, true));
+ EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, true));
+ EXPECT_EQ(0, voe_file_->StopPlayingFileAsMicrophone(channel_));
+
+ // We should detect the silence after a short time.
+ Sleep(50);
+ for (int i = 0; i < 25; i++) {
+ EXPECT_EQ(0, voe_apm_->VoiceActivityIndicator(channel_));
+ Sleep(10);
+ }
+ }
+
+ void TryDetectingSpeechAfterSilence() {
+ // Re-enable speech.
+ RestartFakeMicrophone();
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, false));
+ EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, false));
+
+ // We should detect the speech after a short time.
+ for (int i = 0; i < 50; i++) {
+ if (voe_apm_->VoiceActivityIndicator(channel_) == 1) {
+ return;
+ }
+ Sleep(10);
+ }
+
+ ADD_FAILURE() << "Failed to detect speech within 500 ms.";
+ }
+};
+
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+
+TEST_F(AudioProcessingTest, AgcIsOnByDefault) {
+ bool agc_enabled = false;
+ webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
+
+ EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_EQ(webrtc::kAgcAdaptiveAnalog, agc_mode);
+}
+
+TEST_F(AudioProcessingTest, CanEnableAgcWithAllModes) {
+ TryEnablingAgcWithMode(webrtc::kAgcAdaptiveDigital);
+ TryEnablingAgcWithMode(webrtc::kAgcAdaptiveAnalog);
+ TryEnablingAgcWithMode(webrtc::kAgcFixedDigital);
+}
+
+TEST_F(AudioProcessingTest, EcIsDisabledAndAecIsDefaultEcMode) {
+ bool ec_enabled = true;
+ webrtc::EcModes ec_mode = webrtc::kEcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_EQ(webrtc::kEcAec, ec_mode);
+}
+
+TEST_F(AudioProcessingTest, EnablingEcAecShouldEnableEcAec) {
+ TryEnablingEcWithMode(webrtc::kEcAec, webrtc::kEcAec);
+}
+
+TEST_F(AudioProcessingTest, EnablingEcConferenceShouldEnableEcAec) {
+ TryEnablingEcWithMode(webrtc::kEcConference, webrtc::kEcAec);
+}
+
+TEST_F(AudioProcessingTest, EcModeIsPreservedWhenEcIsTurnedOff) {
+ TryEnablingEcWithMode(webrtc::kEcConference, webrtc::kEcAec);
+
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(false));
+
+ bool ec_enabled = true;
+ webrtc::EcModes ec_mode = webrtc::kEcDefault;
+ EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
+
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_EQ(webrtc::kEcAec, ec_mode);
+}
+
+TEST_F(AudioProcessingTest, CanEnableAndDisableEcModeSeveralTimesInARow) {
+ for (int i = 0; i < 10; i++) {
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(true));
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(false));
+ }
+
+ bool ec_enabled = true;
+ webrtc::EcModes ec_mode = webrtc::kEcDefault;
+ EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
+
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_EQ(webrtc::kEcAec, ec_mode);
+}
+
+// TODO(phoglund): Reenable below test when it's no longer flaky.
+TEST_F(AudioProcessingTest, DISABLED_TestVoiceActivityDetectionWithObserver) {
+ RxCallback rx_callback;
+ EXPECT_EQ(0, voe_apm_->RegisterRxVadObserver(channel_, rx_callback));
+
+ // The extra sleeps are to allow decisions some time to propagate to the
+ // observer.
+ TryDetectingSilence();
+ Sleep(100);
+
+ EXPECT_EQ(0, rx_callback.vad_decision);
+
+ TryDetectingSpeechAfterSilence();
+ Sleep(100);
+
+ EXPECT_EQ(1, rx_callback.vad_decision);
+
+ EXPECT_EQ(0, voe_apm_->DeRegisterRxVadObserver(channel_));
+}
+
+#endif // !WEBRTC_IOS && !WEBRTC_ANDROID
+
+TEST_F(AudioProcessingTest, EnablingEcAecmShouldEnableEcAecm) {
+ // This one apparently applies to Android and iPhone as well.
+ TryEnablingEcWithMode(webrtc::kEcAecm, webrtc::kEcAecm);
+}
+
+TEST_F(AudioProcessingTest, EcAecmModeIsEnabledAndSpeakerphoneByDefault) {
+ bool cng_enabled = false;
+ webrtc::AecmModes aecm_mode = webrtc::kAecmEarpiece;
+
+ voe_apm_->GetAecmMode(aecm_mode, cng_enabled);
+
+ EXPECT_TRUE(cng_enabled);
+ EXPECT_EQ(webrtc::kAecmSpeakerphone, aecm_mode);
+}
+
+TEST_F(AudioProcessingTest, CanSetAecmMode) {
+ EXPECT_EQ(0, voe_apm_->SetEcStatus(true, webrtc::kEcAecm));
+
+ // Try some AECM mode - CNG enabled combinations.
+ TryEnablingAecmWithMode(webrtc::kAecmEarpiece, true);
+ TryEnablingAecmWithMode(webrtc::kAecmEarpiece, false);
+ TryEnablingAecmWithMode(webrtc::kAecmLoudEarpiece, true);
+ TryEnablingAecmWithMode(webrtc::kAecmLoudSpeakerphone, false);
+ TryEnablingAecmWithMode(webrtc::kAecmQuietEarpieceOrHeadset, true);
+ TryEnablingAecmWithMode(webrtc::kAecmSpeakerphone, false);
+}
+
+TEST_F(AudioProcessingTest, RxAgcShouldBeOffByDefault) {
+ bool rx_agc_enabled = true;
+ webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetRxAgcStatus(channel_, rx_agc_enabled, agc_mode));
+ EXPECT_FALSE(rx_agc_enabled);
+ EXPECT_EQ(webrtc::kAgcAdaptiveDigital, agc_mode);
+}
+
+TEST_F(AudioProcessingTest, CanTurnOnDigitalRxAcg) {
+ TryEnablingRxAgcWithMode(webrtc::kAgcAdaptiveDigital);
+ TryEnablingRxAgcWithMode(webrtc::kAgcFixedDigital);
+}
+
+TEST_F(AudioProcessingTest, CannotTurnOnAdaptiveAnalogRxAgc) {
+ EXPECT_EQ(-1, voe_apm_->SetRxAgcStatus(
+ channel_, true, webrtc::kAgcAdaptiveAnalog));
+}
+
+TEST_F(AudioProcessingTest, NsIsOffWithModerateSuppressionByDefault) {
+ bool ns_status = true;
+ webrtc::NsModes ns_mode = webrtc::kNsDefault;
+ EXPECT_EQ(0, voe_apm_->GetNsStatus(ns_status, ns_mode));
+
+ EXPECT_FALSE(ns_status);
+ EXPECT_EQ(webrtc::kNsModerateSuppression, ns_mode);
+}
+
+TEST_F(AudioProcessingTest, CanSetNsMode) {
+ // Concrete suppression values map to themselves.
+ TryEnablingNsWithMode(webrtc::kNsHighSuppression,
+ webrtc::kNsHighSuppression);
+ TryEnablingNsWithMode(webrtc::kNsLowSuppression,
+ webrtc::kNsLowSuppression);
+ TryEnablingNsWithMode(webrtc::kNsModerateSuppression,
+ webrtc::kNsModerateSuppression);
+ TryEnablingNsWithMode(webrtc::kNsVeryHighSuppression,
+ webrtc::kNsVeryHighSuppression);
+
+ // Conference and Default map to concrete values.
+ TryEnablingNsWithMode(webrtc::kNsConference,
+ webrtc::kNsHighSuppression);
+ TryEnablingNsWithMode(webrtc::kNsDefault,
+ webrtc::kNsModerateSuppression);
+}
+
+TEST_F(AudioProcessingTest, RxNsIsOffWithModerateSuppressionByDefault) {
+ bool ns_status = true;
+ webrtc::NsModes ns_mode = webrtc::kNsDefault;
+ EXPECT_EQ(0, voe_apm_->GetRxNsStatus(channel_, ns_status, ns_mode));
+
+ EXPECT_FALSE(ns_status);
+ EXPECT_EQ(webrtc::kNsModerateSuppression, ns_mode);
+}
+
+TEST_F(AudioProcessingTest, CanSetRxNsMode) {
+ EXPECT_EQ(0, voe_apm_->SetRxNsStatus(channel_, true));
+
+ // See comments on the regular NS test above.
+ TryEnablingRxNsWithMode(webrtc::kNsHighSuppression,
+ webrtc::kNsHighSuppression);
+ TryEnablingRxNsWithMode(webrtc::kNsLowSuppression,
+ webrtc::kNsLowSuppression);
+ TryEnablingRxNsWithMode(webrtc::kNsModerateSuppression,
+ webrtc::kNsModerateSuppression);
+ TryEnablingRxNsWithMode(webrtc::kNsVeryHighSuppression,
+ webrtc::kNsVeryHighSuppression);
+ TryEnablingRxNsWithMode(webrtc::kNsConference,
+ webrtc::kNsHighSuppression);
+ TryEnablingRxNsWithMode(webrtc::kNsDefault,
+ webrtc::kNsModerateSuppression);
+}
+
+TEST_F(AudioProcessingTest, VadIsDisabledByDefault) {
+ bool vad_enabled;
+ bool disabled_dtx;
+ webrtc::VadModes vad_mode;
+
+ EXPECT_EQ(0, voe_codec_->GetVADStatus(
+ channel_, vad_enabled, vad_mode, disabled_dtx));
+
+ EXPECT_FALSE(vad_enabled);
+}
+
+TEST_F(AudioProcessingTest, VoiceActivityIndicatorReturns1WithSpeechOn) {
+ // This sleep is necessary since the voice detection algorithm needs some
+ // time to detect the speech from the fake microphone.
+ Sleep(500);
+ EXPECT_EQ(1, voe_apm_->VoiceActivityIndicator(channel_));
+}
+
+TEST_F(AudioProcessingTest, CanSetDelayOffset) {
+ voe_apm_->SetDelayOffsetMs(50);
+ EXPECT_EQ(50, voe_apm_->DelayOffsetMs());
+ voe_apm_->SetDelayOffsetMs(-50);
+ EXPECT_EQ(-50, voe_apm_->DelayOffsetMs());
+}
+
+TEST_F(AudioProcessingTest, HighPassFilterIsOnByDefault) {
+ EXPECT_TRUE(voe_apm_->IsHighPassFilterEnabled());
+}
+
+TEST_F(AudioProcessingTest, CanSetHighPassFilter) {
+ EXPECT_EQ(0, voe_apm_->EnableHighPassFilter(true));
+ EXPECT_TRUE(voe_apm_->IsHighPassFilterEnabled());
+ EXPECT_EQ(0, voe_apm_->EnableHighPassFilter(false));
+ EXPECT_FALSE(voe_apm_->IsHighPassFilterEnabled());
+}
+
+TEST_F(AudioProcessingTest, StereoChannelSwappingIsOffByDefault) {
+ EXPECT_FALSE(voe_apm_->IsStereoChannelSwappingEnabled());
+}
+
+TEST_F(AudioProcessingTest, CanSetStereoChannelSwapping) {
+ voe_apm_->EnableStereoChannelSwapping(true);
+ EXPECT_TRUE(voe_apm_->IsStereoChannelSwappingEnabled());
+ voe_apm_->EnableStereoChannelSwapping(false);
+ EXPECT_FALSE(voe_apm_->IsStereoChannelSwappingEnabled());
+}
+
+TEST_F(AudioProcessingTest, CanStartAndStopDebugRecording) {
+ std::string output_path = webrtc::test::OutputPath();
+ std::string output_file = output_path + "apm_debug.txt";
+
+ EXPECT_EQ(0, voe_apm_->StartDebugRecording(output_file.c_str()));
+ Sleep(1000);
+ EXPECT_EQ(0, voe_apm_->StopDebugRecording());
+}
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+
+TEST_F(AudioProcessingTest, AgcIsOffByDefaultAndDigital) {
+ bool agc_enabled = true;
+ webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
+
+ EXPECT_EQ(0, voe_apm_->GetAgcStatus(agc_enabled, agc_mode));
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_EQ(webrtc::kAgcAdaptiveDigital, agc_mode);
+}
+
+TEST_F(AudioProcessingTest, CanEnableAgcInAdaptiveDigitalMode) {
+ TryEnablingAgcWithMode(webrtc::kAgcAdaptiveDigital);
+}
+
+TEST_F(AudioProcessingTest, AgcIsPossibleExceptInAdaptiveAnalogMode) {
+ EXPECT_EQ(-1, voe_apm_->SetAgcStatus(true, webrtc::kAgcAdaptiveAnalog));
+ EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, webrtc::kAgcFixedDigital));
+ EXPECT_EQ(0, voe_apm_->SetAgcStatus(true, webrtc::kAgcAdaptiveDigital));
+}
+
+TEST_F(AudioProcessingTest, EcIsDisabledAndAecmIsDefaultEcMode) {
+ bool ec_enabled = true;
+ webrtc::EcModes ec_mode = webrtc::kEcDefault;
+
+ EXPECT_EQ(0, voe_apm_->GetEcStatus(ec_enabled, ec_mode));
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_EQ(webrtc::kEcAecm, ec_mode);
+}
+
+TEST_F(AudioProcessingTest, TestVoiceActivityDetection) {
+ TryDetectingSilence();
+ TryDetectingSpeechAfterSilence();
+}
+
+#endif // WEBRTC_IOS || WEBRTC_ANDROID
diff --git a/voice_engine/test/auto_test/standard/call_report_test.cc b/voice_engine/test/auto_test/standard/call_report_test.cc
new file mode 100644
index 0000000..c96b14d
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/call_report_test.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+#include "testsupport/fileutils.h"
+
+// Tests for the VoECallReport sub-API; streaming is already running when
+// each test body starts (see AfterStreamingFixture).
+class CallReportTest : public AfterStreamingFixture {
+};
+
+TEST_F(CallReportTest, ResetCallReportStatisticsFailsForBadInput) {
+ // -2 and 1 are not valid channel ids in this fixture.
+ EXPECT_EQ(-1, voe_call_report_->ResetCallReportStatistics(-2));
+ EXPECT_EQ(-1, voe_call_report_->ResetCallReportStatistics(1));
+}
+
+TEST_F(CallReportTest, ResetCallReportStatisticsSucceedsWithCorrectInput) {
+ // -1 is accepted here; presumably it means "all channels" - verify
+ // against the VoECallReport interface documentation.
+ EXPECT_EQ(0, voe_call_report_->ResetCallReportStatistics(channel_));
+ EXPECT_EQ(0, voe_call_report_->ResetCallReportStatistics(-1));
+}
+
+TEST_F(CallReportTest, EchoMetricSummarySucceeds) {
+ EXPECT_EQ(0, voe_apm_->SetEcMetricsStatus(true));
+ // Let some audio flow so there are metrics to summarize.
+ Sleep(1000);
+
+ webrtc::EchoStatistics echo_statistics;
+ EXPECT_EQ(0, voe_call_report_->GetEchoMetricSummary(echo_statistics));
+}
+
+TEST_F(CallReportTest, GetRoundTripTimeSummaryReturnsAllMinusOnesIfRtcpIsOff) {
+ voe_rtp_rtcp_->SetRTCPStatus(channel_, false);
+
+ // With RTCP off there are no receiver reports, so all fields are -1.
+ webrtc::StatVal delays;
+ EXPECT_EQ(0, voe_call_report_->GetRoundTripTimeSummary(channel_, delays));
+ EXPECT_EQ(-1, delays.average);
+ EXPECT_EQ(-1, delays.min);
+ EXPECT_EQ(-1, delays.max);
+}
+
+TEST_F(CallReportTest, GetRoundTripTimesReturnsValuesIfRtcpIsOn) {
+ voe_rtp_rtcp_->SetRTCPStatus(channel_, true);
+ // Wait so at least one RTCP report round-trip can complete.
+ Sleep(1000);
+
+ webrtc::StatVal delays;
+ EXPECT_EQ(0, voe_call_report_->GetRoundTripTimeSummary(channel_, delays));
+ EXPECT_NE(-1, delays.average);
+ EXPECT_NE(-1, delays.min);
+ EXPECT_NE(-1, delays.max);
+}
+
+TEST_F(CallReportTest, DeadOrAliveSummaryFailsIfDeadOrAliveTrackingNotActive) {
+ int count_the_dead;
+ int count_the_living;
+ EXPECT_EQ(-1, voe_call_report_->GetDeadOrAliveSummary(channel_,
+ count_the_dead,
+ count_the_living));
+}
+
+TEST_F(CallReportTest,
+ DeadOrAliveSummarySucceedsIfDeadOrAliveTrackingIsActive) {
+ // Enable periodic dead-or-alive checks with a one-second period, then
+ // sleep slightly longer than one period so at least one check runs.
+ EXPECT_EQ(0, voe_network_->SetPeriodicDeadOrAliveStatus(channel_, true, 1));
+ Sleep(1200);
+
+ int count_the_dead;
+ int count_the_living;
+ EXPECT_EQ(0, voe_call_report_->GetDeadOrAliveSummary(channel_,
+ count_the_dead,
+ count_the_living));
+
+ EXPECT_GE(count_the_dead, 0);
+ EXPECT_GE(count_the_living, 0);
+}
+
+TEST_F(CallReportTest, WriteReportToFileFailsOnBadInput) {
+ EXPECT_EQ(-1, voe_call_report_->WriteReportToFile(NULL));
+}
+
+TEST_F(CallReportTest, WriteReportToFileSucceedsWithCorrectFilename) {
+ std::string output_path = webrtc::test::OutputPath();
+ std::string report_filename = output_path + "call_report.txt";
+
+ EXPECT_EQ(0, voe_call_report_->WriteReportToFile(report_filename.c_str()));
+}
diff --git a/voice_engine/test/auto_test/standard/codec_before_streaming_test.cc b/voice_engine/test/auto_test/standard/codec_before_streaming_test.cc
new file mode 100644
index 0000000..6d902ef
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/codec_before_streaming_test.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_initialization_fixture.h"
+
+// Tests for receive-payload-type handling on a channel that has been
+// created but is not yet streaming.
+class CodecBeforeStreamingTest : public AfterInitializationFixture {
+ protected:
+ void SetUp() {
+ // Baseline codec settings; individual tests overwrite plname (and,
+ // for iLBC, plfreq/pacsize/rate) before use.
+ memset(&codec_instance_, 0, sizeof(codec_instance_));
+ codec_instance_.channels = 1;
+ codec_instance_.plfreq = 16000;
+ codec_instance_.pacsize = 480;
+
+ channel_ = voe_base_->CreateChannel();
+ }
+
+ void TearDown() {
+ voe_base_->DeleteChannel(channel_);
+ }
+
+ webrtc::CodecInst codec_instance_;
+ int channel_;
+};
+
+// TODO(phoglund): add test which verifies default pltypes for various codecs.
+
+TEST_F(CodecBeforeStreamingTest, GetRecPayloadTypeFailsForInvalidCodecName) {
+ strcpy(codec_instance_.plname, "SomeInvalidCodecName");
+
+ // Should fail since the codec name is invalid.
+ EXPECT_NE(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+}
+
+// The codec name lookup is expected to be case-insensitive.
+TEST_F(CodecBeforeStreamingTest, GetRecPayloadTypeRecognizesISAC) {
+ strcpy(codec_instance_.plname, "iSAC");
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+ strcpy(codec_instance_.plname, "ISAC");
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+}
+
+// Sets a custom payload type (123), reads it back, then restores 104.
+TEST_F(CodecBeforeStreamingTest, SetRecPayloadTypeCanChangeISACPayloadType) {
+ strcpy(codec_instance_.plname, "ISAC");
+
+ codec_instance_.pltype = 123;
+ EXPECT_EQ(0, voe_codec_->SetRecPayloadType(channel_, codec_instance_));
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+ EXPECT_EQ(123, codec_instance_.pltype);
+
+ codec_instance_.pltype = 104;
+ EXPECT_EQ(0, voe_codec_->SetRecPayloadType(channel_, codec_instance_));
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+
+ EXPECT_EQ(104, codec_instance_.pltype);
+}
+
+TEST_F(CodecBeforeStreamingTest, SetRecPayloadTypeCanChangeILBCPayloadType) {
+ // iLBC-specific settings: 8 kHz, 30 ms frames (240 samples), 13.3 kbps.
+ strcpy(codec_instance_.plname, "iLBC");
+ codec_instance_.plfreq = 8000;
+ codec_instance_.pacsize = 240;
+ codec_instance_.rate = 13300;
+
+ // Remember the default payload type so it can be restored at the end.
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+ int original_pltype = codec_instance_.pltype;
+ codec_instance_.pltype = 123;
+ EXPECT_EQ(0, voe_codec_->SetRecPayloadType(channel_, codec_instance_));
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+
+ EXPECT_EQ(123, codec_instance_.pltype);
+
+ codec_instance_.pltype = original_pltype;
+ EXPECT_EQ(0, voe_codec_->SetRecPayloadType(channel_, codec_instance_));
+ EXPECT_EQ(0, voe_codec_->GetRecPayloadType(channel_, codec_instance_));
+
+ EXPECT_EQ(original_pltype, codec_instance_.pltype);
+}
diff --git a/voice_engine/test/auto_test/standard/codec_test.cc b/voice_engine/test/auto_test/standard/codec_test.cc
new file mode 100644
index 0000000..d861452
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/codec_test.cc
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+#include "voe_test_defines.h"
+#include "voice_engine_defines.h"
+
+// Codec tests that run while streaming is active.
+class CodecTest : public AfterStreamingFixture {
+ protected:
+ void SetUp() {
+ memset(&codec_instance_, 0, sizeof(codec_instance_));
+ }
+
+ // Sets whichever codec the database lists first as the send codec.
+ void SetArbitrarySendCodec() {
+ // Just grab the first codec.
+ EXPECT_EQ(0, voe_codec_->GetCodec(0, codec_instance_));
+ EXPECT_EQ(0, voe_codec_->SetSendCodec(channel_, codec_instance_));
+ }
+
+ webrtc::CodecInst codec_instance_;
+};
+
+// Adjusts the bitrate for iLBC to match the packet size: 15200 bps for
+// 160/320-sample packets, 13300 bps otherwise. No-op for other codecs.
+static void SetRateIfILBC(webrtc::CodecInst* codec_instance, int packet_size) {
+ if (!_stricmp(codec_instance->plname, "ilbc")) {
+ if (packet_size == 160 || packet_size == 320) {
+ codec_instance->rate = 15200;
+ } else {
+ codec_instance->rate = 13300;
+ }
+ }
+}
+
+// CN, telephone-event and red are signaling/redundancy payloads, not
+// voice codecs, so they are skipped when iterating send codecs.
+static bool IsNotViableSendCodec(const char* codec_name) {
+ return !_stricmp(codec_name, "CN") ||
+ !_stricmp(codec_name, "telephone-event") ||
+ !_stricmp(codec_name, "red");
+}
+
+// PCMU at payload type 0, 8 kHz, 20 ms packets, 64 kbps is the expected
+// out-of-the-box send codec.
+TEST_F(CodecTest, PcmuIsDefaultCodecAndHasTheRightValues) {
+ EXPECT_EQ(0, voe_codec_->GetSendCodec(channel_, codec_instance_));
+ EXPECT_EQ(1, codec_instance_.channels);
+ EXPECT_EQ(160, codec_instance_.pacsize);
+ EXPECT_EQ(8000, codec_instance_.plfreq);
+ EXPECT_EQ(0, codec_instance_.pltype);
+ EXPECT_EQ(64000, codec_instance_.rate);
+ EXPECT_STRCASEEQ("PCMU", codec_instance_.plname);
+}
+
+TEST_F(CodecTest, VoiceActivityDetectionIsOffByDefault) {
+ // Deliberately initialize to non-default values so the getter must
+ // overwrite them.
+ bool vad_enabled = false;
+ bool dtx_disabled = false;
+ webrtc::VadModes vad_mode = webrtc::kVadAggressiveMid;
+
+ voe_codec_->GetVADStatus(channel_, vad_enabled, vad_mode, dtx_disabled);
+
+ EXPECT_FALSE(vad_enabled);
+ EXPECT_TRUE(dtx_disabled);
+ EXPECT_EQ(webrtc::kVadConventional, vad_mode);
+}
+
+TEST_F(CodecTest, VoiceActivityDetectionCanBeEnabled) {
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, true));
+
+ bool vad_enabled = false;
+ bool dtx_disabled = false;
+ webrtc::VadModes vad_mode = webrtc::kVadAggressiveMid;
+
+ voe_codec_->GetVADStatus(channel_, vad_enabled, vad_mode, dtx_disabled);
+
+ // Enabling with defaults gives conventional mode with DTX on.
+ EXPECT_TRUE(vad_enabled);
+ EXPECT_EQ(webrtc::kVadConventional, vad_mode);
+ EXPECT_FALSE(dtx_disabled);
+}
+
+// Cycles through all VAD aggressiveness modes and checks each round-trips
+// through the setter/getter pair.
+TEST_F(CodecTest, VoiceActivityDetectionTypeSettingsCanBeChanged) {
+ bool vad_enabled = false;
+ bool dtx_disabled = false;
+ webrtc::VadModes vad_mode = webrtc::kVadAggressiveMid;
+
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(
+ channel_, true, webrtc::kVadAggressiveLow, false));
+ EXPECT_EQ(0, voe_codec_->GetVADStatus(
+ channel_, vad_enabled, vad_mode, dtx_disabled));
+ EXPECT_EQ(vad_mode, webrtc::kVadAggressiveLow);
+ EXPECT_FALSE(dtx_disabled);
+
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(
+ channel_, true, webrtc::kVadAggressiveMid, false));
+ EXPECT_EQ(0, voe_codec_->GetVADStatus(
+ channel_, vad_enabled, vad_mode, dtx_disabled));
+ EXPECT_EQ(vad_mode, webrtc::kVadAggressiveMid);
+ EXPECT_FALSE(dtx_disabled);
+
+ // The fourth argument is the DTX disable flag.
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(
+ channel_, true, webrtc::kVadAggressiveHigh, true));
+ EXPECT_EQ(0, voe_codec_->GetVADStatus(
+ channel_, vad_enabled, vad_mode, dtx_disabled));
+ EXPECT_EQ(vad_mode, webrtc::kVadAggressiveHigh);
+ EXPECT_TRUE(dtx_disabled);
+
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(
+ channel_, true, webrtc::kVadConventional, true));
+ EXPECT_EQ(0, voe_codec_->GetVADStatus(
+ channel_, vad_enabled, vad_mode, dtx_disabled));
+ EXPECT_EQ(vad_mode, webrtc::kVadConventional);
+}
+
+TEST_F(CodecTest, VoiceActivityDetectionCanBeTurnedOff) {
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(channel_, true));
+
+ // VAD is always on when DTX is on, so we need to turn off DTX too.
+ EXPECT_EQ(0, voe_codec_->SetVADStatus(
+ channel_, false, webrtc::kVadConventional, true));
+
+ bool vad_enabled = false;
+ bool dtx_disabled = false;
+ webrtc::VadModes vad_mode = webrtc::kVadAggressiveMid;
+
+ voe_codec_->GetVADStatus(channel_, vad_enabled, vad_mode, dtx_disabled);
+
+ EXPECT_FALSE(vad_enabled);
+ EXPECT_TRUE(dtx_disabled);
+ EXPECT_EQ(webrtc::kVadConventional, vad_mode);
+}
+
+// Tests requiring manual verification (although they do have some value
+// without the manual verification):
+TEST_F(CodecTest, ManualExtendedISACApisBehaveAsExpected) {
+ // Configure wideband iSAC (16 kHz, 30 ms packets) in adaptive-rate mode.
+ strcpy(codec_instance_.plname, "isac");
+ codec_instance_.pltype = 103;
+ codec_instance_.plfreq = 16000;
+ codec_instance_.channels = 1;
+ // -1 here means "adaptive rate".
+ codec_instance_.rate = -1;
+ codec_instance_.pacsize = 480;
+
+ EXPECT_EQ(0, voe_codec_->SetSendCodec(channel_, codec_instance_));
+
+ // Rates outside iSAC's supported target range must be rejected.
+ EXPECT_NE(0, voe_codec_->SetISACInitTargetRate(channel_, 5000)) <<
+ "iSAC should reject rate 5000.";
+ EXPECT_NE(0, voe_codec_->SetISACInitTargetRate(channel_, 33000)) <<
+ "iSAC should reject rate 33000.";
+ EXPECT_EQ(0, voe_codec_->SetISACInitTargetRate(channel_, 32000));
+
+ TEST_LOG("Ensure that the sound is good (iSAC, target = 32kbps)...\n");
+ Sleep(3000);
+
+ EXPECT_EQ(0, voe_codec_->SetISACInitTargetRate(channel_, 10000));
+ TEST_LOG("Ensure that the sound is good (iSAC, target = 10kbps)...\n");
+ Sleep(3000);
+
+ // Rate 0 restores the default target rate (see log line below).
+ EXPECT_EQ(0, voe_codec_->SetISACInitTargetRate(channel_, 10000, true));
+ EXPECT_EQ(0, voe_codec_->SetISACInitTargetRate(channel_, 10000, false));
+ EXPECT_EQ(0, voe_codec_->SetISACInitTargetRate(channel_, 0));
+ TEST_LOG("Ensure that the sound is good (iSAC, target = default)...\n");
+ Sleep(3000);
+
+ // Max payload size can only be changed while sending is stopped.
+ TEST_LOG(" Testing SetISACMaxPayloadSize:\n");
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_NE(0, voe_codec_->SetISACMaxPayloadSize(channel_, 50));
+ EXPECT_NE(0, voe_codec_->SetISACMaxPayloadSize(channel_, 650));
+ EXPECT_EQ(0, voe_codec_->SetISACMaxPayloadSize(channel_, 120));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+ // NOTE(review): the code above sets 120 bytes but the log below says
+ // 100 bytes - one of the two looks stale; confirm and make consistent.
+ TEST_LOG("Ensure that the sound is good (iSAC, "
+ "max payload size = 100 bytes)...\n");
+ Sleep(3000);
+
+ TEST_LOG(" Testing SetISACMaxRate:\n");
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_codec_->SetISACMaxPayloadSize(channel_, 400));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_NE(0, voe_codec_->SetISACMaxRate(channel_, 31900));
+ EXPECT_NE(0, voe_codec_->SetISACMaxRate(channel_, 53500));
+ EXPECT_EQ(0, voe_codec_->SetISACMaxRate(channel_, 32000));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+ TEST_LOG("Ensure that the sound is good (iSAC, max rate = 32 kbps)...\n");
+ Sleep(3000);
+
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+
+ // Restore "no limitation". No, no limit, we reach for the sky.
+ EXPECT_EQ(0, voe_codec_->SetISACMaxRate(channel_, 53400));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+}
+
+// TODO(xians, phoglund): Re-enable when issue 372 is resolved.
+TEST_F(CodecTest, DISABLED_ManualVerifySendCodecsForAllPacketSizes) {
+ for (int i = 0; i < voe_codec_->NumOfCodecs(); ++i) {
+ voe_codec_->GetCodec(i, codec_instance_);
+ if (IsNotViableSendCodec(codec_instance_.plname)) {
+ TEST_LOG("Skipping %s.\n", codec_instance_.plname);
+ continue;
+ }
+ EXPECT_NE(-1, codec_instance_.pltype) <<
+ "The codec database should suggest a payload type.";
+
+ // Test with default packet size:
+ TEST_LOG("%s (pt=%d): default packet size(%d), accepts sizes ",
+ codec_instance_.plname, codec_instance_.pltype,
+ codec_instance_.pacsize);
+ voe_codec_->SetSendCodec(channel_, codec_instance_);
+ Sleep(CODEC_TEST_TIME);
+
+ // Now test other reasonable packet sizes:
+ bool at_least_one_succeeded = false;
+ for (int packet_size = 80; packet_size < 1000; packet_size += 80) {
+ SetRateIfILBC(&codec_instance_, packet_size);
+ codec_instance_.pacsize = packet_size;
+
+ if (voe_codec_->SetSendCodec(channel_, codec_instance_) != -1) {
+ // Note that it's fine for SetSendCodec to fail - what packet sizes
+ // it accepts depends on the codec. It should accept one at minimum.
+ TEST_LOG("%d ", packet_size);
+ TEST_LOG_FLUSH;
+ at_least_one_succeeded = true;
+ Sleep(CODEC_TEST_TIME);
+ }
+ }
+ TEST_LOG("\n");
+ EXPECT_TRUE(at_least_one_succeeded);
+ }
+}
diff --git a/voice_engine/test/auto_test/standard/dtmf_test.cc b/voice_engine/test/auto_test/standard/dtmf_test.cc
new file mode 100644
index 0000000..b1b1666
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/dtmf_test.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+#include "voice_engine_defines.h"
+
+// Tests for the VoEDtmf sub-API, run with streaming already active.
+class DtmfTest : public AfterStreamingFixture {
+ protected:
+ // Sends telephone events 0-15 on channel_, either in-band or
+ // out-of-band, with DTMF feedback tones disabled.
+ void RunSixteenDtmfEvents(bool out_of_band) {
+ TEST_LOG("Sending telephone events:\n");
+ EXPECT_EQ(0, voe_dtmf_->SetDtmfFeedbackStatus(false));
+
+ for (int i = 0; i < 16; i++) {
+ TEST_LOG("%d ", i);
+ TEST_LOG_FLUSH;
+ // 160 = event duration, 10 = attenuation (see VoEDtmf docs).
+ EXPECT_EQ(0, voe_dtmf_->SendTelephoneEvent(
+ channel_, i, out_of_band, 160, 10));
+ Sleep(500);
+ }
+ TEST_LOG("\n");
+ }
+};
+
+TEST_F(DtmfTest, DtmfFeedbackIsEnabledByDefaultButNotDirectFeedback) {
+ bool dtmf_feedback = false;
+ bool dtmf_direct_feedback = false;
+
+ EXPECT_EQ(0, voe_dtmf_->GetDtmfFeedbackStatus(dtmf_feedback,
+ dtmf_direct_feedback));
+
+ EXPECT_TRUE(dtmf_feedback);
+ EXPECT_FALSE(dtmf_direct_feedback);
+}
+
+TEST_F(DtmfTest, ManualSuccessfullySendsInBandTelephoneEvents) {
+ RunSixteenDtmfEvents(false);
+}
+
+TEST_F(DtmfTest, ManualSuccessfullySendsOutOfBandTelephoneEvents) {
+ RunSixteenDtmfEvents(true);
+}
+
+// Events 32 and 110 lie outside the 0-15 DTMF range but are still valid
+// telephone events.
+TEST_F(DtmfTest, TestTwoNonDtmfEvents) {
+ EXPECT_EQ(0, voe_dtmf_->SendTelephoneEvent(channel_, 32, true));
+ EXPECT_EQ(0, voe_dtmf_->SendTelephoneEvent(channel_, 110, true));
+}
+
+#ifndef WEBRTC_IOS
+TEST_F(DtmfTest, ManualCanDisableDtmfPlayoutExceptOnIphone) {
+ TEST_LOG("Disabling DTMF playout (no tone should be heard) \n");
+ EXPECT_EQ(0, voe_dtmf_->SetDtmfPlayoutStatus(channel_, false));
+ EXPECT_EQ(0, voe_dtmf_->SendTelephoneEvent(channel_, 0, true));
+ Sleep(500);
+
+ TEST_LOG("Enabling DTMF playout (tone should be heard) \n");
+ EXPECT_EQ(0, voe_dtmf_->SetDtmfPlayoutStatus(channel_, true));
+ EXPECT_EQ(0, voe_dtmf_->SendTelephoneEvent(channel_, 0, true));
+ Sleep(500);
+}
+#endif
+
+// This test modifies the DTMF payload type from the default 106 to 88
+// and then runs through 16 DTMF out-of-band events.
+TEST_F(DtmfTest, ManualCanChangeDtmfPayloadType) {
+ webrtc::CodecInst codec_instance;
+
+ TEST_LOG("Changing DTMF payload type.\n");
+
+ // Start by modifying the receiving side.
+ for (int i = 0; i < voe_codec_->NumOfCodecs(); i++) {
+ EXPECT_EQ(0, voe_codec_->GetCodec(i, codec_instance));
+ if (!_stricmp("telephone-event", codec_instance.plname)) {
+ codec_instance.pltype = 88; // Use 88 instead of default 106.
+ // The channel must be fully stopped while re-registering the
+ // receive payload type, then restarted in reverse order.
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StopReceive(channel_));
+ EXPECT_EQ(0, voe_codec_->SetRecPayloadType(channel_, codec_instance));
+ EXPECT_EQ(0, voe_base_->StartReceive(channel_));
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+ break;
+ }
+ }
+
+ Sleep(500);
+
+ // Next, we must modify the sending side as well.
+ EXPECT_EQ(0, voe_dtmf_->SetSendTelephoneEventPayloadType(
+ channel_, codec_instance.pltype));
+
+ RunSixteenDtmfEvents(true);
+
+ EXPECT_EQ(0, voe_dtmf_->SetDtmfFeedbackStatus(true, false));
+}
diff --git a/voice_engine/test/auto_test/standard/encryption_test.cc b/voice_engine/test/auto_test/standard/encryption_test.cc
new file mode 100644
index 0000000..acb1a0d
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/encryption_test.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/include/voe_encryption.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+// Trivial webrtc::Encryption implementation that inverts every bit of the
+// payload. Applying it on both sender and receiver restores the original
+// audio, which is what the manual test below verifies.
+class BasicBitInverseEncryption : public webrtc::Encryption {
+ void encrypt(int channel_no, unsigned char* in_data,
+ unsigned char* out_data, int bytes_in, int* bytes_out);
+ void decrypt(int channel_no, unsigned char* in_data,
+ unsigned char* out_data, int bytes_in, int* bytes_out);
+ void encrypt_rtcp(int channel_no, unsigned char* in_data,
+ unsigned char* out_data, int bytes_in, int* bytes_out);
+ void decrypt_rtcp(int channel_no, unsigned char* in_data,
+ unsigned char* out_data, int bytes_in, int* bytes_out);
+};
+
+// NOTE(review): encrypt reports bytes_in + 2 output bytes but only writes
+// bytes_in of them, and decrypt reports bytes_in - 2; this looks like a
+// deliberate size-change exercise that decrypt undoes, but the two extra
+// bytes are never initialized - confirm the receiver ignores them.
+void BasicBitInverseEncryption::encrypt(int, unsigned char* in_data,
+ unsigned char* out_data,
+ int bytes_in, int* bytes_out) {
+ int i;
+ for (i = 0; i < bytes_in; i++)
+ out_data[i] = ~in_data[i];
+ *bytes_out = bytes_in + 2;
+}
+
+void BasicBitInverseEncryption::decrypt(int, unsigned char* in_data,
+ unsigned char* out_data,
+ int bytes_in, int* bytes_out) {
+ int i;
+ for (i = 0; i < bytes_in; i++)
+ out_data[i] = ~in_data[i];
+ *bytes_out = bytes_in - 2;
+}
+
+void BasicBitInverseEncryption::encrypt_rtcp(int, unsigned char* in_data,
+ unsigned char* out_data,
+ int bytes_in, int* bytes_out) {
+ int i;
+ for (i = 0; i < bytes_in; i++)
+ out_data[i] = ~in_data[i];
+ *bytes_out = bytes_in + 2;
+}
+
+// NOTE(review): unlike decrypt, decrypt_rtcp reports bytes_in + 2 rather
+// than bytes_in - 2 - confirm the asymmetry is intentional.
+void BasicBitInverseEncryption::decrypt_rtcp(int, unsigned char* in_data,
+ unsigned char* out_data,
+ int bytes_in, int* bytes_out) {
+ int i;
+ for (i = 0; i < bytes_in; i++)
+ out_data[i] = ~in_data[i];
+ *bytes_out = bytes_in + 2;
+}
+
+
+class EncryptionTest : public AfterStreamingFixture {
+};
+
+// Registers the bit-inverting codec on the (loopback) channel; since both
+// directions apply it, the audio heard should be unaffected.
+TEST_F(EncryptionTest, ManualBasicCorrectExternalEncryptionHasNoEffectOnVoice) {
+ BasicBitInverseEncryption basic_encryption;
+
+ voe_encrypt_->RegisterExternalEncryption(channel_, basic_encryption);
+
+ TEST_LOG("Registered external encryption, should still hear good audio.");
+ Sleep(3000);
+
+ voe_encrypt_->DeRegisterExternalEncryption(channel_);
+}
diff --git a/voice_engine/test/auto_test/standard/external_media_test.cc b/voice_engine/test/auto_test/standard/external_media_test.cc
new file mode 100644
index 0000000..5c641ba
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/external_media_test.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/include/voe_external_media.h"
+#include "voice_engine/test/auto_test/fakes/fake_media_process.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+// Tests for the VoEExternalMedia sub-API (external playout/recording and
+// external media processing callbacks).
+class ExternalMediaTest : public AfterStreamingFixture {
+ protected:
+ // Registers a distorting media processor for |type| on |channel|,
+ // lets it run audibly for two seconds, then deregisters it.
+ void TestRegisterExternalMedia(int channel, webrtc::ProcessingTypes type) {
+ FakeMediaProcess fake_media_process;
+ EXPECT_EQ(0, voe_xmedia_->RegisterExternalMediaProcessing(
+ channel, type, fake_media_process));
+ Sleep(2000);
+
+ TEST_LOG("Back to normal.\n");
+ EXPECT_EQ(0, voe_xmedia_->DeRegisterExternalMediaProcessing(
+ channel, type));
+ Sleep(2000);
+ }
+};
+
+TEST_F(ExternalMediaTest, ManualCanRecordAndPlaybackUsingExternalPlayout) {
+ SwitchToManualMicrophone();
+
+ // External playout status can only be changed while the channel is
+ // fully stopped.
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(true));
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ // Pull 200 frames of 160 samples at 16 kHz = 2 seconds of audio into
+ // the 32000-sample buffer.
+ TEST_LOG("Recording data for 2 seconds starting now: please speak.\n");
+ int16_t recording[32000];
+ for (int i = 0; i < 200; i++) {
+ int sample_length = 0;
+ EXPECT_EQ(0, voe_xmedia_->ExternalPlayoutGetData(
+ &(recording[i * 160]), 16000, 100, sample_length));
+ EXPECT_EQ(160, sample_length);
+ Sleep(10);
+ }
+
+ // Switch over to external recording and feed the captured audio back
+ // in as the microphone signal.
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_xmedia_->SetExternalPlayoutStatus(false));
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+ EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(true));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ TEST_LOG("Playing back recording, you should hear what you said earlier.\n");
+ for (int i = 0; i < 200; i++) {
+ EXPECT_EQ(0, voe_xmedia_->ExternalRecordingInsertData(
+ &(recording[i * 160]), 160, 16000, 20));
+ Sleep(10);
+ }
+
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_xmedia_->SetExternalRecordingStatus(false));
+}
+
+TEST_F(ExternalMediaTest,
+ ManualRegisterExternalMediaProcessingOnAllChannelsAffectsPlayout) {
+ TEST_LOG("Enabling external media processing: audio should be affected.\n");
+ // Channel -1 applies the processing to the mix of all channels.
+ TestRegisterExternalMedia(-1, webrtc::kPlaybackAllChannelsMixed);
+}
+
+TEST_F(ExternalMediaTest,
+ ManualRegisterExternalMediaOnSingleChannelAffectsPlayout) {
+ TEST_LOG("Enabling external media processing: audio should be affected.\n");
+ TestRegisterExternalMedia(channel_, webrtc::kRecordingPerChannel);
+}
+
+TEST_F(ExternalMediaTest,
+ ManualRegisterExternalMediaOnAllChannelsMixedAffectsRecording) {
+ SwitchToManualMicrophone();
+ TEST_LOG("Speak and verify your voice is distorted.\n");
+ TestRegisterExternalMedia(-1, webrtc::kRecordingAllChannelsMixed);
+}
diff --git a/voice_engine/test/auto_test/standard/file_before_streaming_test.cc b/voice_engine/test/auto_test/standard/file_before_streaming_test.cc
new file mode 100644
index 0000000..5a10d72
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/file_before_streaming_test.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_initialization_fixture.h"
+#include "test/testsupport/fileutils.h"
+
+namespace {
+
+// 16 kHz mono PCM is used throughout; the input file is a constant DC
+// signal so playout can be verified sample-by-sample.
+const int kSampleRateHz = 16000;
+const int kTestDurationMs = 1000;
+const int kSkipOutputMs = 50;
+const int16_t kInputValue = 15000;
+const int16_t kSilenceValue = 0;
+
+} // namespace
+
+// Verifies file playout started before/after StartPlayout by recording
+// the mixer output and comparing it to the known DC input.
+class FileBeforeStreamingTest : public AfterInitializationFixture {
+ protected:
+ FileBeforeStreamingTest()
+ : input_filename_(webrtc::test::OutputPath() + "file_test_input.pcm"),
+ output_filename_(webrtc::test::OutputPath() + "file_test_output.pcm") {
+ }
+
+ void SetUp() {
+ channel_ = voe_base_->CreateChannel();
+ }
+
+ void TearDown() {
+ voe_base_->DeleteChannel(channel_);
+ }
+
+ // TODO(andrew): consolidate below methods in a shared place?
+
+ // Generate input file with constant values as |kInputValue|. The file
+ // will be one second longer than the duration of the test.
+ void GenerateInputFile() {
+ FILE* input_file = fopen(input_filename_.c_str(), "wb");
+ ASSERT_TRUE(input_file != NULL);
+ for (int i = 0; i < kSampleRateHz / 1000 * (kTestDurationMs + 1000); i++) {
+ ASSERT_EQ(1u, fwrite(&kInputValue, sizeof(kInputValue), 1, input_file));
+ }
+ ASSERT_EQ(0, fclose(input_file));
+ }
+
+ void RecordOutput() {
+ // Start recording the mixed output for |kTestDurationMs| long.
+ EXPECT_EQ(0, voe_file_->StartRecordingPlayout(-1,
+ output_filename_.c_str()));
+ Sleep(kTestDurationMs);
+ EXPECT_EQ(0, voe_file_->StopRecordingPlayout(-1));
+ }
+
+ // Checks that (nearly) every recorded sample equals |target_value|.
+ void VerifyOutput(int16_t target_value) {
+ FILE* output_file = fopen(output_filename_.c_str(), "rb");
+ ASSERT_TRUE(output_file != NULL);
+ int16_t output_value = 0;
+ int samples_read = 0;
+
+ // Skip the first segment to avoid initialization and ramping-in effects.
+ EXPECT_EQ(0, fseek(output_file, sizeof(output_value) *
+ kSampleRateHz / 1000 * kSkipOutputMs, SEEK_SET));
+ while (fread(&output_value, sizeof(output_value), 1, output_file) == 1) {
+ samples_read++;
+ EXPECT_EQ(output_value, target_value);
+ }
+
+ // Ensure that a reasonable amount was recorded. We use a loose
+ // tolerance to avoid flaky bot failures.
+ ASSERT_GE((samples_read * 1000.0) / kSampleRateHz, 0.4 * kTestDurationMs);
+
+ // Ensure we read the entire file.
+ ASSERT_NE(0, feof(output_file));
+ ASSERT_EQ(0, fclose(output_file));
+ }
+
+// NOTE(review): this member is mis-indented relative to the rest of the
+// class (it is still inside FileBeforeStreamingTest); re-indent.
+void VerifyEmptyOutput() {
+ FILE* output_file = fopen(output_filename_.c_str(), "rb");
+ ASSERT_TRUE(output_file != NULL);
+ ASSERT_EQ(0, fseek(output_file, 0, SEEK_END));
+ EXPECT_EQ(0, ftell(output_file));
+ ASSERT_EQ(0, fclose(output_file));
+}
+
+ int channel_;
+ const std::string input_filename_;
+ const std::string output_filename_;
+};
+
+// This test case is to ensure that StartPlayingFileLocally() and
+// StartPlayout() can be called in any order.
+// A DC signal is used as input. And the output of mixer is supposed to be:
+// 1. the same DC signal if file is played out,
+// 2. total silence if file is not played out,
+// 3. no output if playout is not started.
+TEST_F(FileBeforeStreamingTest, TestStartPlayingFileLocallyWithStartPlayout) {
+ GenerateInputFile();
+
+ TEST_LOG("Playout is not started. File will not be played out.\n");
+ EXPECT_EQ(0, voe_file_->StartPlayingFileLocally(
+ channel_, input_filename_.c_str(), true));
+ EXPECT_EQ(1, voe_file_->IsPlayingFileLocally(channel_));
+ RecordOutput();
+ VerifyEmptyOutput();
+
+ TEST_LOG("Playout is now started. File will be played out.\n");
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+ RecordOutput();
+ VerifyOutput(kInputValue);
+
+ TEST_LOG("Stop playing file. Only silence will be played out.\n");
+ EXPECT_EQ(0, voe_file_->StopPlayingFileLocally(channel_));
+ EXPECT_EQ(0, voe_file_->IsPlayingFileLocally(channel_));
+ RecordOutput();
+ VerifyOutput(kSilenceValue);
+
+ TEST_LOG("Start playing file again. File will be played out.\n");
+ EXPECT_EQ(0, voe_file_->StartPlayingFileLocally(
+ channel_, input_filename_.c_str(), true));
+ EXPECT_EQ(1, voe_file_->IsPlayingFileLocally(channel_));
+ RecordOutput();
+ VerifyOutput(kInputValue);
+
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_file_->StopPlayingFileLocally(channel_));
+}
diff --git a/voice_engine/test/auto_test/standard/file_test.cc b/voice_engine/test/auto_test/standard/file_test.cc
new file mode 100644
index 0000000..d8e8370
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/file_test.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+#include "test/testsupport/fileutils.h"
+
+
+// Manual tests for the VoEFile sub-API (microphone/playout recording and
+// file-as-input playback).
+class FileTest : public AfterStreamingFixture {
+ protected:
+ // Creates the string åäö.pcm.
+ std::string CreateTrickyFilenameInUtf8() {
+ // UTF-8 byte sequences for å, ä, ö, to exercise non-ASCII paths.
+ char filename[16] = { (char)0xc3, (char)0xa5,
+ (char)0xc3, (char)0xa4,
+ (char)0xc3, (char)0xb6,
+ static_cast<char>(0) };
+ return std::string(filename) + ".pcm";
+ }
+};
+
+TEST_F(FileTest, ManualRecordToFileForThreeSecondsAndPlayback) {
+ if (!FLAGS_include_timing_dependent_tests) {
+ // NOTE(review): the log literal ends with "/n" where "\n" was almost
+ // certainly intended; fix the escape in the string.
+ TEST_LOG("Skipping test - running in slow execution environment.../n");
+ return;
+ }
+
+ SwitchToManualMicrophone();
+
+ std::string recording_filename =
+ webrtc::test::OutputPath() + CreateTrickyFilenameInUtf8();
+
+ TEST_LOG("Recording to %s for 3 seconds.\n", recording_filename.c_str());
+ EXPECT_EQ(0, voe_file_->StartRecordingMicrophone(recording_filename.c_str()));
+ Sleep(3000);
+ EXPECT_EQ(0, voe_file_->StopRecordingMicrophone());
+
+ TEST_LOG("Playing back %s.\n", recording_filename.c_str());
+ EXPECT_EQ(0, voe_file_->StartPlayingFileLocally(
+ channel_, recording_filename.c_str()));
+
+ // Play the file to the user and ensure the is-playing-locally
+ // and scaling methods also work. The clip is 3 seconds long.
+ Sleep(250);
+ EXPECT_EQ(1, voe_file_->IsPlayingFileLocally(channel_));
+ Sleep(1500);
+ TEST_LOG("Decreasing level by 50%%.\n");
+ EXPECT_EQ(0, voe_file_->ScaleLocalFilePlayout(channel_, 0.5f));
+ Sleep(1500);
+ // By now (3.25 s elapsed) the 3-second clip should have finished.
+ EXPECT_EQ(0, voe_file_->IsPlayingFileLocally(channel_));
+}
+
+TEST_F(FileTest, ManualRecordPlayoutToWavFileForThreeSecondsAndPlayback) {
+ // Record with the current send codec so playback uses the same format.
+ webrtc::CodecInst send_codec;
+ voe_codec_->GetSendCodec(channel_, send_codec);
+
+ std::string recording_filename =
+ webrtc::test::OutputPath() + "playout.wav";
+
+ TEST_LOG("Recording playout to %s.\n", recording_filename.c_str());
+ EXPECT_EQ(0, voe_file_->StartRecordingPlayout(
+ channel_, recording_filename.c_str(), &send_codec));
+ Sleep(3000);
+ EXPECT_EQ(0, voe_file_->StopRecordingPlayout(channel_));
+
+ TEST_LOG("Playing back the recording in looping mode.\n");
+ EXPECT_EQ(0, voe_file_->StartPlayingFileAsMicrophone(
+ channel_, recording_filename.c_str(), true, false,
+ webrtc::kFileFormatWavFile));
+
+ Sleep(2000);
+ EXPECT_EQ(1, voe_file_->IsPlayingFileAsMicrophone(channel_));
+ Sleep(2000);
+ // We should still be playing since we're looping.
+ EXPECT_EQ(1, voe_file_->IsPlayingFileAsMicrophone(channel_));
+
+ // Try scaling as well.
+ TEST_LOG("Decreasing level by 50%%.\n");
+ EXPECT_EQ(0, voe_file_->ScaleFileAsMicrophonePlayout(channel_, 0.5f));
+ Sleep(1000);
+
+ EXPECT_EQ(0, voe_file_->StopPlayingFileAsMicrophone(channel_));
+}
diff --git a/voice_engine/test/auto_test/standard/hardware_before_initializing_test.cc b/voice_engine/test/auto_test/standard/hardware_before_initializing_test.cc
new file mode 100644
index 0000000..540614e
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/hardware_before_initializing_test.cc
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "common_types.h"
+#include "before_initialization_fixture.h"
+
+using namespace webrtc;
+
+// Fixture alias: no extra state; exists to name the suite that runs before
+// VoiceEngine initialization.
+class HardwareBeforeInitializingTest : public BeforeInitializationFixture {
+};
+
+// The audio device layer may only be selected before Init(); verify that a
+// set followed by a get round-trips the platform-default layer.
+TEST_F(HardwareBeforeInitializingTest,
+       SetAudioDeviceLayerAcceptsPlatformDefaultBeforeInitializing) {
+  AudioLayers wanted_layer = kAudioPlatformDefault;
+  AudioLayers given_layer;
+  EXPECT_EQ(0, voe_hardware_->SetAudioDeviceLayer(wanted_layer));
+  EXPECT_EQ(0, voe_hardware_->GetAudioDeviceLayer(given_layer));
+  EXPECT_EQ(wanted_layer, given_layer) <<
+      "These should be the same before initializing.";
+}
diff --git a/voice_engine/test/auto_test/standard/hardware_before_streaming_test.cc b/voice_engine/test/auto_test/standard/hardware_before_streaming_test.cc
new file mode 100644
index 0000000..edb7f56
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/hardware_before_streaming_test.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cstring>
+
+#include "after_initialization_fixture.h"
+
+using namespace webrtc;
+
+static const char* kNoDevicesErrorMessage =
+ "Either you have no recording / playout device "
+ "on your system, or the method failed.";
+
+// Fixture alias: engine is initialized but no channel is streaming yet.
+class HardwareBeforeStreamingTest : public AfterInitializationFixture {
+};
+
+// Tests that apply to both mobile and desktop:
+
+// Once the engine is initialized, changing the device layer must be rejected
+// with VE_ALREADY_INITED (complement of the before-initializing test).
+TEST_F(HardwareBeforeStreamingTest,
+       SetAudioDeviceLayerFailsSinceTheVoiceEngineHasBeenInitialized) {
+  EXPECT_NE(0, voe_hardware_->SetAudioDeviceLayer(kAudioPlatformDefault));
+  EXPECT_EQ(VE_ALREADY_INITED, voe_base_->LastError());
+}
+
+// GetCPULoad is only implemented on Windows; elsewhere it must return an
+// error code.
+TEST_F(HardwareBeforeStreamingTest,
+       GetCPULoadSucceedsOnWindowsButNotOtherPlatforms) {
+  int load_percent;
+#if defined(_WIN32)
+  EXPECT_EQ(0, voe_hardware_->GetCPULoad(load_percent));
+#else
+  EXPECT_NE(0, voe_hardware_->GetCPULoad(load_percent)) <<
+      "Should fail on non-Windows platforms.";
+#endif
+}
+
+// Tests that only apply to mobile:
+
+#ifdef WEBRTC_IOS
+// iOS-only: resetting the audio device should succeed before streaming.
+TEST_F(HardwareBeforeStreamingTest, ResetsAudioDeviceOnIphone) {
+  EXPECT_EQ(0, voe_hardware_->ResetAudioDevice());
+}
+#endif
+
+// Tests that only apply to desktop:
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+
+// System-wide CPU load should be retrievable on desktop platforms.
+TEST_F(HardwareBeforeStreamingTest, GetSystemCpuLoadSucceeds) {
+#ifdef _WIN32
+  // This method needs some warm-up time on Windows. We sleep a good amount
+  // of time instead of retrying to make the test simpler.
+  Sleep(2000);
+#endif
+
+  int load_percent;
+  EXPECT_EQ(0, voe_hardware_->GetSystemCPULoad(load_percent));
+}
+
+// A playout device must be present and reported available.
+// NOTE(review): this uses ASSERT_TRUE while the recording-side twin below
+// uses EXPECT_TRUE — consider aligning the two.
+TEST_F(HardwareBeforeStreamingTest, GetPlayoutDeviceStatusReturnsTrue) {
+  bool play_available = false;
+  EXPECT_EQ(0, voe_hardware_->GetPlayoutDeviceStatus(play_available));
+  ASSERT_TRUE(play_available) <<
+      "Ensures that the method works and that hardware is in the right state.";
+}
+
+// A recording device must be present and reported available.
+TEST_F(HardwareBeforeStreamingTest, GetRecordingDeviceStatusReturnsTrue) {
+  bool recording_available = false;
+  EXPECT_EQ(0, voe_hardware_->GetRecordingDeviceStatus(recording_available));
+  EXPECT_TRUE(recording_available) <<
+      "Ensures that the method works and that hardware is in the right state.";
+}
+
+ // Win, Mac and Linux sound device tests.
+// Retrieving device names for the default recording and playout devices
+// must yield non-empty names.
+TEST_F(HardwareBeforeStreamingTest,
+       GetRecordingDeviceNameRetrievesDeviceNames) {
+  // -1 means "default device" on Windows; other platforms query the first
+  // enumerated device. The two code paths differed only in this index, so
+  // hoist it into a constant instead of duplicating the body.
+#ifdef _WIN32
+  const int kDeviceIndex = -1;
+#else
+  const int kDeviceIndex = 0;
+#endif
+  char device_name[128] = {0};
+  char guid_name[128] = {0};
+
+  EXPECT_EQ(0, voe_hardware_->GetRecordingDeviceName(
+      kDeviceIndex, device_name, guid_name));
+  EXPECT_GT(strlen(device_name), 0u) << kNoDevicesErrorMessage;
+  device_name[0] = '\0';
+
+  EXPECT_EQ(0, voe_hardware_->GetPlayoutDeviceName(
+      kDeviceIndex, device_name, guid_name));
+  EXPECT_GT(strlen(device_name), 0u) << kNoDevicesErrorMessage;
+}
+
+// Every recording device the system enumerates must have a non-empty name
+// and be selectable as the active recording device.
+TEST_F(HardwareBeforeStreamingTest,
+       AllEnumeratedRecordingDevicesCanBeSetAsRecordingDevice) {
+  // Check recording side.
+  // Extended Win32 enumeration tests: unique GUID outputs on Vista and up:
+  // Win XP and below : device_name is copied to guid_name.
+  // Win Vista and up : device_name is the friendly name and GUID is a unique
+  //                    identifier.
+  // Other            : guid_name is left unchanged.
+  int num_of_recording_devices = 0;
+  EXPECT_EQ(0, voe_hardware_->GetNumOfRecordingDevices(
+      num_of_recording_devices));
+  EXPECT_GT(num_of_recording_devices, 0) << kNoDevicesErrorMessage;
+
+  char device_name[128] = {0};
+  char guid_name[128] = {0};
+
+  for (int i = 0; i < num_of_recording_devices; i++) {
+    EXPECT_EQ(0, voe_hardware_->GetRecordingDeviceName(
+        i, device_name, guid_name));
+    EXPECT_GT(strlen(device_name), 0u) <<
+        "There should be no empty device names "
+        "among the ones the system gives us.";
+    EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(i));
+  }
+}
+
+// Playout-side counterpart of the recording-device enumeration test above.
+TEST_F(HardwareBeforeStreamingTest,
+       AllEnumeratedPlayoutDevicesCanBeSetAsPlayoutDevice) {
+  // Check playout side (see recording side test for more info on GUIDs).
+  int num_of_playout_devices = 0;
+  EXPECT_EQ(0, voe_hardware_->GetNumOfPlayoutDevices(
+      num_of_playout_devices));
+  EXPECT_GT(num_of_playout_devices, 0) << kNoDevicesErrorMessage;
+
+  char device_name[128] = {0};
+  char guid_name[128] = {0};
+
+  for (int i = 0; i < num_of_playout_devices; ++i) {
+    EXPECT_EQ(0, voe_hardware_->GetPlayoutDeviceName(
+        i, device_name, guid_name));
+    EXPECT_GT(strlen(device_name), 0u) <<
+        "There should be no empty device names "
+        "among the ones the system gives us.";
+    EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(i));
+  }
+}
+
+// Selecting the platform's "default device" index must succeed.
+TEST_F(HardwareBeforeStreamingTest,
+       SetDeviceWithMagicalArgumentsSetsDefaultSoundDevices) {
+#ifdef _WIN32
+  // -1 means "default device" on Windows.
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(-1));
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(-1));
+#else
+  // Other platforms have no magic index; use the first device.
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(0));
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(0));
+#endif
+}
+
+#endif  // !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
diff --git a/voice_engine/test/auto_test/standard/hardware_test.cc b/voice_engine/test/auto_test/standard/hardware_test.cc
new file mode 100644
index 0000000..7310e52
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/hardware_test.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "voe_test_defines.h"
+
+// Fixture alias: engine is initialized and a channel is already streaming.
+class HardwareTest : public AfterStreamingFixture {
+};
+
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+// While streaming, device enumeration, selection and name retrieval must
+// all work.
+TEST_F(HardwareTest, AbleToQueryForDevices) {
+  int num_recording_devices = 0;
+  int num_playout_devices = 0;
+  EXPECT_EQ(0, voe_hardware_->GetNumOfRecordingDevices(num_recording_devices));
+  EXPECT_EQ(0, voe_hardware_->GetNumOfPlayoutDevices(num_playout_devices));
+
+  ASSERT_GT(num_recording_devices, 0) <<
+      "There seem to be no recording devices on your system, "
+      "and this test really doesn't make sense then.";
+  ASSERT_GT(num_playout_devices, 0) <<
+      "There seem to be no playout devices on your system, "
+      "and this test really doesn't make sense then.";
+
+  // Recording devices are handled a bit differently on Windows - we can
+  // just tell it to set the 'default' communication device there.
+#ifdef _WIN32
+  // Should also work while already recording.
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(
+      webrtc::AudioDeviceModule::kDefaultCommunicationDevice));
+  // Should also work while already playing.
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(
+      webrtc::AudioDeviceModule::kDefaultCommunicationDevice));
+#else
+  // For other platforms, just use the first device encountered.
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(0));
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(0));
+#endif
+
+  // It's hard to know what names this will return (it's system-dependent),
+  // so just check that it's possible to do it.
+  char device_name[128] = {0};
+  char guid_name[128] = {0};
+  EXPECT_EQ(0, voe_hardware_->GetRecordingDeviceName(
+      0, device_name, guid_name));
+  EXPECT_EQ(0, voe_hardware_->GetPlayoutDeviceName(
+      0, device_name, guid_name));
+}
+#endif
+
+#ifdef _WIN32
+// GetCPULoad is Windows-only; the returned load is a percentage.
+TEST_F(HardwareTest, GetCpuLoadWorksOnWindows) {
+  int load = -1;
+  EXPECT_EQ(0, voe_hardware_->GetCPULoad(load));
+  // Bug fix: EXPECT_GE(val1, val2) asserts val1 >= val2, so the original
+  // EXPECT_GE(0, load) required load <= 0. The intent (cf. the system-load
+  // test below) is a non-negative percentage: load >= 0.
+  EXPECT_GE(load, 0);
+  TEST_LOG("Voice engine CPU load = %d%%\n", load);
+}
+#else
+// On non-Windows platforms GetCPULoad is unimplemented and must return -1.
+TEST_F(HardwareTest, GetCpuLoadReturnsErrorOnNonWindowsPlatform) {
+  int load = -1;
+  EXPECT_EQ(-1, voe_hardware_->GetCPULoad(load));
+}
+#endif
+
+#if !defined(WEBRTC_MAC) && !defined(WEBRTC_ANDROID)
+// System CPU load should be a non-negative percentage on supported platforms.
+TEST_F(HardwareTest, GetSystemCpuLoadWorksExceptOnMacAndAndroid) {
+#ifdef _WIN32
+  // This method needs some warm-up time on Windows. We sleep a good amount
+  // of time instead of retrying to make the test simpler.
+  Sleep(2000);
+#endif
+  int load = -1;
+  EXPECT_EQ(0, voe_hardware_->GetSystemCPULoad(load));
+  EXPECT_GE(load, 0);
+  TEST_LOG("System CPU load = %d%%\n", load);
+}
+#endif
+
+// Exercises the built-in (WASAPI) AEC state machine: it may only be toggled
+// while sending is stopped, requires playout to be started before sending,
+// and must survive switching capture/render devices mid-stream. On layers
+// other than Windows Core Audio, enabling it must fail. The call ordering
+// below is deliberate — do not reorder.
+TEST_F(HardwareTest, BuiltInWasapiAECWorksForAudioWindowsCoreAudioLayer) {
+#ifdef WEBRTC_IOS
+  // Ensure the sound device is reset on iPhone.
+  EXPECT_EQ(0, voe_hardware_->ResetAudioDevice());
+  Sleep(2000);
+#endif
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+
+  webrtc::AudioLayers given_layer;
+  EXPECT_EQ(0, voe_hardware_->GetAudioDeviceLayer(given_layer));
+  if (given_layer != webrtc::kAudioWindowsCore) {
+    // Not Windows Audio Core - then it shouldn't work.
+    EXPECT_EQ(-1, voe_hardware_->EnableBuiltInAEC(true));
+    EXPECT_EQ(-1, voe_hardware_->EnableBuiltInAEC(false));
+    return;
+  }
+
+  TEST_LOG("Testing AEC for Audio Windows Core.\n");
+  EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+  // Can't be set after StartSend().
+  EXPECT_EQ(-1, voe_hardware_->EnableBuiltInAEC(true));
+  EXPECT_EQ(-1, voe_hardware_->EnableBuiltInAEC(false));
+
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_hardware_->EnableBuiltInAEC(true));
+
+  // Can't be called before StartPlayout().
+  EXPECT_EQ(-1, voe_base_->StartSend(channel_));
+
+  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+  EXPECT_EQ(0, voe_base_->StartSend(channel_));
+  TEST_LOG("Processing capture data with built-in AEC...\n");
+  Sleep(2000);
+
+  TEST_LOG("Looping through capture devices...\n");
+  int num_devs = 0;
+  char dev_name[128] = { 0 };
+  char guid_name[128] = { 0 };
+  EXPECT_EQ(0, voe_hardware_->GetNumOfRecordingDevices(num_devs));
+  for (int dev_index = 0; dev_index < num_devs; ++dev_index) {
+    EXPECT_EQ(0, voe_hardware_->GetRecordingDeviceName(dev_index,
+                                                       dev_name,
+                                                       guid_name));
+    TEST_LOG("%d: %s\n", dev_index, dev_name);
+    EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(dev_index));
+    Sleep(2000);
+  }
+
+  // -1 selects the default device on Windows (the only platform that
+  // reaches this point).
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(-1));
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(-1));
+
+  TEST_LOG("Looping through render devices, restarting for each "
+           "device...\n");
+  EXPECT_EQ(0, voe_hardware_->GetNumOfPlayoutDevices(num_devs));
+  for (int dev_index = 0; dev_index < num_devs; ++dev_index) {
+    EXPECT_EQ(0, voe_hardware_->GetPlayoutDeviceName(dev_index,
+                                                     dev_name,
+                                                     guid_name));
+    TEST_LOG("%d: %s\n", dev_index, dev_name);
+    EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(dev_index));
+    Sleep(2000);
+  }
+
+  TEST_LOG("Using default devices...\n");
+  EXPECT_EQ(0, voe_hardware_->SetRecordingDevice(-1));
+  EXPECT_EQ(0, voe_hardware_->SetPlayoutDevice(-1));
+  Sleep(2000);
+
+  // Possible, but not recommended before StopSend().
+  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+
+  EXPECT_EQ(0, voe_base_->StopSend(channel_));
+  EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+  Sleep(2000); // To verify that there is no garbage audio.
+
+  TEST_LOG("Disabling built-in AEC.\n");
+  EXPECT_EQ(0, voe_hardware_->EnableBuiltInAEC(false));
+
+  EXPECT_EQ(0, voe_base_->StartSend(channel_));
+  EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+}
diff --git a/voice_engine/test/auto_test/standard/manual_hold_test.cc b/voice_engine/test/auto_test/standard/manual_hold_test.cc
new file mode 100644
index 0000000..68f28b4
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/manual_hold_test.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+
+// Note: This class includes sleeps and requires manual verification.
+// Note: This class includes sleeps and requires manual verification.
+class ManualHoldTest : public AfterStreamingFixture {
+};
+
+// Toggles hold on the whole channel and on the send side only; manual:
+// audio must be audible exactly when the channel is not on hold.
+TEST_F(ManualHoldTest, SetOnHoldStatusBlockAudio) {
+  TEST_LOG("Channel not on hold => should hear audio.\n");
+  Sleep(2000);
+  TEST_LOG("Put channel on hold => should *not* hear audio.\n");
+  EXPECT_EQ(0, voe_base_->SetOnHoldStatus(channel_, true));
+  Sleep(2000);
+  TEST_LOG("Remove on hold => should hear audio again.\n");
+  EXPECT_EQ(0, voe_base_->SetOnHoldStatus(channel_, false));
+  Sleep(2000);
+  TEST_LOG("Put sending on hold => should *not* hear audio.\n");
+  // Wrapped to stay within 80 columns, matching the style used by the
+  // sibling test below.
+  EXPECT_EQ(0, voe_base_->SetOnHoldStatus(
+      channel_, true, webrtc::kHoldSendOnly));
+  Sleep(2000);
+}
+
+// Play-side hold must also silence a file that is being played out locally.
+// Manual: listen to verify.
+TEST_F(ManualHoldTest, SetOnHoldStatusBlocksLocalFileAudio) {
+  TEST_LOG("Start playing a file locally => "
+      "you should now hear this file being played out.\n");
+  voe_file_->StopPlayingFileAsMicrophone(channel_);
+  EXPECT_EQ(0, voe_file_->StartPlayingFileLocally(
+      channel_, resource_manager_.long_audio_file_path().c_str(), true));
+  Sleep(2000);
+
+  TEST_LOG("Put playing on hold => should *not* hear audio.\n");
+  EXPECT_EQ(0, voe_base_->SetOnHoldStatus(
+      channel_, true, webrtc::kHoldPlayOnly));
+  Sleep(2000);
+}
diff --git a/voice_engine/test/auto_test/standard/mixing_test.cc b/voice_engine/test/auto_test/standard/mixing_test.cc
new file mode 100644
index 0000000..6a90c07
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/mixing_test.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "after_initialization_fixture.h"
+#include "test/testsupport/fileutils.h"
+
+namespace webrtc {
+namespace {
+
+// Mixer limiter ceiling in PCM sample units (== -1 dBFS).
+const int16_t kLimiterHeadroom = 29204;
+const int16_t kInt16Max = 0x7fff;
+const int kSampleRateHz = 16000;
+const int kTestDurationMs = 3000;
+// Initial portion of the recording discarded to skip ramp-in effects.
+const int kSkipOutputMs = 500;
+
+}  // namespace
+
+// Drives the VoiceEngine mixer with synthetic constant-amplitude streams and
+// verifies the recorded mixed output stays inside an expected amplitude band.
+class MixingTest : public AfterInitializationFixture {
+ protected:
+  MixingTest()
+      : input_filename_(test::OutputPath() + "mixing_test_input.pcm"),
+        output_filename_(test::OutputPath() + "mixing_test_output.pcm") {
+  }
+
+  // Creates and mixes |num_remote_streams| which play a file "as microphone"
+  // with |num_local_streams| which play a file "locally", using a constant
+  // amplitude of |input_value|. The local streams manifest as "anonymous"
+  // mixing participants, meaning they will be mixed regardless of the number
+  // of participants. (A stream is a VoiceEngine "channel").
+  //
+  // The mixed output is verified to always fall between |max_output_value| and
+  // |min_output_value|, after a startup phase.
+  //
+  // |num_remote_streams_using_mono| of the remote streams use mono, with the
+  // remainder using stereo.
+  void RunMixingTest(int num_remote_streams,
+                     int num_local_streams,
+                     int num_remote_streams_using_mono,
+                     int16_t input_value,
+                     int16_t max_output_value,
+                     int16_t min_output_value) {
+    ASSERT_LE(num_remote_streams_using_mono, num_remote_streams);
+
+    GenerateInputFile(input_value);
+
+    std::vector<int> local_streams(num_local_streams);
+    for (size_t i = 0; i < local_streams.size(); ++i) {
+      local_streams[i] = voe_base_->CreateChannel();
+      EXPECT_NE(-1, local_streams[i]);
+    }
+    StartLocalStreams(local_streams);
+    TEST_LOG("Playing %d local streams.\n", num_local_streams);
+
+    std::vector<int> remote_streams(num_remote_streams);
+    for (size_t i = 0; i < remote_streams.size(); ++i) {
+      remote_streams[i] = voe_base_->CreateChannel();
+      EXPECT_NE(-1, remote_streams[i]);
+    }
+    StartRemoteStreams(remote_streams, num_remote_streams_using_mono);
+    TEST_LOG("Playing %d remote streams.\n", num_remote_streams);
+
+    // Start recording the mixed output and wait.
+    EXPECT_EQ(0, voe_file_->StartRecordingPlayout(-1 /* record meeting */,
+        output_filename_.c_str()));
+    Sleep(kTestDurationMs);
+    EXPECT_EQ(0, voe_file_->StopRecordingPlayout(-1));
+
+    StopLocalStreams(local_streams);
+    StopRemoteStreams(remote_streams);
+
+    VerifyMixedOutput(max_output_value, min_output_value);
+  }
+
+ private:
+  // Generate input file with constant values equal to |input_value|. The file
+  // will be one second longer than the duration of the test.
+  void GenerateInputFile(int16_t input_value) {
+    FILE* input_file = fopen(input_filename_.c_str(), "wb");
+    ASSERT_TRUE(input_file != NULL);
+    for (int i = 0; i < kSampleRateHz / 1000 * (kTestDurationMs + 1000); i++) {
+      ASSERT_EQ(1u, fwrite(&input_value, sizeof(input_value), 1, input_file));
+    }
+    ASSERT_EQ(0, fclose(input_file));
+  }
+
+  // Reads the recorded mix sample-by-sample and asserts every sample (after
+  // the skip window) lies within [min_output_value, max_output_value].
+  void VerifyMixedOutput(int16_t max_output_value, int16_t min_output_value) {
+    // Verify the mixed output.
+    FILE* output_file = fopen(output_filename_.c_str(), "rb");
+    ASSERT_TRUE(output_file != NULL);
+    int16_t output_value = 0;
+    // Skip the first segment to avoid initialization and ramping-in effects.
+    EXPECT_EQ(0, fseek(output_file, sizeof(output_value) *
+              kSampleRateHz / 1000 * kSkipOutputMs, SEEK_SET));
+    int samples_read = 0;
+    while (fread(&output_value, sizeof(output_value), 1, output_file) == 1) {
+      samples_read++;
+      std::ostringstream trace_stream;
+      trace_stream << samples_read << " samples read";
+      SCOPED_TRACE(trace_stream.str());
+      EXPECT_LE(output_value, max_output_value);
+      EXPECT_GE(output_value, min_output_value);
+    }
+    // Ensure the recording length is close to the duration of the test.
+    // We have to use a relaxed tolerance here due to filesystem flakiness on
+    // the bots.
+    ASSERT_GE((samples_read * 1000.0) / kSampleRateHz,
+              0.7 * (kTestDurationMs - kSkipOutputMs));
+    // Ensure we read the entire file.
+    ASSERT_NE(0, feof(output_file));
+    ASSERT_EQ(0, fclose(output_file));
+  }
+
+  // Start up local streams ("anonymous" participants).
+  void StartLocalStreams(const std::vector<int>& streams) {
+    for (size_t i = 0; i < streams.size(); ++i) {
+      EXPECT_EQ(0, voe_base_->StartPlayout(streams[i]));
+      EXPECT_EQ(0, voe_file_->StartPlayingFileLocally(streams[i],
+          input_filename_.c_str(), true));
+    }
+  }
+
+  void StopLocalStreams(const std::vector<int>& streams) {
+    for (size_t i = 0; i < streams.size(); ++i) {
+      EXPECT_EQ(0, voe_base_->StopPlayout(streams[i]));
+      EXPECT_EQ(0, voe_base_->DeleteChannel(streams[i]));
+    }
+  }
+
+  // Start up remote streams ("normal" participants).
+  void StartRemoteStreams(const std::vector<int>& streams,
+                          int num_remote_streams_using_mono) {
+    // Use L16 at 16kHz to minimize distortion (file recording is 16kHz and
+    // resampling will cause distortion).
+    CodecInst codec_inst;
+    strcpy(codec_inst.plname, "L16");
+    codec_inst.channels = 1;
+    codec_inst.plfreq = kSampleRateHz;
+    // 105/106 (below) are arbitrary dynamic RTP payload types for the mono
+    // and stereo variants.
+    codec_inst.pltype = 105;
+    codec_inst.pacsize = codec_inst.plfreq / 100;
+    codec_inst.rate = codec_inst.plfreq * sizeof(int16_t) * 8;  // 8 bits/byte.
+
+    for (int i = 0; i < num_remote_streams_using_mono; ++i) {
+      StartRemoteStream(streams[i], codec_inst, 1234 + 2 * i);
+    }
+
+    // The remainder of the streams will use stereo.
+    codec_inst.channels = 2;
+    codec_inst.pltype++;
+    for (size_t i = num_remote_streams_using_mono; i < streams.size(); ++i) {
+      StartRemoteStream(streams[i], codec_inst, 1234 + 2 * i);
+    }
+  }
+
+  // Start up a single remote stream, looped back over localhost on |port|.
+  void StartRemoteStream(int stream, const CodecInst& codec_inst, int port) {
+    EXPECT_EQ(0, voe_codec_->SetRecPayloadType(stream, codec_inst));
+    EXPECT_EQ(0, voe_base_->SetLocalReceiver(stream, port));
+    EXPECT_EQ(0, voe_base_->SetSendDestination(stream, port, "127.0.0.1"));
+    EXPECT_EQ(0, voe_base_->StartReceive(stream));
+    EXPECT_EQ(0, voe_base_->StartPlayout(stream));
+    EXPECT_EQ(0, voe_codec_->SetSendCodec(stream, codec_inst));
+    EXPECT_EQ(0, voe_base_->StartSend(stream));
+    EXPECT_EQ(0, voe_file_->StartPlayingFileAsMicrophone(stream,
+        input_filename_.c_str(), true));
+  }
+
+  void StopRemoteStreams(const std::vector<int>& streams) {
+    for (size_t i = 0; i < streams.size(); ++i) {
+      EXPECT_EQ(0, voe_base_->StopSend(streams[i]));
+      EXPECT_EQ(0, voe_base_->StopPlayout(streams[i]));
+      EXPECT_EQ(0, voe_base_->StopReceive(streams[i]));
+      EXPECT_EQ(0, voe_base_->DeleteChannel(streams[i]));
+    }
+  }
+
+  const std::string input_filename_;
+  const std::string output_filename_;
+};
+
+// These tests assume a maximum of three mixed participants. We typically allow
+// a +/- 10% range around the expected output level to account for distortion
+// from coding and processing in the loopback chain.
+// With four streams only three are mixed, so the output should be ~3x the
+// input amplitude.
+TEST_F(MixingTest, FourChannelsWithOnlyThreeMixed) {
+  const int16_t kInputValue = 1000;
+  const int16_t kExpectedOutput = kInputValue * 3;
+  RunMixingTest(4, 0, 4, kInputValue, 1.1 * kExpectedOutput,
+                0.9 * kExpectedOutput);
+}
+
+// Ensure the mixing saturation protection is working. We can do this because
+// the mixing limiter is given some headroom, so the expected output is less
+// than full scale.
+// Three loud streams would clip without the limiter; the limited output
+// should sit at the limiter headroom level.
+TEST_F(MixingTest, VerifySaturationProtection) {
+  const int16_t kInputValue = 20000;
+  const int16_t kExpectedOutput = kLimiterHeadroom;
+  // If this isn't satisfied, we're not testing anything.
+  ASSERT_GT(kInputValue * 3, kInt16Max);
+  ASSERT_LT(1.1 * kExpectedOutput, kInt16Max);
+  RunMixingTest(3, 0, 3, kInputValue, 1.1 * kExpectedOutput,
+                0.9 * kExpectedOutput);
+}
+
+// A single full-scale stream must pass through unattenuated by the limiter.
+TEST_F(MixingTest, SaturationProtectionHasNoEffectOnOneChannel) {
+  const int16_t kInputValue = kInt16Max;
+  const int16_t kExpectedOutput = kInt16Max;
+  // If this isn't satisfied, we're not testing anything.
+  ASSERT_GT(0.95 * kExpectedOutput, kLimiterHeadroom);
+  // Tighter constraints are required here to properly test this.
+  RunMixingTest(1, 0, 1, kInputValue, kExpectedOutput,
+                0.95 * kExpectedOutput);
+}
+
+// One anonymous (local) plus one normal (remote) stream should sum to ~2x.
+TEST_F(MixingTest, VerifyAnonymousAndNormalParticipantMixing) {
+  const int16_t kInputValue = 1000;
+  const int16_t kExpectedOutput = kInputValue * 2;
+  RunMixingTest(1, 1, 1, kInputValue, 1.1 * kExpectedOutput,
+                0.9 * kExpectedOutput);
+}
+
+// The anonymous stream is mixed on top of the three-participant cap, giving
+// ~4x the input amplitude.
+TEST_F(MixingTest, AnonymousParticipantsAreAlwaysMixed) {
+  const int16_t kInputValue = 1000;
+  const int16_t kExpectedOutput = kInputValue * 4;
+  RunMixingTest(3, 1, 3, kInputValue, 1.1 * kExpectedOutput,
+                0.9 * kExpectedOutput);
+}
+
+// One mono and one stereo stream mixed together should sum to ~2x.
+TEST_F(MixingTest, VerifyStereoAndMonoMixing) {
+  const int16_t kInputValue = 1000;
+  const int16_t kExpectedOutput = kInputValue * 2;
+  RunMixingTest(2, 0, 1, kInputValue, 1.1 * kExpectedOutput,
+                0.9 * kExpectedOutput);
+}
+
+} // namespace webrtc
diff --git a/voice_engine/test/auto_test/standard/neteq_stats_test.cc b/voice_engine/test/auto_test/standard/neteq_stats_test.cc
new file mode 100644
index 0000000..0cb4158
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/neteq_stats_test.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+// Fixture alias: channel is already streaming when each test starts.
+class NetEQStatsTest : public AfterStreamingFixture {
+};
+
+// Streams for five seconds, then dumps all NetEQ network statistics for
+// manual inspection. Only the GetNetworkStatistics return code is asserted.
+TEST_F(NetEQStatsTest, ManualPrintStatisticsAfterRunningAWhile) {
+  Sleep(5000);
+
+  webrtc::NetworkStatistics network_statistics;
+
+  EXPECT_EQ(0, voe_neteq_stats_->GetNetworkStatistics(
+      channel_, network_statistics));
+
+  TEST_LOG("Inspect these statistics and ensure they make sense.\n");
+
+  TEST_LOG("    currentAccelerateRate = %hu \n",
+      network_statistics.currentAccelerateRate);
+  TEST_LOG("    currentBufferSize = %hu \n",
+      network_statistics.currentBufferSize);
+  TEST_LOG("    currentDiscardRate = %hu \n",
+      network_statistics.currentDiscardRate);
+  TEST_LOG("    currentExpandRate = %hu \n",
+      network_statistics.currentExpandRate);
+  TEST_LOG("    currentPacketLossRate = %hu \n",
+      network_statistics.currentPacketLossRate);
+  TEST_LOG("    currentPreemptiveRate = %hu \n",
+      network_statistics.currentPreemptiveRate);
+  TEST_LOG("    preferredBufferSize = %hu \n",
+      network_statistics.preferredBufferSize);
+  TEST_LOG("    jitterPeaksFound = %i \n",
+      network_statistics.jitterPeaksFound);
+  TEST_LOG("    clockDriftPPM = %i \n",
+      network_statistics.clockDriftPPM);
+  TEST_LOG("    meanWaitingTimeMs = %i \n",
+      network_statistics.meanWaitingTimeMs);
+  TEST_LOG("    medianWaitingTimeMs = %i \n",
+      network_statistics.medianWaitingTimeMs);
+  TEST_LOG("    minWaitingTimeMs = %i \n",
+      network_statistics.minWaitingTimeMs);
+  TEST_LOG("    maxWaitingTimeMs = %i \n",
+      network_statistics.maxWaitingTimeMs);
+}
diff --git a/voice_engine/test/auto_test/standard/neteq_test.cc b/voice_engine/test/auto_test/standard/neteq_test.cc
new file mode 100644
index 0000000..8184535
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/neteq_test.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+
+// Adds a second channel so tests can verify per-channel NetEQ settings are
+// independent.
+// NOTE(review): if AfterStreamingFixture defines its own SetUp()/TearDown(),
+// these overrides do not call through to it — verify the streaming setup
+// happens in the fixture's constructor, not its SetUp().
+class NetEQTest : public AfterStreamingFixture {
+ protected:
+  void SetUp() {
+    additional_channel_ = voe_base_->CreateChannel();
+  }
+
+  void TearDown() {
+    voe_base_->DeleteChannel(additional_channel_);
+  }
+
+  // Second channel used to check settings don't leak across channels.
+  int additional_channel_;
+};
+
+// A fresh channel reports the default NetEQ playout mode.
+TEST_F(NetEQTest, GetNetEQPlayoutModeReturnsDefaultModeByDefault) {
+  webrtc::NetEqModes mode;
+  EXPECT_EQ(0, voe_base_->GetNetEQPlayoutMode(channel_, mode));
+  EXPECT_EQ(webrtc::kNetEqDefault, mode);
+}
+
+// The playout mode is per-channel: setting one channel must not affect the
+// other, in either direction.
+TEST_F(NetEQTest, SetNetEQPlayoutModeActuallySetsTheModeForTheChannel) {
+  webrtc::NetEqModes mode;
+  // Set for the first channel but leave the second.
+  EXPECT_EQ(0, voe_base_->SetNetEQPlayoutMode(channel_, webrtc::kNetEqFax));
+  EXPECT_EQ(0, voe_base_->GetNetEQPlayoutMode(channel_, mode));
+  EXPECT_EQ(webrtc::kNetEqFax, mode);
+
+  EXPECT_EQ(0, voe_base_->GetNetEQPlayoutMode(additional_channel_, mode));
+  EXPECT_EQ(webrtc::kNetEqDefault, mode);
+
+  // Set the second channel, leave the first.
+  EXPECT_EQ(0, voe_base_->SetNetEQPlayoutMode(
+      additional_channel_, webrtc::kNetEqStreaming));
+  EXPECT_EQ(0, voe_base_->GetNetEQPlayoutMode(additional_channel_, mode));
+  EXPECT_EQ(webrtc::kNetEqStreaming, mode);
+
+  EXPECT_EQ(0, voe_base_->GetNetEQPlayoutMode(channel_, mode));
+  EXPECT_EQ(webrtc::kNetEqFax, mode);
+}
+
+// Background-noise generation defaults to "on".
+TEST_F(NetEQTest, GetNetEQBgnModeReturnsBgnOnByDefault) {
+  webrtc::NetEqBgnModes bgn_mode;
+  EXPECT_EQ(0, voe_base_->GetNetEQBGNMode(channel_, bgn_mode));
+  EXPECT_EQ(webrtc::kBgnOn, bgn_mode);
+}
+
+// Each BGN mode set on the channel must be read back unchanged.
+TEST_F(NetEQTest, SetNetEQBgnModeActuallySetsTheBgnMode) {
+  webrtc::NetEqBgnModes bgn_mode;
+  EXPECT_EQ(0, voe_base_->SetNetEQBGNMode(channel_, webrtc::kBgnOff));
+  EXPECT_EQ(0, voe_base_->GetNetEQBGNMode(channel_, bgn_mode));
+  EXPECT_EQ(webrtc::kBgnOff, bgn_mode);
+
+  EXPECT_EQ(0, voe_base_->SetNetEQBGNMode(channel_, webrtc::kBgnFade));
+  EXPECT_EQ(0, voe_base_->GetNetEQBGNMode(channel_, bgn_mode));
+  EXPECT_EQ(webrtc::kBgnFade, bgn_mode);
+}
+
+// Cycles through the playout modes while streaming; manual: audio should
+// sound OK in every mode.
+TEST_F(NetEQTest, ManualSetEQPlayoutModeStillProducesOkAudio) {
+  EXPECT_EQ(0, voe_base_->SetNetEQPlayoutMode(channel_, webrtc::kNetEqDefault));
+  TEST_LOG("NetEQ default playout mode enabled => should hear OK audio.\n");
+  Sleep(2000);
+
+  EXPECT_EQ(0, voe_base_->SetNetEQPlayoutMode(
+      channel_, webrtc::kNetEqStreaming));
+  TEST_LOG("NetEQ streaming playout mode enabled => should hear OK audio.\n");
+  Sleep(2000);
+
+  EXPECT_EQ(0, voe_base_->SetNetEQPlayoutMode(channel_, webrtc::kNetEqFax));
+  TEST_LOG("NetEQ fax playout mode enabled => should hear OK audio.\n");
+  Sleep(2000);
+}
diff --git a/voice_engine/test/auto_test/standard/network_before_streaming_test.cc b/voice_engine/test/auto_test/standard/network_before_streaming_test.cc
new file mode 100644
index 0000000..7a41e80
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/network_before_streaming_test.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_initialization_fixture.h"
+
+// Creates a channel that is never configured for sending/receiving, so
+// network queries return their unconfigured defaults.
+// NOTE(review): if AfterInitializationFixture defines SetUp()/TearDown(),
+// these overrides hide them without calling through — verify the fixture
+// does its work in the constructor.
+class NetworkBeforeStreamingTest : public AfterInitializationFixture {
+ protected:
+  void SetUp() {
+    channel_ = voe_base_->CreateChannel();
+  }
+
+  void TearDown() {
+    voe_base_->DeleteChannel(channel_);
+  }
+
+  int channel_;
+};
+
+// Before any packets arrive, the source info must be zero ports and an
+// empty IP string (the non-empty initial values prove it was overwritten).
+TEST_F(NetworkBeforeStreamingTest,
+       GetSourceInfoReturnsEmptyValuesForUnconfiguredChannel) {
+  char src_ip[32] = "0.0.0.0";
+  int src_rtp_port = 1234;
+  int src_rtcp_port = 1235;
+
+  EXPECT_EQ(0, voe_network_->GetSourceInfo(
+      channel_, src_rtp_port, src_rtcp_port, src_ip));
+  EXPECT_EQ(0, src_rtp_port);
+  EXPECT_EQ(0, src_rtcp_port);
+  EXPECT_STRCASEEQ("", src_ip);
+}
+
+// No source filter is configured by default, so the query must return zero
+// ports and an empty IP.
+TEST_F(NetworkBeforeStreamingTest,
+       GetSourceFilterReturnsEmptyValuesForUnconfiguredChannel) {
+  int filter_port = -1;
+  int filter_port_rtcp = -1;
+  char filter_ip[32] = "0.0.0.0";
+
+  EXPECT_EQ(0, voe_network_->GetSourceFilter(
+      channel_, filter_port, filter_port_rtcp, filter_ip));
+
+  EXPECT_EQ(0, filter_port);
+  EXPECT_EQ(0, filter_port_rtcp);
+  EXPECT_STRCASEEQ("", filter_ip);
+}
+
+// Setting a (no-op, port 0) source filter on an idle channel must succeed.
+TEST_F(NetworkBeforeStreamingTest, SetSourceFilterSucceeds) {
+  EXPECT_EQ(0, voe_network_->SetSourceFilter(channel_, 0));
+}
diff --git a/voice_engine/test/auto_test/standard/network_test.cc b/voice_engine/test/auto_test/standard/network_test.cc
new file mode 100644
index 0000000..8cc902d
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/network_test.cc
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fakes/fake_external_transport.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+#include "voice_engine/test/auto_test/voe_test_interface.h"
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+#include "voice_engine/include/mock/mock_voe_connection_observer.h"
+#include "voice_engine/include/mock/mock_voe_observer.h"
+
+static const int kDefaultRtpPort = 8000;
+static const int kDefaultRtcpPort = 8001;
+
+class NetworkTest : public AfterStreamingFixture {
+};
+
+using ::testing::Between;
+
+TEST_F(NetworkTest, GetSourceInfoReturnsPortsAndIpAfterReceivingPackets) {
+ // Give some time to send speech packets.
+ Sleep(200);
+
+ int rtp_port = 0;
+ int rtcp_port = 0;
+ char source_ip[32] = "127.0.0.1";
+
+ EXPECT_EQ(0, voe_network_->GetSourceInfo(channel_, rtp_port, rtcp_port,
+ source_ip));
+
+ EXPECT_EQ(kDefaultRtpPort, rtp_port);
+ EXPECT_EQ(kDefaultRtcpPort, rtcp_port);
+}
+
+TEST_F(NetworkTest, NoFilterIsEnabledByDefault) {
+ int filter_rtp_port = -1;
+ int filter_rtcp_port = -1;
+ char filter_ip[64] = { 0 };
+
+ EXPECT_EQ(0, voe_network_->GetSourceFilter(
+ channel_, filter_rtp_port, filter_rtcp_port, filter_ip));
+
+ EXPECT_EQ(0, filter_rtp_port);
+ EXPECT_EQ(0, filter_rtcp_port);
+ EXPECT_STREQ("", filter_ip);
+}
+
+TEST_F(NetworkTest, ManualCanFilterRtpPort) {
+ TEST_LOG("No filter, should hear audio.\n");
+ Sleep(1000);
+
+ int port_to_block = kDefaultRtpPort + 10;
+ EXPECT_EQ(0, voe_network_->SetSourceFilter(channel_, port_to_block));
+
+ // Changes should take effect immediately.
+ int filter_rtp_port = -1;
+ int filter_rtcp_port = -1;
+ char filter_ip[64] = { 0 };
+
+ EXPECT_EQ(0, voe_network_->GetSourceFilter(
+ channel_, filter_rtp_port, filter_rtcp_port, filter_ip));
+
+ EXPECT_EQ(port_to_block, filter_rtp_port);
+
+ TEST_LOG("Now filtering port %d, should not hear audio.\n", port_to_block);
+ Sleep(1000);
+
+ TEST_LOG("Removing filter, should hear audio.\n");
+ EXPECT_EQ(0, voe_network_->SetSourceFilter(channel_, 0));
+ Sleep(1000);
+}
+
+TEST_F(NetworkTest, ManualCanFilterIp) {
+ TEST_LOG("You should hear audio.\n");
+ Sleep(1000);
+
+ int rtcp_port_to_block = kDefaultRtcpPort + 10;
+ TEST_LOG("Filtering IP 10.10.10.10, should not hear audio.\n");
+ EXPECT_EQ(0, voe_network_->SetSourceFilter(
+ channel_, 0, rtcp_port_to_block, "10.10.10.10"));
+
+ int filter_rtp_port = -1;
+ int filter_rtcp_port = -1;
+ char filter_ip[64] = { 0 };
+ EXPECT_EQ(0, voe_network_->GetSourceFilter(
+ channel_, filter_rtp_port, filter_rtcp_port, filter_ip));
+
+ EXPECT_EQ(0, filter_rtp_port);
+ EXPECT_EQ(rtcp_port_to_block, filter_rtcp_port);
+ EXPECT_STREQ("10.10.10.10", filter_ip);
+}
+
+TEST_F(NetworkTest,
+ CallsObserverOnTimeoutAndRestartWhenPacketTimeoutNotificationIsEnabled) {
+ // First, get rid of the default, asserting observer and install our observer.
+ EXPECT_EQ(0, voe_base_->DeRegisterVoiceEngineObserver());
+ webrtc::MockVoEObserver mock_observer;
+ EXPECT_EQ(0, voe_base_->RegisterVoiceEngineObserver(mock_observer));
+
+ // Define expectations.
+ int expected_error = VE_RECEIVE_PACKET_TIMEOUT;
+ EXPECT_CALL(mock_observer, CallbackOnError(channel_, expected_error))
+ .Times(1);
+ expected_error = VE_PACKET_RECEIPT_RESTARTED;
+ EXPECT_CALL(mock_observer, CallbackOnError(channel_, expected_error))
+ .Times(1);
+
+ // Get some speech going.
+ Sleep(500);
+
+ // Enable packet timeout.
+ EXPECT_EQ(0, voe_network_->SetPacketTimeoutNotification(channel_, true, 1));
+
+ // Trigger a timeout.
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ Sleep(1500);
+
+ // Trigger a restart event.
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+ Sleep(500);
+}
+
+TEST_F(NetworkTest, DoesNotCallDeRegisteredObserver) {
+ // De-register the default observer. This test will fail if the observer gets
+ // called for any reason, so if this de-register doesn't work the test will
+ // fail.
+ EXPECT_EQ(0, voe_base_->DeRegisterVoiceEngineObserver());
+
+ // Get some speech going.
+ Sleep(500);
+
+ // Enable packet timeout.
+ EXPECT_EQ(0, voe_network_->SetPacketTimeoutNotification(channel_, true, 1));
+
+ // Trigger a timeout.
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ Sleep(1500);
+}
+
+TEST_F(NetworkTest, DeadOrAliveObserverSeesAliveMessagesIfEnabled) {
+ if (!FLAGS_include_timing_dependent_tests) {
+    TEST_LOG("Skipping test - running in slow execution environment...\n");
+ return;
+ }
+
+ webrtc::MockVoeConnectionObserver mock_observer;
+ EXPECT_EQ(0, voe_network_->RegisterDeadOrAliveObserver(
+ channel_, mock_observer));
+
+ // We should be called about 4 times in four seconds, but 3 is OK too.
+ EXPECT_CALL(mock_observer, OnPeriodicDeadOrAlive(channel_, true))
+ .Times(Between(3, 4));
+
+ EXPECT_EQ(0, voe_network_->SetPeriodicDeadOrAliveStatus(channel_, true, 1));
+ Sleep(4000);
+
+ EXPECT_EQ(0, voe_network_->DeRegisterDeadOrAliveObserver(channel_));
+}
+
+TEST_F(NetworkTest, DeadOrAliveObserverSeesDeadMessagesIfEnabled) {
+ if (!FLAGS_include_timing_dependent_tests) {
+    TEST_LOG("Skipping test - running in slow execution environment...\n");
+ return;
+ }
+
+ // "When do you see them?" - "All the time!"
+ webrtc::MockVoeConnectionObserver mock_observer;
+ EXPECT_EQ(0, voe_network_->RegisterDeadOrAliveObserver(
+ channel_, mock_observer));
+
+ Sleep(500);
+
+ // We should be called about 4 times in four seconds, but 3 is OK too.
+ EXPECT_CALL(mock_observer, OnPeriodicDeadOrAlive(channel_, false))
+ .Times(Between(3, 4));
+
+ EXPECT_EQ(0, voe_network_->SetPeriodicDeadOrAliveStatus(channel_, true, 1));
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetRTCPStatus(channel_, false));
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ Sleep(4000);
+
+ EXPECT_EQ(0, voe_network_->DeRegisterDeadOrAliveObserver(channel_));
+}
+
+TEST_F(NetworkTest, CanSwitchToExternalTransport) {
+ EXPECT_EQ(0, voe_base_->StopReceive(channel_));
+ EXPECT_EQ(0, voe_base_->DeleteChannel(channel_));
+ channel_ = voe_base_->CreateChannel();
+
+ FakeExternalTransport external_transport(voe_network_);
+ EXPECT_EQ(0, voe_network_->RegisterExternalTransport(
+ channel_, external_transport));
+
+ EXPECT_EQ(0, voe_base_->StartReceive(channel_));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+ EXPECT_EQ(0, voe_base_->StartPlayout(channel_));
+
+ Sleep(1000);
+
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_base_->StopPlayout(channel_));
+ EXPECT_EQ(0, voe_base_->StopReceive(channel_));
+
+ EXPECT_EQ(0, voe_network_->DeRegisterExternalTransport(channel_));
+}
diff --git a/voice_engine/test/auto_test/standard/rtp_rtcp_before_streaming_test.cc b/voice_engine/test/auto_test/standard/rtp_rtcp_before_streaming_test.cc
new file mode 100644
index 0000000..93170f6
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/rtp_rtcp_before_streaming_test.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_initialization_fixture.h"
+
+using namespace webrtc;
+using namespace testing;
+
+class RtpRtcpBeforeStreamingTest : public AfterInitializationFixture {
+ protected:
+ void SetUp();
+ void TearDown();
+
+ int channel_;
+};
+
+void RtpRtcpBeforeStreamingTest::SetUp() {
+ EXPECT_THAT(channel_ = voe_base_->CreateChannel(), Not(Lt(0)));
+}
+
+void RtpRtcpBeforeStreamingTest::TearDown() {
+ EXPECT_EQ(0, voe_base_->DeleteChannel(channel_));
+}
+
+TEST_F(RtpRtcpBeforeStreamingTest,
+ GetRtcpStatusReturnsTrueByDefaultAndObeysSetRtcpStatus) {
+ bool on = false;
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetRTCPStatus(channel_, on));
+ EXPECT_TRUE(on);
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetRTCPStatus(channel_, false));
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetRTCPStatus(channel_, on));
+ EXPECT_FALSE(on);
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetRTCPStatus(channel_, true));
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetRTCPStatus(channel_, on));
+ EXPECT_TRUE(on);
+}
+
+TEST_F(RtpRtcpBeforeStreamingTest, GetLocalSsrcObeysSetLocalSsrc) {
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetLocalSSRC(channel_, 1234));
+ unsigned int result = 0;
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetLocalSSRC(channel_, result));
+ EXPECT_EQ(1234u, result);
+}
diff --git a/voice_engine/test/auto_test/standard/rtp_rtcp_test.cc b/voice_engine/test/auto_test/standard/rtp_rtcp_test.cc
new file mode 100644
index 0000000..a1ecf76
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/rtp_rtcp_test.cc
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "test/testsupport/fileutils.h"
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+
+class TestRtpObserver : public webrtc::VoERTPObserver {
+ public:
+ TestRtpObserver();
+ virtual ~TestRtpObserver();
+ virtual void OnIncomingCSRCChanged(const int channel,
+ const unsigned int CSRC,
+ const bool added);
+ virtual void OnIncomingSSRCChanged(const int channel,
+ const unsigned int SSRC);
+ void Reset();
+ public:
+ unsigned int ssrc_[2];
+ unsigned int csrc_[2][2]; // Stores 2 CSRCs for each channel.
+ bool added_[2][2];
+ int size_[2];
+};
+
+TestRtpObserver::TestRtpObserver() {
+ Reset();
+}
+
+TestRtpObserver::~TestRtpObserver() {
+}
+
+void TestRtpObserver::Reset() {
+ for (int i = 0; i < 2; i++) {
+ ssrc_[i] = 0;
+ csrc_[i][0] = 0;
+ csrc_[i][1] = 0;
+ added_[i][0] = false;
+ added_[i][1] = false;
+ size_[i] = 0;
+ }
+}
+
+void TestRtpObserver::OnIncomingCSRCChanged(const int channel,
+ const unsigned int CSRC,
+ const bool added) {
+ char msg[128];
+ sprintf(msg, "=> OnIncomingCSRCChanged(channel=%d, CSRC=%u, added=%d)\n",
+ channel, CSRC, added);
+ TEST_LOG("%s", msg);
+
+ if (channel > 1)
+    return;  // Channel index out of range for the fixed 2-channel arrays.
+
+ csrc_[channel][size_[channel]] = CSRC;
+ added_[channel][size_[channel]] = added;
+
+ size_[channel]++;
+ if (size_[channel] == 2)
+ size_[channel] = 0;
+}
+
+void TestRtpObserver::OnIncomingSSRCChanged(const int channel,
+ const unsigned int SSRC) {
+ char msg[128];
+ sprintf(msg, "\n=> OnIncomingSSRCChanged(channel=%d, SSRC=%u)\n", channel,
+ SSRC);
+ TEST_LOG("%s", msg);
+
+ ssrc_[channel] = SSRC;
+}
+
+class RtcpAppHandler : public webrtc::VoERTCPObserver {
+ public:
+ void OnApplicationDataReceived(const int channel,
+ const unsigned char sub_type,
+ const unsigned int name,
+ const unsigned char* data,
+ const unsigned short length_in_bytes);
+ void Reset();
+ ~RtcpAppHandler() {}
+ unsigned short length_in_bytes_;
+ unsigned char data_[256];
+ unsigned char sub_type_;
+ unsigned int name_;
+};
+
+
+static const char* const RTCP_CNAME = "Whatever";
+
+class RtpRtcpTest : public AfterStreamingFixture {
+ protected:
+ void SetUp() {
+ // We need a second channel for this test, so set it up.
+ second_channel_ = voe_base_->CreateChannel();
+ EXPECT_GE(second_channel_, 0);
+
+ EXPECT_EQ(0, voe_base_->SetSendDestination(
+ second_channel_, 8002, "127.0.0.1"));
+ EXPECT_EQ(0, voe_base_->SetLocalReceiver(
+ second_channel_, 8002));
+ EXPECT_EQ(0, voe_base_->StartReceive(second_channel_));
+ EXPECT_EQ(0, voe_base_->StartPlayout(second_channel_));
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetLocalSSRC(second_channel_, 5678));
+ EXPECT_EQ(0, voe_base_->StartSend(second_channel_));
+
+ // We'll set up the RTCP CNAME and SSRC to something arbitrary here.
+ voe_rtp_rtcp_->SetRTCP_CNAME(channel_, RTCP_CNAME);
+ }
+
+ void TearDown() {
+ voe_base_->DeleteChannel(second_channel_);
+ }
+
+ int second_channel_;
+};
+
+void RtcpAppHandler::OnApplicationDataReceived(
+ const int /*channel*/, const unsigned char sub_type,
+ const unsigned int name, const unsigned char* data,
+ const unsigned short length_in_bytes) {
+ length_in_bytes_ = length_in_bytes;
+ memcpy(data_, &data[0], length_in_bytes);
+ sub_type_ = sub_type;
+ name_ = name;
+}
+
+void RtcpAppHandler::Reset() {
+ length_in_bytes_ = 0;
+ memset(data_, 0, sizeof(data_));
+ sub_type_ = 0;
+ name_ = 0;
+}
+
+TEST_F(RtpRtcpTest, RemoteRtcpCnameHasPropagatedToRemoteSide) {
+ if (!FLAGS_include_timing_dependent_tests) {
+    TEST_LOG("Skipping test - running in slow execution environment...\n");
+ return;
+ }
+
+ // We need to sleep a bit here for the name to propagate. For instance,
+ // 200 milliseconds is not enough, so we'll go with one second here.
+ Sleep(1000);
+
+ char char_buffer[256];
+ voe_rtp_rtcp_->GetRemoteRTCP_CNAME(channel_, char_buffer);
+ EXPECT_STREQ(RTCP_CNAME, char_buffer);
+}
+
+TEST_F(RtpRtcpTest, SSRCPropagatesCorrectly) {
+ unsigned int local_ssrc = 1234;
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetLocalSSRC(channel_, local_ssrc));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ Sleep(1000);
+
+ unsigned int ssrc;
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetLocalSSRC(channel_, ssrc));
+ EXPECT_EQ(local_ssrc, ssrc);
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->GetRemoteSSRC(channel_, ssrc));
+ EXPECT_EQ(local_ssrc, ssrc);
+}
+
+TEST_F(RtpRtcpTest, RtcpApplicationDefinedPacketsCanBeSentAndReceived) {
+ RtcpAppHandler rtcp_app_handler;
+ EXPECT_EQ(0, voe_rtp_rtcp_->RegisterRTCPObserver(
+ channel_, rtcp_app_handler));
+
+ // Send data aligned to 32 bytes.
+ const char* data = "application-dependent data------";
+ unsigned short data_length = strlen(data);
+ unsigned int data_name = 0x41424344; // 'ABCD' in ascii
+ unsigned char data_subtype = 1;
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->SendApplicationDefinedRTCPPacket(
+ channel_, data_subtype, data_name, data, data_length));
+
+ // Ensure the RTP-RTCP process gets scheduled.
+ Sleep(1000);
+
+ // Ensure we received the data in the callback.
+ EXPECT_EQ(data_length, rtcp_app_handler.length_in_bytes_);
+ EXPECT_EQ(0, memcmp(data, rtcp_app_handler.data_, data_length));
+ EXPECT_EQ(data_name, rtcp_app_handler.name_);
+ EXPECT_EQ(data_subtype, rtcp_app_handler.sub_type_);
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->DeRegisterRTCPObserver(channel_));
+}
+
+TEST_F(RtpRtcpTest, DisabledRtcpObserverDoesNotReceiveData) {
+ RtcpAppHandler rtcp_app_handler;
+ EXPECT_EQ(0, voe_rtp_rtcp_->RegisterRTCPObserver(
+ channel_, rtcp_app_handler));
+
+ // Put observer in a known state before de-registering.
+ rtcp_app_handler.Reset();
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->DeRegisterRTCPObserver(channel_));
+
+ const char* data = "whatever";
+ EXPECT_EQ(0, voe_rtp_rtcp_->SendApplicationDefinedRTCPPacket(
+ channel_, 1, 0x41424344, data, strlen(data)));
+
+ // Ensure the RTP-RTCP process gets scheduled.
+ Sleep(1000);
+
+ // Ensure we received no data.
+ EXPECT_EQ(0u, rtcp_app_handler.name_);
+ EXPECT_EQ(0u, rtcp_app_handler.sub_type_);
+}
+
+TEST_F(RtpRtcpTest, InsertExtraRTPPacketDealsWithInvalidArguments) {
+ const char payload_data[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
+
+ EXPECT_EQ(-1, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ -1, 0, false, payload_data, 8)) <<
+ "Should reject: invalid channel.";
+ EXPECT_EQ(-1, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, -1, false, payload_data, 8)) <<
+ "Should reject: invalid payload type.";
+ EXPECT_EQ(-1, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, 128, false, payload_data, 8)) <<
+ "Should reject: invalid payload type.";
+ EXPECT_EQ(-1, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, 99, false, NULL, 8)) <<
+ "Should reject: bad pointer.";
+ EXPECT_EQ(-1, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, 99, false, payload_data, 1500 - 28 + 1)) <<
+ "Should reject: invalid size.";
+}
+
+TEST_F(RtpRtcpTest, CanTransmitExtraRtpPacketsWithoutError) {
+ const char payload_data[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
+
+ for (int i = 0; i < 128; ++i) {
+ // Try both with and without the marker bit set
+ EXPECT_EQ(0, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, i, false, payload_data, 8));
+ EXPECT_EQ(0, voe_rtp_rtcp_->InsertExtraRTPPacket(
+ channel_, i, true, payload_data, 8));
+ }
+}
+
+// TODO(xians, phoglund): Re-enable when issue 372 is resolved.
+TEST_F(RtpRtcpTest, DISABLED_CanCreateRtpDumpFilesWithoutError) {
+ // Create two RTP dump files (3 seconds long). You can verify these after
+ // the test using rtpplay or NetEqRTPplay if you like.
+ std::string output_path = webrtc::test::OutputPath();
+ std::string incoming_filename = output_path + "dump_in_3sec.rtp";
+ std::string outgoing_filename = output_path + "dump_out_3sec.rtp";
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->StartRTPDump(
+ channel_, incoming_filename.c_str(), webrtc::kRtpIncoming));
+ EXPECT_EQ(0, voe_rtp_rtcp_->StartRTPDump(
+ channel_, outgoing_filename.c_str(), webrtc::kRtpOutgoing));
+
+ Sleep(3000);
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->StopRTPDump(channel_, webrtc::kRtpIncoming));
+ EXPECT_EQ(0, voe_rtp_rtcp_->StopRTPDump(channel_, webrtc::kRtpOutgoing));
+}
+
+TEST_F(RtpRtcpTest, ObserverGetsNotifiedOnSsrcChange) {
+ TestRtpObserver rtcp_observer;
+ EXPECT_EQ(0, voe_rtp_rtcp_->RegisterRTPObserver(
+ channel_, rtcp_observer));
+ rtcp_observer.Reset();
+
+ unsigned int new_ssrc = 7777;
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetLocalSSRC(channel_, new_ssrc));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ Sleep(500);
+
+ // Verify we got the new SSRC.
+ EXPECT_EQ(new_ssrc, rtcp_observer.ssrc_[0]);
+
+ // Now try another SSRC.
+ unsigned int newer_ssrc = 1717;
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_rtp_rtcp_->SetLocalSSRC(channel_, newer_ssrc));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ Sleep(500);
+
+ EXPECT_EQ(newer_ssrc, rtcp_observer.ssrc_[0]);
+
+ EXPECT_EQ(0, voe_rtp_rtcp_->DeRegisterRTPObserver(channel_));
+}
diff --git a/voice_engine/test/auto_test/standard/video_sync_test.cc b/voice_engine/test/auto_test/standard/video_sync_test.cc
new file mode 100644
index 0000000..b8aedcd
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/video_sync_test.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <cmath>
+#include <numeric>
+#include <vector>
+
+#include "voice_engine/test/auto_test/fixtures/after_streaming_fixture.h"
+
+#ifdef WEBRTC_IOS
+ const int kMinimumReasonableDelayEstimateMs = 30;
+#else
+ const int kMinimumReasonableDelayEstimateMs = 45;
+#endif  // WEBRTC_IOS
+
+class VideoSyncTest : public AfterStreamingFixture {
+ protected:
+ // This test will verify that delay estimates converge (e.g. the standard
+ // deviation for the last five seconds' estimates is less than 20) without
+ // manual observation. The test runs for 15 seconds, sampling once per second.
+ // All samples are checked so they are greater than |min_estimate|.
+ int CollectEstimatesDuring15Seconds(int min_estimate) {
+ Sleep(1000);
+
+ std::vector<int> all_delay_estimates;
+ for (int second = 0; second < 15; second++) {
+ int delay_estimate = 0;
+ EXPECT_EQ(0, voe_vsync_->GetDelayEstimate(channel_, delay_estimate));
+
+ EXPECT_GT(delay_estimate, min_estimate) <<
+ "The delay estimate can not conceivably get lower than " <<
+ min_estimate << " ms, it's unrealistic.";
+
+ all_delay_estimates.push_back(delay_estimate);
+ Sleep(1000);
+ }
+
+ return ComputeStandardDeviation(
+ all_delay_estimates.begin() + 10, all_delay_estimates.end());
+ }
+
+ void CheckEstimatesConvergeReasonablyWell(int min_estimate) {
+ float standard_deviation = CollectEstimatesDuring15Seconds(min_estimate);
+ EXPECT_LT(standard_deviation, 30.0f);
+ }
+
+ // Computes the standard deviation by first estimating the sample variance
+ // with an unbiased estimator.
+ float ComputeStandardDeviation(std::vector<int>::const_iterator start,
+ std::vector<int>::const_iterator end) const {
+ int num_elements = end - start;
+ int mean = std::accumulate(start, end, 0) / num_elements;
+ assert(num_elements > 1);
+
+ float variance = 0;
+ for (; start != end; ++start) {
+ variance += (*start - mean) * (*start - mean) / (num_elements - 1);
+ }
+ return std::sqrt(variance);
+ }
+};
+
+TEST_F(VideoSyncTest, CanGetPlayoutTimestampWhilePlayingWithoutSettingItFirst) {
+ unsigned int ignored;
+ EXPECT_EQ(0, voe_vsync_->GetPlayoutTimestamp(channel_, ignored));
+}
+
+TEST_F(VideoSyncTest, CannotSetInitTimestampWhilePlaying) {
+ EXPECT_EQ(-1, voe_vsync_->SetInitTimestamp(channel_, 12345));
+}
+
+TEST_F(VideoSyncTest, CannotSetInitSequenceNumberWhilePlaying) {
+ EXPECT_EQ(-1, voe_vsync_->SetInitSequenceNumber(channel_, 123));
+}
+
+TEST_F(VideoSyncTest, CanSetInitTimestampWhileStopped) {
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_vsync_->SetInitTimestamp(channel_, 12345));
+}
+
+TEST_F(VideoSyncTest, CanSetInitSequenceNumberWhileStopped) {
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_vsync_->SetInitSequenceNumber(channel_, 123));
+}
+
+// TODO(phoglund): pending investigation in
+// http://code.google.com/p/webrtc/issues/detail?id=438
+TEST_F(VideoSyncTest,
+ DISABLED_DelayEstimatesStabilizeDuring15sAndAreNotTooLow) {
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_vsync_->SetInitTimestamp(channel_, 12345));
+ EXPECT_EQ(0, voe_vsync_->SetInitSequenceNumber(channel_, 123));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ CheckEstimatesConvergeReasonablyWell(kMinimumReasonableDelayEstimateMs);
+}
+
+// TODO(phoglund): pending investigation in
+// http://code.google.com/p/webrtc/issues/detail?id=438
+TEST_F(VideoSyncTest,
+ DISABLED_DelayEstimatesStabilizeAfterNetEqMinDelayChanges45s) {
+ EXPECT_EQ(0, voe_base_->StopSend(channel_));
+ EXPECT_EQ(0, voe_vsync_->SetInitTimestamp(channel_, 12345));
+ EXPECT_EQ(0, voe_vsync_->SetInitSequenceNumber(channel_, 123));
+ EXPECT_EQ(0, voe_base_->StartSend(channel_));
+
+ CheckEstimatesConvergeReasonablyWell(kMinimumReasonableDelayEstimateMs);
+ EXPECT_EQ(0, voe_vsync_->SetMinimumPlayoutDelay(channel_, 200));
+ CheckEstimatesConvergeReasonablyWell(kMinimumReasonableDelayEstimateMs);
+ EXPECT_EQ(0, voe_vsync_->SetMinimumPlayoutDelay(channel_, 0));
+ CheckEstimatesConvergeReasonablyWell(kMinimumReasonableDelayEstimateMs);
+}
+
+#if !defined(WEBRTC_ANDROID)
+TEST_F(VideoSyncTest, CanGetPlayoutBufferSize) {
+ int ignored;
+ EXPECT_EQ(0, voe_vsync_->GetPlayoutBufferSize(ignored));
+}
+#endif  // !WEBRTC_ANDROID
diff --git a/voice_engine/test/auto_test/standard/voe_base_misc_test.cc b/voice_engine/test/auto_test/standard/voe_base_misc_test.cc
new file mode 100644
index 0000000..f96d2c1
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/voe_base_misc_test.cc
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/fixtures/before_initialization_fixture.h"
+
+#include <cstring>
+
+class VoeBaseMiscTest : public BeforeInitializationFixture {
+};
+
+using namespace testing;
+
+TEST_F(VoeBaseMiscTest, MaxNumChannelsIs32) {
+ EXPECT_EQ(32, voe_base_->MaxNumOfChannels());
+}
+
+TEST_F(VoeBaseMiscTest, GetVersionPrintsSomeUsefulInformation) {
+ char char_buffer[1024];
+ memset(char_buffer, 0, sizeof(char_buffer));
+ EXPECT_EQ(0, voe_base_->GetVersion(char_buffer));
+ EXPECT_THAT(char_buffer, ContainsRegex("VoiceEngine"));
+}
diff --git a/voice_engine/test/auto_test/standard/volume_test.cc b/voice_engine/test/auto_test/standard/volume_test.cc
new file mode 100644
index 0000000..fda867d
--- /dev/null
+++ b/voice_engine/test/auto_test/standard/volume_test.cc
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "after_streaming_fixture.h"
+
+#ifdef WEBRTC_LINUX
+#define DISABLED_ON_LINUX(test) DISABLED_##test
+#else
+#define DISABLED_ON_LINUX(test) test
+#endif
+
+class VolumeTest : public AfterStreamingFixture {
+};
+
+// TODO(phoglund): a number of tests are disabled here on Linux, all pending
+// investigation in
+// http://code.google.com/p/webrtc/issues/detail?id=367
+
+TEST_F(VolumeTest, DefaultSpeakerVolumeIsAtMost255) {
+ unsigned int volume = 1000;
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_LE(volume, 255u);
+}
+
+TEST_F(VolumeTest, SetVolumeBeforePlayoutWorks) {
+ // This is a rather specialized test, intended to exercise some PulseAudio
+ // code. However, these conditions should be satisfied on any platform.
+ unsigned int original_volume = 0;
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(original_volume));
+ Sleep(1000);
+
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(200));
+ unsigned int volume;
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_EQ(200u, volume);
+
+ PausePlaying();
+ ResumePlaying();
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ // Ensure the volume has not changed after resuming playout.
+ EXPECT_EQ(200u, volume);
+
+ PausePlaying();
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(100));
+ ResumePlaying();
+ // Ensure the volume set while paused is retained.
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_EQ(100u, volume);
+
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(original_volume));
+}
+
+TEST_F(VolumeTest, ManualSetVolumeWorks) {
+ unsigned int original_volume = 0;
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(original_volume));
+ Sleep(1000);
+
+ TEST_LOG("Setting speaker volume to 0 out of 255.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(0));
+ unsigned int volume;
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_EQ(0u, volume);
+ Sleep(1000);
+
+ TEST_LOG("Setting speaker volume to 100 out of 255.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(100));
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_EQ(100u, volume);
+ Sleep(1000);
+
+ // Set the volume to 255 very briefly so we don't blast the poor user
+ // listening to this. This is just to test the call succeeds.
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(255));
+ EXPECT_EQ(0, voe_volume_control_->GetSpeakerVolume(volume));
+ EXPECT_EQ(255u, volume);
+
+  TEST_LOG("Setting speaker volume to the original %u out of 255.\n",
+ original_volume);
+ EXPECT_EQ(0, voe_volume_control_->SetSpeakerVolume(original_volume));
+ Sleep(1000);
+}
+
+#if !defined(WEBRTC_IOS)
+
+TEST_F(VolumeTest, DISABLED_ON_LINUX(DefaultMicrophoneVolumeIsAtMost255)) {
+ unsigned int volume = 1000;
+ EXPECT_EQ(0, voe_volume_control_->GetMicVolume(volume));
+ EXPECT_LE(volume, 255u);
+}
+
+TEST_F(VolumeTest, DISABLED_ON_LINUX(
+ ManualRequiresMicrophoneCanSetMicrophoneVolumeWithAcgOff)) {
+ SwitchToManualMicrophone();
+ EXPECT_EQ(0, voe_apm_->SetAgcStatus(false));
+
+ unsigned int original_volume = 0;
+ EXPECT_EQ(0, voe_volume_control_->GetMicVolume(original_volume));
+
+ TEST_LOG("Setting microphone volume to 0.\n");
+  EXPECT_EQ(0, voe_volume_control_->SetMicVolume(0));
+ Sleep(1000);
+ TEST_LOG("Setting microphone volume to 255.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetMicVolume(255));
+ Sleep(1000);
+ TEST_LOG("Setting microphone volume back to saved value.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetMicVolume(original_volume));
+ Sleep(1000);
+}
+
+TEST_F(VolumeTest, ChannelScalingIsOneByDefault) {
+ float scaling = -1.0f;
+
+ EXPECT_EQ(0, voe_volume_control_->GetChannelOutputVolumeScaling(
+ channel_, scaling));
+ EXPECT_FLOAT_EQ(1.0f, scaling);
+}
+
+TEST_F(VolumeTest, ManualCanSetChannelScaling) {
+ EXPECT_EQ(0, voe_volume_control_->SetChannelOutputVolumeScaling(
+ channel_, 0.1f));
+
+ float scaling = 1.0f;
+ EXPECT_EQ(0, voe_volume_control_->GetChannelOutputVolumeScaling(
+ channel_, scaling));
+
+ EXPECT_FLOAT_EQ(0.1f, scaling);
+
+ TEST_LOG("Channel scaling set to 0.1: audio should be barely audible.\n");
+ Sleep(2000);
+}
+
+#endif // !WEBRTC_IOS
+
+#if !defined(WEBRTC_ANDROID) && !defined(WEBRTC_IOS)
+
+TEST_F(VolumeTest, InputMutingIsNotEnabledByDefault) {
+ bool is_muted = true;
+ EXPECT_EQ(0, voe_volume_control_->GetInputMute(channel_, is_muted));
+ EXPECT_FALSE(is_muted);
+}
+
+TEST_F(VolumeTest, DISABLED_ON_LINUX(ManualInputMutingMutesMicrophone)) {
+ SwitchToManualMicrophone();
+
+ // Enable muting.
+ EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, true));
+ bool is_muted = false;
+ EXPECT_EQ(0, voe_volume_control_->GetInputMute(channel_, is_muted));
+ EXPECT_TRUE(is_muted);
+
+ TEST_LOG("Muted: talk into microphone and verify you can't hear yourself.\n");
+ Sleep(2000);
+
+ // Test that we can disable muting.
+ EXPECT_EQ(0, voe_volume_control_->SetInputMute(channel_, false));
+ EXPECT_EQ(0, voe_volume_control_->GetInputMute(channel_, is_muted));
+ EXPECT_FALSE(is_muted);
+
+ TEST_LOG("Unmuted: talk into microphone and verify you can hear yourself.\n");
+ Sleep(2000);
+}
+
+TEST_F(VolumeTest, DISABLED_ON_LINUX(SystemInputMutingIsNotEnabledByDefault)) {
+ bool is_muted = true;
+ EXPECT_EQ(0, voe_volume_control_->GetSystemInputMute(is_muted));
+ EXPECT_FALSE(is_muted);
+}
+
+TEST_F(VolumeTest, DISABLED_ON_LINUX(ManualSystemInputMutingMutesMicrophone)) {
+ SwitchToManualMicrophone();
+
+ // Enable system input muting.
+ EXPECT_EQ(0, voe_volume_control_->SetSystemInputMute(true));
+ bool is_muted = false;
+ EXPECT_EQ(0, voe_volume_control_->GetSystemInputMute(is_muted));
+ EXPECT_TRUE(is_muted);
+
+ TEST_LOG("Muted: talk into microphone and verify you can't hear yourself.\n");
+ Sleep(2000);
+
+ // Test that we can disable system input muting.
+ EXPECT_EQ(0, voe_volume_control_->SetSystemInputMute(false));
+ EXPECT_EQ(0, voe_volume_control_->GetSystemInputMute(is_muted));
+ EXPECT_FALSE(is_muted);
+
+ TEST_LOG("Unmuted: talk into microphone and verify you can hear yourself.\n");
+ Sleep(2000);
+}
+
+TEST_F(VolumeTest, SystemOutputMutingIsNotEnabledByDefault) {
+ bool is_muted = true;
+ EXPECT_EQ(0, voe_volume_control_->GetSystemOutputMute(is_muted));
+ EXPECT_FALSE(is_muted);
+}
+
+TEST_F(VolumeTest, ManualSystemOutputMutingMutesOutput) {
+ // Enable muting.
+ EXPECT_EQ(0, voe_volume_control_->SetSystemOutputMute(true));
+ bool is_muted = false;
+ EXPECT_EQ(0, voe_volume_control_->GetSystemOutputMute(is_muted));
+ EXPECT_TRUE(is_muted);
+
+ TEST_LOG("Muted: you should hear no audio.\n");
+ Sleep(2000);
+
+ // Test that we can disable muting.
+ EXPECT_EQ(0, voe_volume_control_->SetSystemOutputMute(false));
+ EXPECT_EQ(0, voe_volume_control_->GetSystemOutputMute(is_muted));
+ EXPECT_FALSE(is_muted);
+
+ TEST_LOG("Unmuted: you should hear audio.\n");
+ Sleep(2000);
+}
+
+TEST_F(VolumeTest, ManualTestInputAndOutputLevels) {
+ SwitchToManualMicrophone();
+
+ TEST_LOG("Speak and verify that the following levels look right:\n");
+ for (int i = 0; i < 5; i++) {
+ Sleep(1000);
+ unsigned int input_level = 0;
+ unsigned int output_level = 0;
+ unsigned int input_level_full_range = 0;
+ unsigned int output_level_full_range = 0;
+
+ EXPECT_EQ(0, voe_volume_control_->GetSpeechInputLevel(
+ input_level));
+ EXPECT_EQ(0, voe_volume_control_->GetSpeechOutputLevel(
+ channel_, output_level));
+ EXPECT_EQ(0, voe_volume_control_->GetSpeechInputLevelFullRange(
+ input_level_full_range));
+ EXPECT_EQ(0, voe_volume_control_->GetSpeechOutputLevelFullRange(
+ channel_, output_level_full_range));
+
+ TEST_LOG(" warped levels (0-9) : in=%5d, out=%5d\n",
+ input_level, output_level);
+ TEST_LOG(" linear levels (0-32768): in=%5d, out=%5d\n",
+ input_level_full_range, output_level_full_range);
+ }
+}
+
+TEST_F(VolumeTest, ChannelsAreNotPannedByDefault) {
+ float left = -1.0;
+ float right = -1.0;
+
+ EXPECT_EQ(0, voe_volume_control_->GetOutputVolumePan(channel_, left, right));
+ EXPECT_FLOAT_EQ(1.0, left);
+ EXPECT_FLOAT_EQ(1.0, right);
+}
+
+TEST_F(VolumeTest, ManualTestChannelPanning) {
+ TEST_LOG("Panning left.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetOutputVolumePan(channel_, 0.8f, 0.1f));
+ Sleep(1000);
+
+ TEST_LOG("Back to center.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetOutputVolumePan(channel_, 1.0f, 1.0f));
+ Sleep(1000);
+
+ TEST_LOG("Panning right.\n");
+ EXPECT_EQ(0, voe_volume_control_->SetOutputVolumePan(channel_, 0.1f, 0.8f));
+ Sleep(1000);
+
+ // To finish, verify that the getter works.
+ float left = 0.0f;
+ float right = 0.0f;
+
+ EXPECT_EQ(0, voe_volume_control_->GetOutputVolumePan(channel_, left, right));
+ EXPECT_FLOAT_EQ(0.1f, left);
+ EXPECT_FLOAT_EQ(0.8f, right);
+}
+
+#endif // !WEBRTC_ANDROID && !WEBRTC_IOS
diff --git a/voice_engine/test/auto_test/voe_cpu_test.cc b/voice_engine/test/auto_test/voe_cpu_test.cc
new file mode 100644
index 0000000..14e4a00
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_cpu_test.cc
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+#include <conio.h>
+#endif
+
+#include "voe_cpu_test.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+// Evaluates |expr|; on a non-zero (error) result it logs the failing line
+// and expression, prints the engine's last error code, pauses, and aborts
+// the enclosing function with -1.
+// NOTE(review): relies on a local |base| pointer and the PAUSE macro being
+// in scope at every use site.
+#define CHECK(expr) \
+  if (expr) \
+  { \
+    printf("Error at line: %i, %s \n", __LINE__, #expr); \
+    printf("Error code: %i \n", base->LastError()); \
+    PAUSE \
+    return -1; \
+  }
+
+// Binds this CPU test to the shared test manager; no other setup is needed.
+// |mgr| must outlive this object since only a reference is stored.
+VoECpuTest::VoECpuTest(VoETestManager& mgr) : _mgr(mgr) {}
+
+// Sets up a full-duplex iSAC super-wideband loopback call on 127.0.0.1 with
+// VAD, AGC, NS and AEC enabled, then pauses so CPU and memory consumption
+// can be measured manually. Returns 0 on success; any failing engine call
+// aborts with -1 via CHECK.
+int VoECpuTest::DoTest() {
+  printf("------------------------------------------------\n");
+  printf(" CPU Reference Test\n");
+  printf("------------------------------------------------\n");
+
+  // Borrowed sub-API pointers; owned by the test manager.
+  VoEBase* base = _mgr.BasePtr();
+  VoEFile* file = _mgr.FilePtr();
+  VoECodec* codec = _mgr.CodecPtr();
+  VoEAudioProcessing* apm = _mgr.APMPtr();
+
+  int channel(-1);
+  CodecInst isac;
+
+  // iSAC at 32 kHz (super-wideband); rate -1 presumably selects the
+  // codec's adaptive/default rate -- TODO confirm against CodecInst docs.
+  isac.pltype = 104;
+  strcpy(isac.plname, "ISAC");
+  isac.pacsize = 960;
+  isac.plfreq = 32000;
+  isac.channels = 1;
+  isac.rate = -1;
+
+  CHECK(base->Init());
+  channel = base->CreateChannel();
+
+  // Loop the channel back to itself over UDP port 5566.
+  CHECK(base->SetLocalReceiver(channel, 5566));
+  CHECK(base->SetSendDestination(channel, 5566, "127.0.0.1"));
+  CHECK(codec->SetRecPayloadType(channel, isac));
+  CHECK(codec->SetSendCodec(channel, isac));
+
+  CHECK(base->StartReceive(channel));
+  CHECK(base->StartPlayout(channel));
+  CHECK(base->StartSend(channel));
+  // Use the standard audio file as microphone input (looped, as mic).
+  CHECK(file->StartPlayingFileAsMicrophone(channel, _mgr.AudioFilename(),
+          true, true));
+
+  // Enable the full audio-processing chain to make the load realistic.
+  CHECK(codec->SetVADStatus(channel, true));
+  CHECK(apm->SetAgcStatus(true, kAgcAdaptiveAnalog));
+  CHECK(apm->SetNsStatus(true, kNsModerateSuppression));
+  CHECK(apm->SetEcStatus(true, kEcAec));
+
+  TEST_LOG("\nMeasure CPU and memory while running a full-duplex"
+    " iSAC-swb call.\n\n");
+
+  PAUSE
+
+  // Tear down in reverse order of startup.
+  CHECK(base->StopSend(channel));
+  CHECK(base->StopPlayout(channel));
+  CHECK(base->StopReceive(channel));
+
+  base->DeleteChannel(channel);
+  CHECK(base->Terminate());
+
+  return 0;
+}
+
+} // namespace voetest
diff --git a/voice_engine/test/auto_test/voe_cpu_test.h b/voice_engine/test/auto_test/voe_cpu_test.h
new file mode 100644
index 0000000..f883075
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_cpu_test.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
+
+#include "voe_standard_test.h"
+
+namespace voetest {
+
+class VoETestManager;
+
+// Drives a single full-duplex loopback call (see DoTest) so that CPU and
+// memory usage of the voice engine can be measured by hand.
+class VoECpuTest {
+ public:
+  // |mgr| must outlive this object; only a reference is stored.
+  VoECpuTest(VoETestManager& mgr);
+  ~VoECpuTest() {}
+  // Runs the reference call; returns 0 on success, -1 on engine error.
+  int DoTest();
+ private:
+  VoETestManager& _mgr;  // shared test manager, not owned
+};
+
+} // namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CPU_TEST_H
diff --git a/voice_engine/test/auto_test/voe_extended_test.cc b/voice_engine/test/auto_test/voe_extended_test.cc
new file mode 100644
index 0000000..a9d1e29
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_extended_test.cc
@@ -0,0 +1,7524 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <vector>
+
+#include "system_wrappers/interface/critical_section_wrapper.h"
+#include "system_wrappers/interface/event_wrapper.h"
+#include "system_wrappers/interface/ref_count.h"
+#include "system_wrappers/interface/thread_wrapper.h"
+#include "testsupport/fileutils.h"
+#include "voice_engine/voice_engine_defines.h"
+#include "voice_engine/test/auto_test/voe_extended_test.h"
+
+#if defined(_WIN32)
+#include <conio.h>
+#include <winsock2.h>
+#elif defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+#include <netdb.h>
+#endif
+
+using namespace webrtc;
+
+namespace voetest {
+
+// Set this flag to ensure that test packets are transmitted to
+// RemoteIP::RemotePort during tests of SetSendToS and SetSendGQos. Requires
+// receiver at the remote side and Wireshark with a proper ip.src filter.
+#define _SEND_TO_REMOTE_IP_
+
+#ifdef _SEND_TO_REMOTE_IP_
+const int RemotePort = 12345; // transmit to this UDP port
+const char* RemoteIP = "192.168.200.1"; // transmit to this IP address
+#endif
+
+#ifdef WEBRTC_IOS
+#define SLEEP_IF_IPHONE(x) SLEEP(x)
+#else
+#define SLEEP_IF_IPHONE(x)
+#endif
+
+#ifdef WEBRTC_ANDROID
+// Global pointers
+extern void* globalJavaVM;
+extern void* globalContext;
+#endif
+
+// ----------------------------------------------------------------------------
+// External AudioDeviceModule implementation
+// ----------------------------------------------------------------------------
+
+// static
+// Factory for the mock ADM. The returned module carries one reference;
+// balance it with Destroy().
+AudioDeviceModuleImpl* AudioDeviceModuleImpl::Create() {
+  // operator new throws std::bad_alloc on failure instead of returning
+  // NULL, so the previous null check was dead code and has been removed.
+  AudioDeviceModuleImpl* xADM = new AudioDeviceModuleImpl();
+  xADM->AddRef();
+  return xADM;
+}
+
+// static
+// Drops one reference on |adm| and deletes it only when the count reaches
+// zero (Release() itself never self-destructs in this mock).
+// Returns true if |adm| was deleted, false if it is NULL or still referenced.
+bool AudioDeviceModuleImpl::Destroy(AudioDeviceModuleImpl* adm) {
+  if (!adm)
+    return false;
+  int32_t count = adm->Release();
+  if (count != 0) {
+    return false;
+  } else {
+    delete adm;
+    return true;
+  }
+}
+
+// A freshly constructed mock ADM starts with no outstanding references.
+AudioDeviceModuleImpl::AudioDeviceModuleImpl() : _ref_count(0) {}
+
+// Nothing to release; the mock owns no resources.
+AudioDeviceModuleImpl::~AudioDeviceModuleImpl() {
+}
+
+// Increments and returns the (non-atomic) reference counter.
+int32_t AudioDeviceModuleImpl::AddRef() {
+  return ++_ref_count;
+}
+
+// Decrements and returns the reference counter without deleting the
+// object, so tests can inspect the count while the mock is still alive.
+int32_t AudioDeviceModuleImpl::Release() {
+  // Avoid self destruction in this mock implementation.
+  // Ensures that we can always check the reference counter while alive.
+  return --_ref_count;
+}
+
+// ----------------------------------------------------------------------------
+// External transport (Transport) implementations:
+// ----------------------------------------------------------------------------
+
+// Spawns a high-priority worker thread (entry point Run) that loops
+// buffered RTP packets back into VoENetwork. |ptr| is borrowed, not owned.
+ExtendedTestTransport::ExtendedTestTransport(VoENetwork* ptr) :
+    myNetw(ptr), _thread(NULL), _lock(NULL), _event(NULL), _length(0),
+    _channel(0) {
+  const char* threadName = "voe_extended_test_external_thread";
+  _lock = CriticalSectionWrapper::CreateCriticalSection();
+  _event = EventWrapper::Create();
+  _thread = ThreadWrapper::CreateThread(Run, this, kHighPriority, threadName);
+  if (_thread) {
+    unsigned int id;  // thread-id out-param; not used afterwards
+    _thread->Start(id);
+  }
+}
+
+// Stops the worker thread and frees the thread, event and lock.
+// NOTE(review): if Stop() fails the thread may still be running, so the
+// three objects are deliberately leaked rather than deleted out from under
+// a live thread.
+ExtendedTestTransport::~ExtendedTestTransport() {
+  if (_thread) {
+    _thread->SetNotAlive();
+    _event->Set();  // wake the thread so Stop() can complete promptly
+    if (_thread->Stop()) {
+      delete _thread;
+      _thread = NULL;
+      delete _event;
+      _event = NULL;
+      delete _lock;
+      _lock = NULL;
+    }
+  }
+}
+
+// Static thread entry point: forwards to the instance's Process() loop.
+bool ExtendedTestTransport::Run(void* ptr) {
+  ExtendedTestTransport* self = static_cast<ExtendedTestTransport*>(ptr);
+  return self->Process();
+}
+
+// Worker-thread loop body. Waits up to 500 ms for SendPacket() to signal a
+// buffered packet and, when one arrives, replays it into VoENetwork under
+// the lock. Always returns true so the thread keeps running, even after
+// kEventError.
+bool ExtendedTestTransport::Process() {
+  switch (_event->Wait(500)) {
+    case kEventSignaled:
+      _lock->Enter();
+      myNetw->ReceivedRTPPacket(_channel, _packetBuffer, _length);
+      _lock->Leave();
+      return true;
+    case kEventTimeout:
+      return true;
+    case kEventError:
+      break;
+  }
+  return true;
+}
+
+// Transport callback: copies the outgoing RTP packet into _packetBuffer and
+// signals the worker thread, which loops it back via ReceivedRTPPacket().
+// Returns |len| unconditionally, as a real transport would.
+// NOTE(review): packets of 1612 bytes or more are silently dropped --
+// presumably 1612 is sizeof(_packetBuffer); confirm against the header.
+int ExtendedTestTransport::SendPacket(int channel, const void *data, int len) {
+  _lock->Enter();
+  if (len < 1612) {
+    memcpy(_packetBuffer, (const unsigned char*) data, len);
+    _length = len;
+    _channel = channel;
+  }
+  _lock->Leave();
+  _event->Set(); // triggers ReceivedRTPPacket() from worker thread
+  return len;
+}
+
+// RTCP is looped straight back synchronously on the caller's thread
+// (unlike RTP, which goes through the worker thread).
+int ExtendedTestTransport::SendRTCPPacket(int channel, const void *data, int len) {
+  myNetw->ReceivedRTCPPacket(channel, data, len);
+  return len;
+}
+
+// Stores the (non-owned) network and file interfaces for later use.
+XTransport::XTransport(VoENetwork* netw, VoEFile* file)
+    : _netw(netw), _file(file) {}
+
+// Sink transport: intentionally discards outgoing RTP. The commented-out
+// line shows how to enable loopback if a test needs it.
+int XTransport::SendPacket(int channel, const void *data, int len) {
+  // loopback
+  // _netw->ReceivedRTPPacket(channel, data, len);
+
+  return 0;
+}
+
+// Sink transport: outgoing RTCP is dropped as well.
+int XTransport::SendRTCPPacket(int, const void *, int) {
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoERTPObserver
+// ----------------------------------------------------------------------------
+
+// Starts with no SSRC observed yet.
+XRTPObserver::XRTPObserver() :
+    _SSRC(0) {
+}
+
+// No resources to release.
+XRTPObserver::~XRTPObserver() {
+}
+
+// CSRC changes are not of interest to these tests; deliberate no-op.
+void XRTPObserver::OnIncomingCSRCChanged(const int /*channel*/, const unsigned int /*CSRC*/,
+                                         const bool /*added*/) {
+}
+
+// Records the most recently observed SSRC so a test can poll it later.
+void XRTPObserver::OnIncomingSSRCChanged(const int /*channel*/, const unsigned int SSRC) {
+  // char msg[128];
+  // sprintf(msg, "OnIncomingSSRCChanged(channel=%d, SSRC=%lu)\n",
+  //        channel, SSRC);
+  // TEST_LOG(msg);
+
+  _SSRC = SSRC; // skip channel dependency for simplicity
+
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest
+// ----------------------------------------------------------------------------
+
+// Logs the banner that introduces the extended test named |str|.
+// Always returns 0.
+int VoEExtendedTest::PrepareTest(const char* str) const {
+  TEST_LOG("\n\n================================================\n");
+  TEST_LOG("\tExtended *%s* Test\n", str);
+  TEST_LOG("================================================\n\n");
+
+  return 0;
+}
+
+// Logs the banner that closes a successfully completed extended test.
+// Always returns 0.
+int VoEExtendedTest::TestPassed(const char* str) const {
+  TEST_LOG("\n\n------------------------------------------------\n");
+  TEST_LOG("\tExtended *%s* test passed!\n", str);
+  TEST_LOG("------------------------------------------------\n\n");
+
+  return 0;
+}
+
+// Dead-or-alive observer callback: remembers the latest state and logs it.
+void VoEExtendedTest::OnPeriodicDeadOrAlive(const int /*channel*/, const bool alive) {
+  _alive = alive;
+  if (alive) {
+    TEST_LOG("=> ALIVE ");
+  } else {
+    TEST_LOG("=> DEAD ");
+  }
+  fflush(NULL);  // flush all streams so the marker appears immediately
+}
+
+// VoiceEngine observer callback: stores the error code for later checks
+// and logs it prominently. The second (channel) argument is ignored.
+void VoEExtendedTest::CallbackOnError(const int errCode, int) {
+  _errCode = errCode;
+  TEST_LOG("\n************************\n");
+  TEST_LOG(" RUNTIME ERROR: %d \n", errCode);
+  TEST_LOG("************************\n");
+}
+
+// Clears the per-channel media state flags.
+// NOTE(review): assumes the _listening/_playing/_sending arrays hold at
+// least 32 entries -- confirm against the class declaration in the header.
+VoEExtendedTest::VoEExtendedTest(VoETestManager& mgr) :
+    _mgr(mgr) {
+  for (int i = 0; i < 32; i++) {
+    _listening[i] = false;
+    _playing[i] = false;
+    _sending[i] = false;
+  }
+}
+
+// No resources to release.
+VoEExtendedTest::~VoEExtendedTest() {
+}
+
+// Configures |channel| for loopback on 127.0.0.1:|rtpPort| and starts the
+// requested directions (receive, playout, send), tracking each in the
+// per-channel flag arrays so StopMedia() can undo exactly what was started.
+// NOTE(review): the return codes of the VoEBase calls are ignored here.
+void VoEExtendedTest::StartMedia(int channel, int rtpPort, bool listen,
+                                 bool playout, bool send) {
+  VoEBase* voe_base_ = _mgr.BasePtr();
+
+  _listening[channel] = false;
+  _playing[channel] = false;
+  _sending[channel] = false;
+
+  voe_base_->SetLocalReceiver(channel, rtpPort);
+  voe_base_->SetSendDestination(channel, rtpPort, "127.0.0.1");
+  if (listen) {
+    _listening[channel] = true;
+    voe_base_->StartReceive(channel);
+  }
+  if (playout) {
+    _playing[channel] = true;
+    voe_base_->StartPlayout(channel);
+  }
+  if (send) {
+    _sending[channel] = true;
+    voe_base_->StartSend(channel);
+  }
+}
+
+// Stops whatever StartMedia() enabled on |channel| and clears the
+// corresponding flags; directions that were never started are untouched.
+void VoEExtendedTest::StopMedia(int channel) {
+  VoEBase* voe_base_ = _mgr.BasePtr();
+
+  if (_listening[channel]) {
+    _listening[channel] = false;
+    voe_base_->StopReceive(channel);
+  }
+  if (_playing[channel]) {
+    _playing[channel] = false;
+    voe_base_->StopPlayout(channel);
+  }
+  if (_sending[channel]) {
+    _sending[channel] = false;
+    voe_base_->StopSend(channel);
+  }
+}
+
+// Plays out on |channel| for |timeMillisec| ms, optionally feeding the
+// standard audio file as microphone input, then stops playout and the file
+// again. |addTimeMarker| logs the duration in seconds for manual tests.
+void VoEExtendedTest::Play(int channel, unsigned int timeMillisec, bool addFileAsMicrophone,
+                           bool addTimeMarker) {
+  VoEBase* voe_base_ = _mgr.BasePtr();
+  VoEFile* file = _mgr.FilePtr();
+
+  voe_base_->StartPlayout(channel);
+  TEST_LOG("[playing]");
+  fflush(NULL);
+  if (addFileAsMicrophone) {
+    file->StartPlayingFileAsMicrophone(channel, _mgr.AudioFilename(), true, true);
+    TEST_LOG("[file as mic]");
+    fflush(NULL);
+  }
+  if (addTimeMarker) {
+    float dtSec = (float) ((float) timeMillisec / 1000.0);
+    TEST_LOG("[dT=%.1f]", dtSec);
+    fflush(NULL); // print sleep time in seconds
+  }
+  SLEEP(timeMillisec);
+  voe_base_->StopPlayout(channel);
+  file->StopPlayingFileAsMicrophone(channel);
+}
+
+// Sleeps for |timeMillisec| ms, optionally logging the duration in seconds
+// first. Delegates to the global ::Sleep to avoid recursing into itself.
+void VoEExtendedTest::Sleep(unsigned int timeMillisec, bool addMarker) {
+  if (addMarker) {
+    float dtSec = (float) ((float) timeMillisec / 1000.0);
+    TEST_LOG("[dT=%.1f]", dtSec); // print sleep time in seconds
+  }
+  ::Sleep(timeMillisec);
+}
+
+int VoEExtendedTest::TestBase() {
+#ifndef _WIN32
+ // Sleep a bit instead when pause not supported
+#undef PAUSE
+#define PAUSE SLEEP(2000);
+#endif
+
+ PrepareTest("Base");
+
+ // TODO(phoglund): make this an actual instance variable. I think the
+ // macro black magic will make more sense then. This is named like an
+ // instance variable since it is required in order to appease the
+ // gods of darkness.
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoENetwork* netw = _mgr.NetworkPtr();
+#ifdef _TEST_RTP_RTCP_
+ VoERTP_RTCP* rtp = _mgr.RTP_RTCPPtr();
+#endif
+
+ //////////////////////////
+ // SetTraceFileName
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST(SetTraceFileName - SetDebugTraceFileName); ANL();
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(NULL)); MARK();
+ // don't use these files
+ std::string output_path = webrtc::test::OutputPath();
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ (output_path + "VoEBase_trace_dont_use.txt").c_str())); MARK();
+ // use these instead
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(""
+ (output_path + "VoEBase_trace.txt").c_str())); MARK();
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStream |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo)); MARK();
+
+ ANL(); AOK(); ANL(); ANL();
+#endif
+
+ ///////////////////////////////////////
+ // RegisterVoiceEngineObserver
+ // DeRegisterVoiceEngineObserver
+ TEST(SetObserver);
+ ANL();
+
+ TEST_MUSTPASS(voe_base_->RegisterVoiceEngineObserver(*this));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->DeRegisterVoiceEngineObserver());
+ MARK();
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ /////////////////////
+ // GetVersion
+ TEST(GetVersion);
+ ANL();
+
+ char version[1024];
+ // audio device module and AudioProcessing fail to getversion when they
+ // are not initiliazed
+ TEST_MUSTPASS(voe_base_->GetVersion(version));
+ MARK();
+ TEST_LOG("\n-----\n%s\n-----\n", version);
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ ///////////////
+ // Init
+ TEST(Init);
+ ANL();
+
+ TEST_MUSTPASS(voe_base_->Init());
+ MARK();
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ TEST_MUSTPASS(voe_base_->Init());
+ MARK();
+ // ensure that no new memory is allocated at the second call (check
+ // trace file)
+ TEST_MUSTPASS(voe_base_->Init());
+ MARK();
+ TEST_MUSTPASS(voe_base_->Terminate());
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ // verify AEC recording
+ TEST_MUSTPASS(voe_base_->Init());
+ MARK(); // verify output dat-files
+ TEST_MUSTPASS(voe_base_->Terminate());
+#endif
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ ////////////////////
+ // Terminate
+ TEST(Terminate);
+ ANL();
+ TEST_MUSTPASS(voe_base_->Terminate());
+ MARK(); // should be ignored
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->Terminate());
+ MARK(); // should terminate
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // ------------------------------------------------------------------------
+ // >> Init(AudioDeviceModule)
+ //
+ // Note that our mock implementation of the ADM also mocks the
+ // reference counting part. This approach enables us to keep track
+ // of the internal reference counter without checking return values
+ // from the ADM and we also avoid the built-in self destruction.
+ //
+ // TODO(henrika): this test does not verify that external ADM methods
+ // are called by the VoiceEngine once registered. We could extend
+ // the mock implementation and add counters for each ADM API to ensure
+ // that they are called in the correct sequence and the correct number
+ // of times.
+ TEST_LOG("\nTesting: Init in combination with an external ADM\n");
+
+ // Create the ADM and call AddRef within the factory method.
+ AudioDeviceModuleImpl* xADM = AudioDeviceModuleImpl::Create();
+ ASSERT_FALSE(xADM == NULL);
+ ASSERT_TRUE(xADM->ReferenceCounter() == 1);
+
+ // Verify default usage case for external ADM.
+ TEST_MUSTPASS(voe_base_->Init(xADM));MARK();
+ ASSERT_TRUE(xADM->ReferenceCounter() == 2);
+ TEST_MUSTPASS(voe_base_->Terminate());
+ ASSERT_TRUE(xADM->ReferenceCounter() == 1);
+
+ // Our reference-count implementation does not self destruct.
+ // We do it manually here instead by calling Release followed by delete.
+ ASSERT_TRUE(AudioDeviceModuleImpl::Destroy(xADM));
+ ANL();
+ AOK();ANL();
+
+ // >> end of Init(AudioDeviceModule)
+ // ------------------------------------------------------------------------
+
+ ///////////////////////////
+ // MaxNumOfChannels
+ TEST(MaxNumOfChannels);
+ ANL();
+ TEST_MUSTPASS(voe_base_->MaxNumOfChannels() < 0);
+ MARK();
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ ////////////////////////
+ // CreateChannel
+ // DeleteChannel
+
+ int i;
+ int channel;
+ int nChannels(voe_base_->MaxNumOfChannels());
+
+ TEST(CreateChannel);
+ ANL();
+ TEST(DeleteChannel);
+ ANL();
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ channel = voe_base_->CreateChannel();
+ MARK();
+ TEST_MUSTPASS(channel != 0);
+ channel = voe_base_->CreateChannel();
+ MARK();
+ TEST_MUSTPASS(channel != 1);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ MARK();
+ TEST_MUSTPASS(voe_base_->DeleteChannel(1));
+ MARK();
+
+ // create and delete one channel many times
+ for (i = 0; i < 10; i++) {
+ channel = voe_base_->CreateChannel();
+ MARK();
+ TEST_MUSTPASS(channel != 0); // should be 0 each time
+ TEST_MUSTPASS(voe_base_->DeleteChannel(channel));
+ MARK();
+ }
+ // create max number of channels
+ for (i = 0; i < nChannels; i++) {
+ channel = voe_base_->CreateChannel();
+ MARK();
+ TEST_MUSTPASS(channel != i);
+ }
+ channel = voe_base_->CreateChannel();
+ MARK(); // should fail since no more channels can now be created
+ TEST_MUSTPASS(channel != -1);
+
+ int aChannel = (((nChannels - 17) > 0) ? (nChannels - 17) : 0);
+ TEST_MUSTPASS(voe_base_->DeleteChannel(aChannel));
+ MARK();
+ channel = voe_base_->CreateChannel();
+ MARK(); // should reuse channel
+ TEST_MUSTPASS(channel != aChannel);
+
+ // delete all created channels
+ for (i = 0; i < nChannels; i++) {
+ TEST_MUSTPASS(voe_base_->DeleteChannel(i));
+ MARK();
+ }
+
+ // try to delete a non-existing channel
+ TEST_MUSTPASS(-1 != voe_base_->DeleteChannel(aChannel));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // ------------------------------------------------------------------------
+ // >> SetLocalReceiver
+ //
+ // State: VE not initialized, no existing channels
+ TEST_MUSTPASS(voe_base_->Init());
+
+ int ch;
+
+ TEST(SetLocalReceiver);
+ ANL();
+
+ // no channel created yet => should fail
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(0, 100));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ch = voe_base_->CreateChannel();
+
+#ifdef WEBRTC_IOS
+ printf("\nNOTE: Local IP must be set in source code (line %d) \n",
+ __LINE__ + 1);
+ char* localIp = "127.0.0.1";
+#else
+ char localIp[64] = { 0 };
+ TEST_MUSTPASS(netw->GetLocalIP(localIp));
+ MARK();
+ // NOTE: This API is supported on Win, Mac and Linux and may fail or not
+ // return local IP for other platforms.
+#endif
+
+ // trivial invalid function calls
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(ch+1, 12345));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(ch, -1));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR);
+
+ // check conflict with ongoing receiving
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ TEST_ERROR(VE_ALREADY_LISTENING);
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // check conflict with ongoing transmission
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ TEST_ERROR(VE_ALREADY_SENDING);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+
+ // valid function calls
+ // Need to sleep between, otherwise it may fail for unknown reason
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, kVoEDefault, localIp));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, kVoEDefault, NULL,
+ "230.1.2.3"));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, kVoEDefault, localIp,
+ "230.1.2.3"));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, 5555, NULL));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ SLEEP(100);
+
+ // STATE: no media but sockets exists and are binded to 12345 and 12346
+ // respectively
+
+ // Add some dynamic tests as well:
+
+ // ensure that last setting is used (cancels old settings)
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 44444));
+ MARK();
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 54321));
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 54321, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ Play(ch, 1000, true, true);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetLocalReceiver
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> GetLocalReceiver
+ //
+ // State: VE initialized, no existing channels
+ TEST(GetLocalReceiver);
+ ANL();
+
+ int port;
+ char ipaddr[64];
+ int RTCPport;
+
+ ch = voe_base_->CreateChannel();
+
+ // verify non-configured (blank) local receiver
+ TEST_MUSTPASS(voe_base_->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+ MARK();
+ TEST_MUSTPASS(port != 0);
+ TEST_MUSTPASS(RTCPport != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ // check some trivial set/get combinations
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345))
+ TEST_MUSTPASS(voe_base_->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+ MARK();
+ TEST_MUSTPASS(port != 12345);
+ TEST_MUSTPASS(RTCPport != 12346);
+ TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0); // now binded to "any" IP
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, 55555))
+ TEST_MUSTPASS(voe_base_->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+ MARK();
+ TEST_MUSTPASS(port != 12345);
+ TEST_MUSTPASS(RTCPport != 55555);
+ TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0);
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, kVoEDefault, localIp))
+ TEST_MUSTPASS(voe_base_->GetLocalReceiver(ch, port, RTCPport, ipaddr));
+ MARK();
+ TEST_MUSTPASS(port != 12345);
+ TEST_MUSTPASS(RTCPport != 12346);
+ TEST_MUSTPASS(strcmp(ipaddr, localIp) != 0);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of GetLocalReceiver
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetSendDestination
+ //
+ // State: VE initialized, no existing channels
+ TEST(SetSendDestination);
+ ANL();
+
+ // call without existing channel
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ch = voe_base_->CreateChannel();
+
+ // trivial fail tests
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(ch, 65536, "127.0.0.1"));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid RTP port
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(ch, 12345, "127.0.0.1", 65536));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid source port
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(ch, 12345, "127.0.0.1", kVoEDefault,
+ 65536));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR); // invalid RTCP port
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(ch, 12345, "127.0.0.300"));
+ MARK();
+ TEST_ERROR(VE_INVALID_IP_ADDRESS); // invalid IP address
+
+ // sockets must be created first to support multi-cast (not required
+ // otherwise)
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(ch, 55555, "230.0.0.1"));
+ MARK();
+ TEST_ERROR(VE_SOCKET_ERROR);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 55555)); // create sockets
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 55555, "230.0.0.1"));
+ MARK(); // should work now
+
+ voe_base_->DeleteChannel(0);
+ ch = voe_base_->CreateChannel();
+
+ // STATE: one channel created, no sockets exist
+
+ // valid function calls
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1"));
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1", 44444));
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1", kVoEDefault,
+ 55555));
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1", 44444,
+ 55555));
+ MARK();
+
+ voe_base_->DeleteChannel(0);
+ ch = voe_base_->CreateChannel();
+
+ // create receive sockets first and then an extra pair of send sockets
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 44444));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 44444, "127.0.0.1", 11111));
+ MARK(); // binds to 11111
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetSendDestination
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> GetSendDestination
+ //
+ // State: VE initialized, no existing channels
+ TEST(GetSendDestination);
+ ANL();
+
+ int sourcePort;
+
+ ch = voe_base_->CreateChannel();
+
+ // verify non-configured (blank) local receiver
+ TEST_MUSTPASS(voe_base_->GetSendDestination(ch, port, ipaddr, sourcePort,
+ RTCPport));
+ MARK();
+ TEST_MUSTPASS(port != 0);
+ TEST_MUSTPASS(sourcePort != 0);
+ TEST_MUSTPASS(RTCPport != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ // check some trivial set/get combinations
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 44444, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->GetSendDestination(ch, port, ipaddr, sourcePort,
+ RTCPport));
+ MARK();
+ TEST_MUSTPASS(port != 44444);
+ TEST_MUSTPASS(sourcePort != 0); // should be 0 since no local receiver has
+ // NOT been defined yet
+ TEST_MUSTPASS(RTCPport != 44445);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 55555));
+ TEST_MUSTPASS(voe_base_->GetSendDestination(ch, port, ipaddr, sourcePort,
+ RTCPport));
+ MARK();
+ TEST_MUSTPASS(port != 44444);
+ TEST_MUSTPASS(sourcePort != 55555); // should be equal to local port
+ TEST_MUSTPASS(RTCPport != 44445);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ voe_base_->DeleteChannel(0);
+ ch = voe_base_->CreateChannel();
+
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 44444, "127.0.0.1"));
+ // NULL as IP-address input should work as well
+ TEST_MUSTPASS(voe_base_->GetSendDestination(ch, port, NULL, sourcePort,
+ RTCPport));
+ MARK();
+ TEST_MUSTPASS(port != 44444);
+ TEST_MUSTPASS(sourcePort != 0);
+ TEST_MUSTPASS(RTCPport != 44445);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of GetLocalReceiver
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> StartReceive
+ // >> StopReceive
+ //
+ // State: VE initialized, no existing channels
+ TEST(StartReceive);
+ ANL();
+ TEST(StopReceive);
+ ANL();
+
+ // call without existing channel
+ TEST_MUSTPASS(!voe_base_->StartReceive(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(!voe_base_->StopReceive(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ch = voe_base_->CreateChannel();
+
+ // sockets must be created first
+ TEST_MUSTPASS(!voe_base_->StartReceive(0));
+ MARK();
+ TEST_ERROR(VE_SOCKETS_NOT_INITED);
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 55555));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ MARK(); // should work this time
+
+ // enable again (should work)
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ MARK();
+
+ // Stop/Start (should work)
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ MARK();
+
+ // Verify in loopback
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 55555, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ Play(ch, 1000, true, true);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ MARK();
+
+ voe_base_->DeleteChannel(0);
+ ch = voe_base_->CreateChannel();
+
+ // Ensure that it is OK to add delay between SetLocalReceiver and StarListen
+ TEST_LOG("\nspeak after 2 seconds and ensure that no delay is added:\n");
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 55555));
+
+ Sleep(2000, true); // adding emulated delay here
+
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 55555, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ Play(ch, 2000, true, true);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+ ANL();
+
+ // Multi-channel tests
+
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 11111+2*i));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ MARK();
+ }
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ TEST_MUSTPASS(voe_base_->StopReceive(i));
+ MARK();
+ voe_base_->DeleteChannel(i);
+ }
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 11111+2*i));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+ MARK();
+ voe_base_->DeleteChannel(ch);
+ }
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of StartReceive/StopReceive
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> StartPlayout
+ // >> StopPlayout
+ //
+ // State: VE initialized, no existing channels
+ TEST(StartPlayout);
+ ANL();
+ TEST(StopPlayout);
+ ANL();
+
+ // call without existing channel
+ TEST_MUSTPASS(!voe_base_->StartPlayout(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(!voe_base_->StopPlayout(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ch = voe_base_->CreateChannel();
+
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ MARK();
+
+ voe_base_->DeleteChannel(ch);
+
+ // Multi-channel tests
+ const int MaxNumberOfPlayingChannels(kVoiceEngineMaxNumOfActiveChannels);
+
+ for (i = 0; i < MaxNumberOfPlayingChannels; i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ MARK();
+ }
+ for (i = 0; i < MaxNumberOfPlayingChannels; i++) {
+ TEST_MUSTPASS(voe_base_->StopPlayout(i));
+ MARK();
+ voe_base_->DeleteChannel(i);
+ }
+ for (i = 0; i < MaxNumberOfPlayingChannels; i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ MARK();
+ voe_base_->DeleteChannel(ch);
+ }
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of StartPlayout/StopPlayout
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> StartSend
+ // >> StopSend
+ //
+ // State: VE initialized, no existing channels
+ TEST(StartSend);
+ ANL();
+ TEST(StopSend);
+ ANL();
+
+ // call without existing channel
+ TEST_MUSTPASS(!voe_base_->StartSend(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(!voe_base_->StopSend(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ ch = voe_base_->CreateChannel();
+
+ // call without initialized destination
+ TEST_MUSTPASS(!voe_base_->StartSend(ch));
+ MARK();
+ TEST_ERROR(VE_DESTINATION_NOT_INITED);
+
+ // initialize destination and try again (should work even without existing
+ // sockets)
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ MARK();
+ SLEEP(100);
+
+ // STATE: sockets should now have been created automatically at the first
+ // transmitted packet, and should be bound to 33333 and "0.0.0.0"
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ MARK();
+
+ voe_base_->DeleteChannel(ch);
+ ch = voe_base_->CreateChannel();
+
+ // try loopback with unique send sockets (closed when channel is deleted or
+ // new source is set)
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 33333));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333, "127.0.0.1", 44444));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ Play(ch, 2000, true, true);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ voe_base_->DeleteChannel(ch);
+ ANL();
+
+ // Multi-channel tests
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 33333 + 2*i));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33333 + 2*i, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ MARK();
+ }
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ TEST_MUSTPASS(voe_base_->StopSend(i));
+ MARK();
+ voe_base_->DeleteChannel(i);
+ }
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 45633 + 2*i));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 45633 + 2*i, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ MARK();
+ voe_base_->DeleteChannel(ch);
+ }
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of StartSend/StopSend
+ // ------------------------------------------------------------------------
+
+ //////////////////////////////
+ // SetNetEQPlayoutMode
+ // GetNetEQPlayoutMode
+ TEST(SetNetEQPlayoutMode);
+ ANL();
+ TEST(GetNetEQPlayoutMode);
+ ANL();
+
+ NetEqModes mode;
+
+ ch = voe_base_->CreateChannel();
+
+ // invalid function calls (should fail)
+ TEST_MUSTPASS(!voe_base_->GetNetEQPlayoutMode(ch+1, mode));
+ MARK();
+ TEST_MUSTPASS(!voe_base_->SetNetEQPlayoutMode(ch+1, kNetEqDefault));
+ MARK();
+
+ // verify default mode (should be kNetEqDefault)
+ TEST_MUSTPASS(voe_base_->GetNetEQPlayoutMode(ch, mode));
+ MARK();
+ TEST_MUSTPASS(mode != kNetEqDefault);
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(ch, kNetEqStreaming));
+ MARK();
+ voe_base_->DeleteChannel(ch);
+
+ // ensure that default mode is set as soon as new channel is created
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->GetNetEQPlayoutMode(ch, mode));
+ MARK();
+ TEST_MUSTPASS(mode != kNetEqDefault);
+ voe_base_->DeleteChannel(ch);
+
+ // verify Set/Get for all supported modes and max number of channels
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+
+ // verify Set/Get for all supported modes
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(i, kNetEqDefault));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQPlayoutMode(i, mode));
+ MARK();
+ TEST_MUSTPASS(mode != kNetEqDefault);
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(i, kNetEqStreaming));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQPlayoutMode(i, mode));
+ MARK();
+ TEST_MUSTPASS(mode != kNetEqStreaming);
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(i, kNetEqFax));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQPlayoutMode(i, mode));
+ MARK();
+ TEST_MUSTPASS(mode != kNetEqFax);
+ SLEEP(50);
+ }
+
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ voe_base_->DeleteChannel(i);
+ }
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ //////////////////////////////
+ // SetNetEQBGNMode
+ // GetNetEQBGNMode
+ TEST(SetNetEQBGNMode);
+ ANL();
+ TEST(GetNetEQBGNMode);
+ ANL();
+
+ NetEqBgnModes bgnMode;
+
+ ch = voe_base_->CreateChannel();
+
+ // invalid function calls (should fail)
+ TEST_MUSTPASS(!voe_base_->GetNetEQBGNMode(ch+1, bgnMode));
+ MARK();
+ TEST_MUSTPASS(!voe_base_->SetNetEQBGNMode(ch+1, kBgnOn));
+ MARK();
+
+ // verify default mode (should be kBgnOn)
+ TEST_MUSTPASS(voe_base_->GetNetEQBGNMode(ch, bgnMode));
+ MARK();
+ TEST_MUSTPASS(bgnMode != kBgnOn);
+ voe_base_->DeleteChannel(ch);
+
+ // ensure that default mode is set as soon as new channel is created
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->GetNetEQBGNMode(ch, bgnMode));
+ MARK();
+ TEST_MUSTPASS(bgnMode != kBgnOn);
+ voe_base_->DeleteChannel(ch);
+
+ // verify Set/Get for all supported modes and max number of channels
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ ch = voe_base_->CreateChannel();
+
+ // verify Set/Get for all supported modes
+ TEST_MUSTPASS(voe_base_->SetNetEQBGNMode(i, kBgnOn));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQBGNMode(i, bgnMode));
+ MARK();
+ TEST_MUSTPASS(bgnMode != kBgnOn);
+ TEST_MUSTPASS(voe_base_->SetNetEQBGNMode(i, kBgnFade));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQBGNMode(i, bgnMode));
+ MARK();
+ TEST_MUSTPASS(bgnMode != kBgnFade);
+ TEST_MUSTPASS(voe_base_->SetNetEQBGNMode(i, kBgnOff));
+ MARK();
+ TEST_MUSTPASS(voe_base_->GetNetEQBGNMode(i, bgnMode));
+ MARK();
+ TEST_MUSTPASS(bgnMode != kBgnOff);
+ SLEEP(50);
+ }
+
+ for (i = 0; i < voe_base_->MaxNumOfChannels(); i++) {
+ voe_base_->DeleteChannel(i);
+ }
+
+ // Verify real-time performance for all playout modes in full duplex
+
+ ch = voe_base_->CreateChannel();
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch , 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(ch, kNetEqDefault));
+ MARK();
+ TEST_LOG("\nenjoy full duplex using kNetEqDefault playout mode...\n");
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(ch, kNetEqStreaming));
+ MARK();
+ TEST_LOG("\nenjoy full duplex using kNetEqStreaming playout mode...\n");
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->SetNetEQPlayoutMode(ch, kNetEqFax));
+ MARK();
+ TEST_LOG("\nenjoy full duplex using kNetEqFax playout mode...\n");
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ voe_base_->DeleteChannel(ch);
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ /////////////////////
+ // Full duplex tests
+
+ ch = voe_base_->CreateChannel(); // We must delete this channel first to be able
+ // to reuse port 12345
+
+ // start with default case, also test non-default RTCP port
+#ifdef _TEST_RTP_RTCP_
+ TEST_MUSTPASS(rtp->SetRTCP_CNAME(ch, "Johnny"));
+#endif
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345, 12349));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1", kVoEDefault,
+ 12349));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_LOG("full duplex is now activated (1)\n");
+ TEST_LOG("waiting for RTCP packet...\n");
+
+ SLEEP(7000); // Make sure we get RTCP packet
+ PAUSE;
+
+ // Verify that we got RTCP packet from correct source port
+#ifdef _TEST_RTP_RTCP_
+ char tmpStr[64] = { 0 };
+ TEST_MUSTPASS(rtp->GetRemoteRTCP_CNAME(ch, tmpStr));
+ TEST_MUSTPASS(_stricmp("Johnny", tmpStr));
+#endif
+ int rtpPort(0), rtcpPort(0);
+ char ipAddr[64] = { 0 };
+ TEST_MUSTPASS(netw->GetSourceInfo(ch, rtpPort, rtcpPort, ipAddr));
+ TEST_MUSTPASS(12349 != rtcpPort);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // Call StartSend before StartReceive
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_LOG("\nfull duplex is now activated (2)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // Try again using same ports
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_LOG("\nfull duplex is now activated (3)\n");
+ TEST_LOG("waiting for RTCP packet...\n");
+
+ SLEEP(7000); // Make sure we get RTCP packet
+ PAUSE
+
+ // Verify correct RTCP source port
+ TEST_MUSTPASS(netw->GetSourceInfo(ch, rtpPort, rtcpPort, ipAddr));
+ TEST_MUSTPASS(12345+1 != rtcpPort);
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ voe_base_->DeleteChannel(ch);
+ ch = voe_base_->CreateChannel();
+
+ // Try with extra send socket
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch , 22222));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 22222, "127.0.0.1", 11111));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_LOG("\nfull duplex is now activated (4)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // repeat default case starting with a fresh channel
+
+ voe_base_->DeleteChannel(ch);
+ ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch , 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1"));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+
+ TEST_LOG("\nfull duplex is now activated (5)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // restart call again
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 12345));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+
+ TEST_LOG("\nfull duplex is now activated (6)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // force sending from new socket
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch , 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 12345, "127.0.0.1", 12350,
+ 12359));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+ TEST_LOG("\nfull duplex is now activated (7)\n");
+
+ PAUSE
+
+ // Test getting send settings
+ TEST_MUSTPASS(voe_base_->GetSendDestination(ch, rtpPort, ipAddr, sourcePort,
+ rtcpPort));
+ TEST_MUSTPASS(12345 != rtpPort);
+ TEST_MUSTPASS(_stricmp("127.0.0.1", ipAddr));
+ TEST_MUSTPASS(12350 != sourcePort);
+ TEST_MUSTPASS(12359 != rtcpPort);
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ // new channel and new port
+ ch = voe_base_->CreateChannel();
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch , 33221));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33221, "127.0.0.1"));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+
+ TEST_LOG("\nfull duplex is now activated (8)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+
+ voe_base_->DeleteChannel(ch);
+ ch = voe_base_->CreateChannel();
+
+#ifndef WEBRTC_IOS
+ // bind to local IP and try again
+ strcpy(localIp, "127.0.0.1");
+#else
+ localIp = "127.0.0.1";
+#endif
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch, 33221, 12349, localIp));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch, 33221, localIp));
+
+ TEST_MUSTPASS(voe_base_->StartReceive(ch));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StartSend(ch));
+
+ TEST_LOG("\nfull duplex is now activated (9)\n");
+
+ PAUSE
+
+ TEST_MUSTPASS(voe_base_->GetLocalReceiver(ch, rtpPort, rtcpPort, ipAddr));
+ TEST_MUSTPASS(33221 != rtpPort);
+ TEST_MUSTPASS(_stricmp(localIp, ipAddr));
+ TEST_MUSTPASS(12349 != rtcpPort);
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ //////////////////////
+ // Trace filter tests
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST(SetTraceFilter); ANL();
+
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(GetFilename(""
+ "VoEBase_trace_filter.txt").c_str())); MARK();
+ SLEEP(100);
+
+ // Test a few different filters, verify in trace file
+ // Each SetTraceFilter calls should be seen once, no more, no less
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceNone)); MARK();
+ SLEEP(300);
+ // API call and info should NOT be seen in log
+ TEST_MUSTPASS(voe_base_->SetOnHoldStatus(0, true)); MARK();
+ // API call and error should NOT be seen in log
+ TEST_MUSTPASS(!voe_base_->SetOnHoldStatus(999, true)); MARK();
+
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceApiCall |
+ kTraceCritical |
+ kTraceError |
+ kTraceWarning)); MARK();
+ SLEEP(300);
+ // API call should be seen in log and info should NOT be seen in log
+ TEST_MUSTPASS(voe_base_->SetOnHoldStatus(0, false)); MARK();
+ // API call and error should be seen in log
+ TEST_MUSTPASS(!voe_base_->SetOnHoldStatus(999, true)); MARK();
+
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceApiCall | kTraceInfo));
+ MARK();
+ SLEEP(300);
+ // API call and info should be seen in log
+ TEST_MUSTPASS(voe_base_->SetOnHoldStatus(0, true)); MARK();
+ // API call should be seen in log and error should NOT be seen in log
+ TEST_MUSTPASS(!voe_base_->SetOnHoldStatus(999, true)); MARK();
+
+ // Back to default
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceAll)); MARK();
+ SLEEP(300);
+
+ AOK(); ANL();
+#endif
+
+ // ------------------------------------------------------------------------
+ // >> Multiple instance testing
+ //
+ // We should support 8 instances simultaneously
+ // and at least one should be able to have a call running
+
+ // One instance is already created
+ VoiceEngine* instVE[7];
+ VoEBase* baseVE[7];
+ for (int instNum = 0; instNum < 7; instNum++) {
+ instVE[instNum] = VoiceEngine::Create();
+ baseVE[instNum] = VoEBase::GetInterface(instVE[instNum]);
+ TEST_MUSTPASS(baseVE[instNum]->Init());
+ TEST_MUSTPASS(baseVE[instNum]->CreateChannel());
+ }
+
+ TEST_LOG("Created 7 more instances of VE, make sure audio is ok...\n\n");
+ PAUSE
+
+ for (int instNum = 0; instNum < 7; instNum++) {
+ TEST_MUSTPASS(baseVE[instNum]->DeleteChannel(0));
+ TEST_MUSTPASS(baseVE[instNum]->Terminate());
+ baseVE[instNum]->Release();
+ VoiceEngine::Delete(instVE[instNum]);
+ }
+
+ AOK();
+ ANL();
+
+ //////////////
+ // Close down
+ TEST_MUSTPASS(voe_base_->StopSend(ch));
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ TEST_MUSTPASS(voe_base_->StopReceive(ch));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(ch));
+
+ voe_base_->DeleteChannel(0);
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestCallReport
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestCallReport() {
+ // Exercises the VoECallReport sub-API end-to-end over a loopback call:
+ //  - ResetCallReportStatistics argument validation,
+ //  - GetEchoMetricSummary (outputs are -100 in loopback, so values are
+ //    not asserted beyond the call succeeding),
+ //  - GetDeadOrAliveSummary with the periodic dead-or-alive observer,
+ //  - WriteReportToFile, including a UTF-8 encoded file name.
+ // Returns 0 on success, -1 if VoECallReport is not compiled in.
+
+ // Get required sub-API pointers
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoECallReport* report = _mgr.CallReportPtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoEAudioProcessing* apm = _mgr.APMPtr();
+ VoENetwork* netw = _mgr.NetworkPtr();
+
+ PrepareTest("CallReport");
+
+ // check if this interface is supported
+ if (!report) {
+ TEST_LOG("VoECallReport is not supported!");
+ return -1;
+ }
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ GetFilename("VoECallReport_trace.txt").c_str()));
+ // NOTE(review): kTraceStateInfo is listed twice below; the second
+ // occurrence is redundant (bitwise OR of the same flag) but harmless.
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ // Set up a full-duplex loopback call on channel 0 with a file played
+ // as the microphone signal, so the report has real data to summarize.
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+
+ ///////////////////////////
+ // Actual test starts here
+ // Valid channel arguments are 0 (the existing channel) and -1 (all
+ // channels); anything else must be rejected.
+ TEST(ResetCallReportStatistics);
+ ANL();
+ TEST_MUSTPASS(!report->ResetCallReportStatistics(-2));
+ MARK(); // not OK
+ TEST_MUSTPASS(!report->ResetCallReportStatistics(1));
+ MARK(); // not OK
+ TEST_MUSTPASS(report->ResetCallReportStatistics(0));
+ MARK(); // OK
+ TEST_MUSTPASS(report->ResetCallReportStatistics(-1));
+ MARK(); // OK
+ AOK();
+ ANL();
+
+ // Echo metrics default to disabled; enable them, then only verify that
+ // the summary call succeeds (loopback yields -100 for all outputs).
+ bool enabled = false;
+ EchoStatistics echo;
+ TEST(GetEchoMetricSummary);
+ ANL();
+ TEST_MUSTPASS(apm->GetEcMetricsStatus(enabled));
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(apm->SetEcMetricsStatus(true));
+ TEST_MUSTPASS(report->GetEchoMetricSummary(echo)); // all outputs will be
+ // -100 in loopback (skip further tests)
+ AOK();
+ ANL();
+
+ // TODO(xians): investigate the cause of test failure before enabling.
+ /*
+ StatVal delays;
+ TEST(GetRoundTripTimeSummary);
+ ANL();
+ // All values should be >=0 since RTCP is now on
+ TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+ MARK();
+ TEST_MUSTPASS(delays.min == -1);
+ TEST_MUSTPASS(delays.max == -1);
+ TEST_MUSTPASS(delays.average == -1);
+ rtp_rtcp->SetRTCPStatus(0, false);
+ // All values should be -1 since RTCP is off
+ TEST_MUSTPASS(report->GetRoundTripTimeSummary(0, delays));
+ MARK();
+ TEST_MUSTPASS(delays.min != -1);
+ TEST_MUSTPASS(delays.max != -1);
+ TEST_MUSTPASS(delays.average != -1);
+ rtp_rtcp->SetRTCPStatus(0, true);
+ AOK();
+ ANL();
+ */
+
+ int nDead = 0;
+ int nAlive = 0;
+ TEST(GetDeadOrAliveSummary);
+ ANL();
+ // All results should be -1 since dead-or-alive is not active
+ TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive) != -1);
+ MARK();
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+ SLEEP(2000);
+ // All results should be >= 0 since dead-or-alive is active
+ TEST_MUSTPASS(report->GetDeadOrAliveSummary(0, nDead, nAlive));
+ MARK();
+ TEST_MUSTPASS(nDead == -1);
+ TEST_MUSTPASS(nAlive == -1);  // semicolon added; was relying on macro shape
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+ AOK();
+ ANL();
+
+ TEST(WriteReportToFile);
+ ANL();
+
+ // Build a UTF-8 file name byte-by-byte: kappa, lambda, nu, xi + ".txt".
+ // Greek and Coptic (see http://www.utf8-chartable.de/unicode-utf8-table.pl)
+ char fileNameUTF8[64];
+
+ fileNameUTF8[0] = (char) 0xce;
+ fileNameUTF8[1] = (char) 0xba;
+ fileNameUTF8[2] = (char) 0xce;
+ fileNameUTF8[3] = (char) 0xbb;
+ fileNameUTF8[4] = (char) 0xce;
+ fileNameUTF8[5] = (char) 0xbd;
+ fileNameUTF8[6] = (char) 0xce;
+ fileNameUTF8[7] = (char) 0xbe;
+ fileNameUTF8[8] = '.';
+ fileNameUTF8[9] = 't';
+ fileNameUTF8[10] = 'x';
+ fileNameUTF8[11] = 't';
+ fileNameUTF8[12] = 0;
+
+ TEST_MUSTPASS(!report->WriteReportToFile(NULL));
+ MARK();
+ TEST_MUSTPASS(report->WriteReportToFile("call_report.txt"));
+ MARK();
+ TEST_MUSTPASS(report->WriteReportToFile(fileNameUTF8));
+ MARK(); // should work with UTF-8 as well (κλνξ.txt)
+ AOK();
+ ANL();
+
+ // Tear down the loopback call and the engine.
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestCodec
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestCodec() {
+ PrepareTest("Codec");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoECodec* codec = _mgr.CodecPtr();
+ VoEFile* file = _mgr.FilePtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ GetFilename("VoECodec_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+ ExtendedTestTransport* ptrTransport(NULL);
+ ptrTransport = new ExtendedTestTransport(netw);
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+#else
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+#endif
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ int i;
+ int err;
+
+ CodecInst cinst;
+
+ /////////////////////////
+ // GetNumOfCodecs
+
+ int nCodecs;
+
+ TEST(GetNumOfCodecs);
+ ANL();
+ // validate #codecs
+ nCodecs = codec->NumOfCodecs();
+ MARK();
+ TEST_MUSTPASS(nCodecs < 0);
+ AOK();
+ ANL();
+
+ ///////////////////
+ // GetCodec
+ TEST(GetCodec);
+ ANL();
+ // scan all supported codecs
+ nCodecs = codec->NumOfCodecs();
+ for (int index = 0; index < nCodecs; index++) {
+ TEST_MUSTPASS(codec->GetCodec(index, cinst));
+ TEST_LOG("[%2d] %16s: fs=%6d, pt=%4d, rate=%7d, ch=%2d, size=%5d", index, cinst.plname,
+ cinst.plfreq, cinst.pltype, cinst.rate, cinst.channels, cinst.pacsize);
+ if (cinst.pltype == -1) {
+ TEST_LOG(" <= NOTE pt=-1\n");
+ } else {
+ ANL();
+ }
+ }
+
+ // ensure that an invalid index parameter is detected
+ TEST_MUSTPASS(-1 != codec->GetCodec(-1, cinst));
+ nCodecs = codec->NumOfCodecs();
+ TEST_MUSTPASS(-1 != codec->GetCodec(nCodecs, cinst));
+ MARK();
+ // ensure that error code is VE_INVALID_LISTNR
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_LISTNR);
+ AOK();
+ ANL();
+
+ ///////////////////////
+ // GetSendCodec
+ TEST(GetSendCodec);
+ ANL();
+
+ CodecInst defaultCodec;
+
+ // check the channel parameter
+ int nMaxChannels(voe_base_->MaxNumOfChannels());
+ TEST_MUSTPASS(-1 != codec->GetSendCodec(nMaxChannels-1, cinst));
+ MARK(); // not created
+ TEST_MUSTPASS(-1 != codec->GetSendCodec(nMaxChannels, cinst));
+ MARK(); // out of range
+ TEST_MUSTPASS(-1 != codec->GetSendCodec(-1, cinst));
+ MARK(); // out of range
+ TEST_MUSTPASS(codec->GetSendCodec(0, cinst));
+ MARK(); // OK
+
+ nCodecs = codec->NumOfCodecs();
+ for (int index = 0; index < nCodecs; index++) {
+ TEST_MUSTPASS(codec->GetCodec(index, defaultCodec));
+ if (codec->SetSendCodec(0, defaultCodec) == 0) {
+ TEST_MUSTPASS(codec->GetSendCodec(0, cinst));
+ MARK();
+ //TEST_LOG("[%2d] %s: fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n",
+ // index, cinst.plname, cinst.plfreq, cinst.pltype, cinst.rate,
+ // cinst.channels, cinst.pacsize);
+ TEST_MUSTPASS(cinst.pacsize != defaultCodec.pacsize);
+ TEST_MUSTPASS(cinst.plfreq != defaultCodec.plfreq);
+ TEST_MUSTPASS(cinst.pltype != defaultCodec.pltype);
+ TEST_MUSTPASS(cinst.rate != defaultCodec.rate);
+ TEST_MUSTPASS(cinst.channels != defaultCodec.channels);
+ }
+ }
+
+ ANL();
+ AOK();
+ ANL();
+
+ ///////////////////////
+ // SetSendCodec
+ TEST(SetSendCodec);
+ ANL();
+
+ // --- Scan all supported codecs and set default parameters
+
+ nCodecs = codec->NumOfCodecs();
+ for (int index = 0; index < nCodecs; index++) {
+ // Get default (ACM) settings
+ TEST_MUSTPASS(codec->GetCodec(index, cinst));
+ defaultCodec = cinst;
+ TEST_LOG("[%2d] %s (default): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n",
+ index, cinst.plname, cinst.plfreq, cinst.pltype, cinst.rate,
+ cinst.channels, cinst.pacsize);
+
+ // Verify invalid codec names
+ if (!_stricmp("CN", cinst.plname) || !_stricmp("telephone-event",
+ cinst.plname)
+ || !_stricmp("red", cinst.plname)) {
+ // default settings for invalid payload names (should give
+ // VE_INVALID_PLNAME)
+ TEST_MUSTPASS(!codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ continue;
+ }
+
+ // If we build the ACM with more codecs than we have payload types,
+ // some codecs will be given -1 as default payload type. This is a fix
+ // to ensure that we can complete these tests also for this case.
+ if (cinst.pltype == -1) {
+ cinst.pltype = 97;
+ }
+
+ // --- Default settings
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ // --- Packet size
+ TEST_LOG("\npacsize : ");
+
+ for (int pacsize = 80; pacsize < 1440; pacsize += 80) {
+ cinst.pacsize = pacsize;
+ if (-1 != codec->SetSendCodec(0, cinst)) {
+ // log valid packet size
+ TEST_LOG("%d ", pacsize);
+ } else {
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ }
+ cinst.pacsize = defaultCodec.pacsize;
+
+ // --- Audio channels (1/mono or 2/stereo)
+ TEST_LOG("\nchannels: ");
+ for (int channels = 1; channels < 4; channels++) {
+ cinst.channels = channels;
+ if (-1 != codec->SetSendCodec(0, cinst)) {
+ // Valid channels currently.
+ // 1 should always be OK for all codecs.
+ // 2 is OK for stereo codecs and some of mono codecs.
+ TEST_LOG("%d ", channels);
+ } else {
+ // Invalid channels. Currently there should be two cases:
+ // 2 would fail to some mono codecs with VE_CANNOT_SET_SEND_CODEC;
+ // 3(and higher) should always fail with VE_INVALID_ARGUMENT.
+ err = voe_base_->LastError();
+ ASSERT_TRUE((err == VE_INVALID_ARGUMENT)||
+ (err == VE_CANNOT_SET_SEND_CODEC));
+ }
+ }
+ cinst.channels = defaultCodec.channels;
+
+ // --- Payload frequency
+ TEST_LOG("\nplfreq : ");
+ cinst.plfreq = defaultCodec.plfreq;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.plfreq);
+
+ // --- Payload name
+
+ strcpy(cinst.plname, "INVALID");
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ // ensure that error code is VE_INVALID_PLNAME
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+
+ // restore default plname
+ strcpy(cinst.plname, defaultCodec.plname);
+
+ // --- Payload type (dynamic range is 96-127)
+ TEST_LOG("\npltype : ");
+ // All PT should be OK, test a few different
+ cinst.pltype = defaultCodec.pltype;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.pltype);
+ cinst.pltype = defaultCodec.pltype + 1;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.pltype);
+ const int valid_pltypes[4] = { 0, 96, 117, 127 };
+ for (i = 0; i < static_cast<int> (sizeof(valid_pltypes) / sizeof(int)); i++) {
+ cinst.pltype = valid_pltypes[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.pltype);
+ }
+ // Restore default
+ cinst.pltype = defaultCodec.pltype;
+
+ // --- Codec rate
+ TEST_LOG("\nrate : ");
+ if (_stricmp("isac", cinst.plname) == 0) {
+ // ISAC
+ if (cinst.plfreq == 16000) {
+ int valid_rates[3] = { -1, 10000, 32000 };
+ // failed in RegisterPayload when rate is 32000
+ for (i = 0; i < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ cinst.rate = 0; // invalid
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ // ensure that error code is VE_CANNOT_SET_SEND_CODEC
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ ANL();
+ } else //ISACSWB
+ {
+ // rate changing fails in RegisterPayload
+ int valid_rates[8] = { -1, 10000, 25000, 32000, 35000, 45000, 50000, 52000 };
+ for (i = 0; i < static_cast<int> (sizeof(valid_rates) / sizeof(int)); i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ int invalid_rates[3] = { 0, 5000, 57000 }; // invalid
+ for (i = 0; i < static_cast<int> (sizeof(invalid_rates) / sizeof(int)); i++) {
+ cinst.rate = invalid_rates[i];
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ // ensure that error code is VE_CANNOT_SET_SEND_CODEC
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ }
+ ANL();
+ }
+ } else if (_stricmp("amr", cinst.plname) == 0) {
+ int valid_rates[8] = { 4750, 5150, 5900, 6700, 7400, 7950, 10200, 12200 };
+ for (i = 0;
+ i < static_cast<int> (sizeof(valid_rates) / sizeof(int));
+ i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ ANL();
+ } else if (_stricmp("g7291", cinst.plname) == 0) {
+ int valid_rates[12] = { 8000, 12000, 14000, 16000, 18000, 20000, 22000,
+ 24000, 26000, 28000, 30000, 32000 };
+ for (i = 0;
+ i < static_cast<int> (sizeof(valid_rates) / sizeof(int));
+ i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ ANL();
+ } else if (_stricmp("amr-wb", cinst.plname) == 0) {
+ int valid_rates[9] = { 7000, 9000, 12000, 14000, 16000, 18000, 20000,
+ 23000, 24000 };
+ for (i = 0;
+ i < static_cast<int> (sizeof(valid_rates) / sizeof(int));
+ i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ TEST_LOG(" <=> ");
+ ANL();
+ } else if (_stricmp("speex", cinst.plname) == 0) {
+ // Valid speex rates are > 2000, testing some of them here
+ int valid_rates[9] = { 2001, 4000, 7000, 11000, 15000, 20000, 25000,
+ 33000, 46000 };
+ for (i = 0;
+ i < static_cast<int> (sizeof(valid_rates) / sizeof(int));
+ i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ cinst.rate = 2000; // invalid
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ ANL();
+ } else if (_stricmp("silk", cinst.plname) == 0) {
+ // Valid Silk rates are 6000 - 40000, listing some of them here
+ int valid_rates[7] = { 6000, 10000, 15000, 20000, 25000, 32000, 40000 };
+ for (i = 0;
+ i < static_cast<int> (sizeof(valid_rates) / sizeof(int));
+ i++) {
+ cinst.rate = valid_rates[i];
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ }
+ cinst.rate = 5999; // invalid
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ cinst.rate = 40001; // invalid
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst))
+ {
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ ANL();
+ } else {
+ // Use default rate for all other codecs.
+ cinst.rate = defaultCodec.rate;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_LOG("%d ", cinst.rate);
+ cinst.rate = defaultCodec.rate + 17;
+ TEST_MUSTPASS(!codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ ANL();
+ }
+ cinst.rate = defaultCodec.rate;
+
+ // run some extra tests for L16
+ if (_stricmp("l16", cinst.plname) == 0) {
+ if (8000 == cinst.plfreq) {
+ // valid pacsizes: 80, 160, 240, 320
+ cinst.pacsize = 480; // only supported in combination with 16kHz
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ cinst.pacsize = 640; // only supported in combination with 16kHz
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ } else {
+ // valid pacsizes: 160, 320, 480, 640
+ cinst.pacsize = 80; // only supported in combination with 8kHz
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ cinst.pacsize = 240; // only supported in combination with 8kHz
+ TEST_MUSTPASS(-1 != codec->SetSendCodec(0, cinst));
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ }
+ }
+ ANL();
+ } // for (int index = 0; index < nCodecs; index++)
+
+ // restore PCMU
+ const CodecInst tmp = { 0, "PCMU", 8000, 160, 1, 64000 };
+ TEST_MUSTPASS(codec->SetSendCodec(0, tmp));
+
+ ANL();
+ AOK();
+ ANL();
+
+ ///////
+ // VAD
+
+ const int VADSleep = 0;
+
+ bool disabledDTX;
+ VadModes mode;
+ bool enabled;
+
+ // verify default settings (should be OFF, kVadConventional and DTX enabled)
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(mode != kVadConventional);
+ TEST_MUSTPASS(disabledDTX != true);
+
+ // enable default VAD settings
+ TEST_MUSTPASS(codec->SetVADStatus(0, true));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(mode != kVadConventional);
+ TEST_MUSTPASS(disabledDTX != false);
+ SLEEP(VADSleep);
+
+ // set kVadConventional mode
+ TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadConventional));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(mode != kVadConventional);
+ SLEEP(VADSleep);
+
+ // set kVadAggressiveLow mode
+ TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveLow));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(mode != kVadAggressiveLow);
+ SLEEP(VADSleep);
+
+ // set kVadAggressiveMid mode
+ TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveMid));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(mode != kVadAggressiveMid);
+ SLEEP(VADSleep);
+
+  // set kVadAggressiveHigh mode
+ TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadAggressiveHigh));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(mode != kVadAggressiveHigh);
+ SLEEP(VADSleep);
+
+ // turn DTX OFF (audio should not be affected by VAD decisions)
+ TEST_MUSTPASS(codec->SetVADStatus(0, true, kVadConventional, true));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(disabledDTX != true);
+ SLEEP(VADSleep);
+
+ // try to enable DTX again (should fail since VAD is disabled)
+ TEST_MUSTPASS(codec->SetVADStatus(0, false, kVadConventional, false));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(disabledDTX == false);
+ SLEEP(VADSleep);
+
+ // disable VAD
+ TEST_MUSTPASS(codec->SetVADStatus(0, false));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(enabled != false);
+ SLEEP(VADSleep);
+
+ // restore default VAD
+ TEST_MUSTPASS(codec->SetVADStatus(0, true));
+ TEST_MUSTPASS(codec->SetVADStatus(0, false));
+ TEST_MUSTPASS(codec->GetVADStatus(0, enabled, mode, disabledDTX));
+ TEST_LOG("VAD: enabled=%d, mode=%d, disabledDTX=%d\n", enabled, mode,
+ disabledDTX);
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(mode != kVadConventional);
+ TEST_MUSTPASS(disabledDTX != true);
+ SLEEP(VADSleep);
+
+ AOK();
+ ANL();
+ ANL();
+
+ //////////////////////
+ // GetRecCodec
+ TEST(GetRecCodec);
+ ANL();
+
+ // stop all streaming first
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ // start loopback streaming (PCMU is default)
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0,8000,"127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0,8000));
+#endif
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(100); // ensure that at least one packets is received
+
+ // scan all supported and valid codecs
+ CodecInst newCodec;
+ for (i = 0; i < codec->NumOfCodecs(); i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+ // test all valid send codecs
+ if (!_stricmp("red", newCodec.plname) || !_stricmp("cn", newCodec.plname)
+ || !_stricmp("telephone-event", newCodec.plname)) {
+ continue; // Ignore these
+ }
+ if (-1 != codec->SetSendCodec(0, newCodec)) {
+ SLEEP(150);
+ // verify correct detection
+ TEST_MUSTPASS(codec->GetRecCodec(0, cinst));
+ TEST_LOG("%s %s ", newCodec.plname, cinst.plname);
+ TEST_MUSTPASS(_stricmp(newCodec.plname, cinst.plname) != 0);
+ TEST_MUSTPASS(cinst.pltype != newCodec.pltype);
+ TEST_MUSTPASS(cinst.plfreq != newCodec.plfreq);
+ }
+ }
+
+ // stop streaming
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+#ifdef WEBRTC_CODEC_AMR
+ //////////////////////////
+ // SetAMREncFormat
+
+ // Fresh channel
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ TEST(SetAMREncFormat); ANL();
+
+ //set another codec which is not AMR
+ TEST_MUSTPASS(codec->GetCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ //try to change the encode format, tests should fail
+ TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267BwEfficient));
+ MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267OctetAligned));
+ MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMREncFormat(0, kRfc3267FileStorage));
+ MARK();
+
+ //set AMR as encoder
+ strcpy(cinst.plname,"AMR");
+ cinst.channels=1; cinst.plfreq=8000; cinst.rate=12200; cinst.pltype=112;
+ cinst.pacsize=160;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ //try to change the encode format, tests should pass
+ TEST_MUSTPASS(codec->SetAMREncFormat(0)); MARK();
+ TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267BwEfficient)); MARK();
+ TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267OctetAligned)); MARK();
+ TEST_MUSTPASS(codec->SetAMREncFormat(0, kRfc3267FileStorage)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMREncFormat(-1)); MARK();
+ TEST_MUSTPASS(codec->SetAMREncFormat(0)); MARK(); // restore default
+
+ ANL();
+ AOK();
+ ANL();
+
+ //////////////////////////
+ // SetAMRDecFormat
+
+ TEST(SetAMRDecFormat); ANL();
+
+ // It should not be possible to set AMR dec format before valid AMR decoder
+ // is registered
+ TEST_MUSTPASS(!codec->SetAMRDecFormat(0)); MARK();
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+ // Ensure that ACM::RegisterReceiveCodec(AMR) is called
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+ // All these tests should now pass
+ TEST_MUSTPASS(codec->SetAMRDecFormat(0)); MARK();
+ TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267BwEfficient)); MARK();
+ TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267OctetAligned)); MARK();
+ TEST_MUSTPASS(codec->SetAMRDecFormat(0, kRfc3267FileStorage)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRDecFormat(-1)); MARK();
+ TEST_MUSTPASS(codec->SetAMRDecFormat(0)); MARK(); // restore default
+
+ ANL();
+ AOK();
+ ANL();
+#endif // #ifdef WEBRTC_CODEC_AMR
+#ifdef WEBRTC_CODEC_AMRWB
+ //////////////////////////
+ // SetAMRWbEncFormat
+
+ // Fresh channel
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ TEST(SetAMRWbEncFormat); ANL();
+
+ //set another codec which is not AMR-wb
+ TEST_MUSTPASS(codec->GetCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ //try to change the encode format, tests should fail
+ TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267BwEfficient));
+ MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267OctetAligned));
+ MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(0, kRfc3267FileStorage));
+ MARK();
+
+ //set AMR-wb as encoder
+ strcpy(cinst.plname,"AMR-WB");
+ cinst.channels=1; cinst.plfreq=16000; cinst.rate=20000;
+ cinst.pltype=112; cinst.pacsize=320;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ //try to change the encode format, tests should pass
+ TEST_MUSTPASS(codec->SetAMRWbEncFormat(0)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267BwEfficient)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267OctetAligned)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbEncFormat(0, kRfc3267FileStorage)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRWbEncFormat(-1)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbEncFormat(0)); MARK(); // restore default
+
+ ANL();
+ AOK();
+ ANL();
+
+ //////////////////////////
+  // SetAMRWbDecFormat
+
+ TEST(SetAMRWbDecFormat); ANL();
+
+  // It should not be possible to set AMR-WB dec format before a valid
+  // AMR-WB decoder is registered
+ TEST_MUSTPASS(!codec->SetAMRWbDecFormat(0)); MARK();
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+  // Ensure that ACM::RegisterReceiveCodec(AMR-WB) is called
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+ // All these tests should now pass
+ TEST_MUSTPASS(codec->SetAMRWbDecFormat(0)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267BwEfficient)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267OctetAligned)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbDecFormat(0, kRfc3267FileStorage)); MARK();
+ TEST_MUSTPASS(-1 != codec->SetAMRWbDecFormat(-1)); MARK();
+ TEST_MUSTPASS(codec->SetAMRWbDecFormat(0)); MARK(); // restore default
+
+ ANL();
+ AOK();
+ ANL();
+#endif // #ifdef WEBRTC_CODEC_AMRWB
+ ///////////////////////////////
+ // SetSendCNPayloadType
+ TEST(SetSendCNPayloadType);
+ ANL();
+
+ TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(-1, 0));
+ MARK(); // invalid channel
+
+  // Invalid payload range (only the dynamic range [96,127] is valid)
+ TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 0));
+ MARK(); // invalid PT
+ TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 95));
+ MARK(); // invalid PT
+ TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, 128));
+ MARK(); // invalid PT
+ TEST_MUSTPASS(-1 != codec->SetSendCNPayloadType(0, -1));
+ MARK(); // invalid PT
+
+ // Not possible to change PT for 8000
+ TEST_MUSTPASS(!codec->SetSendCNPayloadType(0, 96, kFreq8000Hz));
+ MARK();
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_PLFREQ);
+
+ // Try some dynamic for 16000 and 32000 as well
+ TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 96, kFreq16000Hz));
+ MARK();
+ TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 96, kFreq32000Hz));
+ MARK(); // same should work
+ TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 127, kFreq16000Hz));
+ MARK();
+ TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 127, kFreq32000Hz));
+ MARK();
+ TEST_MUSTPASS(codec->SetSendCNPayloadType(0, 100, kFreq32000Hz));
+ MARK();
+
+ ANL();
+ AOK();
+ ANL();
+
+ /////////////////////////////
+ // SetRecPayloadType
+ TEST(SetRecPayloadType);
+ ANL();
+
+ // scan all supported and valid codecs without changing payloads
+ nCodecs = codec->NumOfCodecs();
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+ // If no default payload type is defined, we use 127
+ if (-1 == newCodec.pltype) {
+ newCodec.pltype = 127;
+ }
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+ MARK(); // use default
+ newCodec.pltype = 99;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+ MARK(); // use same PT on all
+ newCodec.pltype = -1;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+ MARK(); // deregister all PTs
+ }
+
+ ANL();
+ AOK();
+ ANL();
+
+ /////////////////////////////
+ // GetRecPayloadType
+ TEST(GetRecPayloadType);
+ ANL();
+
+ CodecInst extraCodec;
+ for (i = 0; i < nCodecs; i++) {
+ // Set defaults
+ TEST_MUSTPASS(codec->GetCodec(i, newCodec));
+ // If no default payload type is defined, we use 127
+ if (-1 == newCodec.pltype) {
+ newCodec.pltype = 127;
+ }
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, newCodec));
+ //TEST_LOG("[%2d] %s (SetRec): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n",
+ // i, newCodec.plname, newCodec.plfreq, newCodec.pltype, newCodec.rate,
+ // newCodec.channels, newCodec.pacsize);
+ extraCodec.pltype = -1; // don't know this yet
+ extraCodec.plfreq = newCodec.plfreq;
+ extraCodec.rate = newCodec.rate;
+ extraCodec.channels = newCodec.channels;
+ strcpy(extraCodec.plname, newCodec.plname);
+    // Verify that setting is OK
+ TEST_MUSTPASS(codec->GetRecPayloadType(0, extraCodec));
+ //TEST_LOG("[%2d] %s (GetRec): fs=%d, pt=%d, rate=%d, ch=%d, size=%d\n",
+ // i, extraCodec.plname, extraCodec.plfreq, extraCodec.pltype,
+ // extraCodec.rate, extraCodec.channels, extraCodec.pacsize);
+ TEST_MUSTPASS(newCodec.pltype != extraCodec.pltype);
+ TEST_MUSTPASS(newCodec.plfreq != extraCodec.plfreq);
+ TEST_MUSTPASS(newCodec.channels != extraCodec.channels);
+ }
+
+ AOK();
+ ANL();
+
+ ////////////////////////////////////////////////////
+ // SetRecPayloadType - remove receive codecs
+ TEST(SetRecPayloadType - removing receive codecs);
+ ANL();
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+#endif
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ if (file) {
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0,
+ _mgr.AudioFilename(),
+ true,
+ true));
+ }
+
+ // Scan all supported and valid codecs and remove from receiving db, then
+ // restore
+ nCodecs = codec->NumOfCodecs();
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, cinst));
+ if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+ || !_stricmp("telephone-event", cinst.plname)) {
+ continue; // Ignore these
+ }
+ TEST_LOG("Testing codec: %s", cinst.plname);
+ fflush(NULL);
+
+ if (-1 == cinst.pltype) {
+ // If no default payload type is defined, we use 127,
+ // codec is not registered for receiving
+ cinst.pltype = 127;
+ } else {
+ // Remove codec
+ memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+ extraCodec.pltype = -1;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, extraCodec));
+ }
+
+ // Set send codec
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ // Verify no audio
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_LOG(" silence");
+ fflush(NULL);
+ SLEEP(800);
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ // Restore codec
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+
+ // Verify audio
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_LOG(" audio");
+ fflush(NULL);
+ SLEEP(800);
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ if (127 == cinst.pltype) {
+      // If no default payload type is defined, i.e. we have set pt to 127
+      // above, make sure we remove the codec from receiving
+ cinst.pltype = -1;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ }
+
+ ANL();
+ }
+
+ // Remove certain codecs
+ TEST_LOG("Removing receive codecs:");
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, cinst));
+ if (!_stricmp("ipcmwb", cinst.plname) || !_stricmp("pcmu", cinst.plname)
+ || !_stricmp("eg711a", cinst.plname)) {
+ TEST_LOG(" %s", cinst.plname);
+ memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+ extraCodec.pltype = -1;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, extraCodec));
+ }
+ }
+ ANL();
+
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ // Test sending all codecs - verify audio/no audio depending on codec
+ TEST_LOG("Looping through send codecs \n");
+ TEST_LOG("Verify that removed codecs are not audible and the other are \n");
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, cinst));
+ if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+ || !_stricmp("telephone-event", cinst.plname)) {
+ continue; // Ignore these
+ }
+ TEST_LOG("Testing codec: %s \n", cinst.plname);
+
+ // If no default payload type is defined, we use 127 and set receive
+ // payload type
+ if (-1 == cinst.pltype) {
+ cinst.pltype = 127;
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ }
+
+ // Set send codec
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ // Verify audio/no audio
+ SLEEP(800);
+ }
+
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ // Restore codecs
+ TEST_LOG("Restoring receive codecs:");
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, cinst));
+ if (!_stricmp("ipcmwb", cinst.plname) || !_stricmp("pcmu", cinst.plname)
+ || !_stricmp("eg711a", cinst.plname)) {
+ TEST_LOG(" %s", cinst.plname);
+ memcpy(&extraCodec, &cinst, sizeof(CodecInst));
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ }
+ }
+ ANL();
+
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ // Test sending all codecs - verify audio
+ TEST_LOG("Looping through send codecs \n");
+ TEST_LOG("Verify that all codecs are audible \n");
+ for (i = 0; i < nCodecs; i++) {
+ TEST_MUSTPASS(codec->GetCodec(i, cinst));
+ if (!_stricmp("red", cinst.plname) || !_stricmp("cn", cinst.plname)
+ || !_stricmp("telephone-event", cinst.plname)) {
+ continue; // Ignore these
+ }
+ TEST_LOG("Testing codec: %s \n", cinst.plname);
+
+ // If no default payload type is defined, we use 127 and set receive
+ // payload type
+ if (-1 == cinst.pltype) {
+ cinst.pltype = 127;
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ }
+
+ // Set send codec
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ // Verify audio/no audio
+ SLEEP(800);
+ }
+
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ // Fresh channel
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+#if defined(WEBRTC_CODEC_ISAC)
+
+ /////////////////////////////////////
+ // SetISACInitTargetRate - wb
+ TEST(SetISACInitTargetRate);
+ ANL();
+
+ // set PCMU as sending codec
+ cinst.channels = 1;
+ cinst.pacsize = 160;
+ cinst.plfreq = 8000;
+ strcpy(cinst.plname, "PCMU");
+ cinst.pltype = 0;
+ cinst.rate = 64000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 10000));
+ MARK(); // should fail since iSAC is not active
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CODEC_ERROR);
+
+ // set iSAC as sending codec (16kHz)
+ cinst.channels = 1;
+ cinst.plfreq = 16000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 103;
+ cinst.rate = -1; // adaptive rate
+ cinst.pacsize = 480; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(1, 10000));
+ MARK(); // invalid channel
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 500));
+ MARK(); // invalid target rates (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 33000));
+ MARK(); // invalid target rates (too large)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 0));
+ MARK(); // 0 is a valid rate
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000));
+ MARK(); // try max as well
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, true));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, false));
+ MARK();
+
+ cinst.pacsize = 960; // 60ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000, false));
+ MARK();
+
+ cinst.rate = 20000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 32000));
+ MARK(); // only works in adaptive mode
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_AUDIO_CODING_MODULE_ERROR);
+
+ cinst.rate = -1;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 32000));
+ MARK(); // back to adaptive mode
+
+ ANL();
+ AOK();
+ ANL();
+
+ /////////////////////////////////////
+ // SetISACInitTargetRate - swb
+ TEST(ISACSWB SetISACInitTargetRate);
+ ANL();
+
+ // set iSAC as sending codec
+ cinst.channels = 1;
+ cinst.plfreq = 32000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 104;
+ cinst.rate = -1; // default rate
+ cinst.pacsize = 960; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(1, 10000));
+ MARK(); // invalid channel
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, -1));
+ MARK(); // invalid target rates (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, -1));
+ MARK(); // invalid target rates (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 500));
+ MARK(); // invalid target rates (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACInitTargetRate(0, 57000));
+ MARK(); // invalid target rates (valid range is [10000, 56000])
+
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 10000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 0));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000));
+ MARK(); // try max as well
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000, true));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACInitTargetRate(0, 56000, false));
+ MARK();
+
+ ANL();
+ AOK();
+ ANL();
+
+ ////////////////////////////////
+ // SetISACMaxRate
+ TEST(SetISACMaxRate);
+ ANL();
+
+ // set PCMU as sending codec
+ cinst.channels = 1;
+ cinst.pacsize = 160;
+ cinst.plfreq = 8000;
+ strcpy(cinst.plname, "PCMU");
+ cinst.pltype = 0;
+ cinst.rate = 64000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 48000));
+ MARK(); // should fail since iSAC is not active
+ TEST_MUSTPASS(voe_base_->LastError() != VE_CODEC_ERROR);
+
+ // set iSAC as sending codec
+ cinst.channels = 1;
+ cinst.plfreq = 16000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 103;
+ cinst.rate = -1; // adaptive rate
+ cinst.pacsize = 480; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+ MARK(); // invalid channel
+ TEST_MUSTPASS(voe_base_->LastError() != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+ MARK(); // invalid target rates (too small)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 53500));
+ MARK(); // invalid target rates (too large)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 48000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 53400));
+ MARK(); // try max as well (default)
+
+ cinst.pacsize = 960; // 60ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 48000));
+ MARK();
+
+ cinst.rate = 20000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+ MARK(); // also works in non-adaptive mode
+
+ ANL();
+ AOK();
+ ANL();
+
+ TEST(ISACSWB SetISACMaxRate);
+ ANL();
+ // set iSAC as sending codec
+ cinst.channels = 1;
+ cinst.plfreq = 32000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 104;
+ cinst.rate = 45000; // instantaneous mode
+ cinst.pacsize = 960; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+ MARK(); // invalid channel
+ TEST_MUSTPASS(voe_base_->LastError() != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+ MARK(); // invalid target rates (too small)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 107500));
+ MARK(); // invalid target rates (too large)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 55000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 80000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 107000));
+ MARK(); // try max as well (default)
+
+
+ cinst.rate = -1; // adaptive mode
+ cinst.pacsize = 960; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(1, 48000));
+ MARK(); // invalid channel
+ TEST_MUSTPASS(voe_base_->LastError() != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 31900));
+ MARK(); // invalid target rates (too small)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACMaxRate(0, 107500));
+ MARK(); // invalid target rates (too large)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 32000));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 40000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 55000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 80000));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxRate(0, 107000));
+ MARK(); // try max as well (default)
+
+ ANL();
+ AOK();
+ ANL();
+
+ ////////////////////////////////
+ // SetISACMaxPayloadSize
+ TEST(SetISACMaxPayloadSize);
+ ANL();
+
+ // set PCMU as sending codec
+ cinst.channels = 1;
+ cinst.pacsize = 160;
+ cinst.plfreq = 8000;
+ strcpy(cinst.plname, "PCMU");
+ cinst.pltype = 0;
+ cinst.rate = 64000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 120));
+ MARK(); // should fail since iSAC is not active
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CODEC_ERROR);
+
+ // set iSAC as sending codec
+ cinst.channels = 1;
+ cinst.plfreq = 16000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 103;
+ cinst.rate = -1; // adaptive rate
+ cinst.pacsize = 480; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(1, 120));
+ MARK(); // invalid channel
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 100));
+ MARK(); // invalid size (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 410));
+ MARK(); // invalid size (too large)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 200));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 120));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 400));
+ MARK();
+
+ ANL();
+ AOK();
+ ANL();
+
+ TEST(ISACSWB SetISACMaxPayloadSize);
+ ANL();
+ // set iSAC as sending codec
+ cinst.channels = 1;
+ cinst.plfreq = 32000;
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 104;
+ cinst.rate = 45000; // default rate
+ cinst.pacsize = 960; // 30ms
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(1, 100));
+ MARK(); // invalid channel
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 100));
+ MARK(); // invalid size (too small)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(!codec->SetISACMaxPayloadSize(0, 610));
+ MARK(); // invalid size (too large)
+ err = voe_base_->LastError();
+ TEST_MUSTPASS(err != VE_INVALID_ARGUMENT);
+
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 200));
+ MARK(); // life is good now
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 120));
+ MARK();
+ TEST_MUSTPASS(codec->SetISACMaxPayloadSize(0, 600));
+ MARK();
+
+ ANL();
+ AOK();
+ ANL();
+
+  // Switch between iSAC-wb and iSAC-swb as the sending codec while
+  // streaming; start with iSAC-wb.
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+#else
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8001, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8001));
+#endif
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ std::string output_path = webrtc::test::OutputPath();
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ 0, (output_path + "audio_long16.pcm").c_str(), true , true));
+ cinst.channels = 1;
+ TEST_LOG("Testing codec: Switch between iSAC-wb and iSAC-swb \n");
+ TEST_LOG("Testing codec: iSAC wideband \n");
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 103;
+ cinst.rate = -1; // default rate
+ cinst.pacsize = 480; // 30ms
+ cinst.plfreq = 16000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ SLEEP(2000);
+ TEST_LOG(" : iSAC superwideband \n");
+ cinst.pltype = 104;
+ cinst.rate = -1; // default rate
+ cinst.pacsize = 960; // 30ms
+ cinst.plfreq = 32000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ SLEEP(2000);
+ TEST_LOG(" : iSAC wideband \n");
+ strcpy(cinst.plname, "ISAC");
+ cinst.pltype = 103;
+ cinst.rate = -1; // default rate
+ cinst.pacsize = 480; // 30ms
+ cinst.plfreq = 16000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ SLEEP(2000);
+ TEST_LOG(" : iSAC superwideband \n");
+ cinst.pltype = 104;
+ cinst.rate = -1; // default rate
+ cinst.pacsize = 960; // 30ms
+ cinst.plfreq = 32000;
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ SLEEP(2000);
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+#else
+ TEST_LOG("Skipping extended iSAC API tests - "
+ "WEBRTC_CODEC_ISAC not defined\n");
+#endif // #if defined(WEBRTC_CODEC_ISAC)
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ delete ptrTransport;
+#endif
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestDtmf
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestDtmf() {
+ PrepareTest("Dtmf");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEDtmf* dtmf = _mgr.DtmfPtr();
+ VoECodec* codec = _mgr.CodecPtr();
+ VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+
+ std::string output_path = webrtc::test::OutputPath();
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ (output_path + "VoEDtmf_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+ //#endif
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ // SetDtmfFeedbackStatus
+ TEST(SetDtmfFeedbackStatus & GetDtmfFeedbackStatus);
+ ANL();
+ bool dtmfFeedback = false, dtmfDirectFeedback = true;
+ TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+ dtmfDirectFeedback));
+ TEST_MUSTPASS(!dtmfFeedback);
+ TEST_MUSTPASS(dtmfDirectFeedback);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+ MARK();
+ SLEEP(500);
+
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, false));
+ TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+ dtmfDirectFeedback));
+ TEST_MUSTPASS(dtmfFeedback);
+ TEST_MUSTPASS(dtmfDirectFeedback);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+ MARK();
+ SLEEP(500);
+
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, true));
+ TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+ dtmfDirectFeedback));
+ TEST_MUSTPASS(dtmfFeedback);
+ TEST_MUSTPASS(!dtmfDirectFeedback);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+ MARK();
+ SLEEP(500);
+
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, false));
+ TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+ dtmfDirectFeedback));
+ TEST_MUSTPASS(!dtmfFeedback);
+ TEST_MUSTPASS(dtmfDirectFeedback);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+ MARK();
+ SLEEP(500);
+
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, true));
+ TEST_MUSTPASS(dtmf->GetDtmfFeedbackStatus(dtmfFeedback,
+ dtmfDirectFeedback));
+ TEST_MUSTPASS(!dtmfFeedback);
+ TEST_MUSTPASS(!dtmfDirectFeedback);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false, false));
+
+ AOK();
+ ANL();
+
+ // SendDtmf
+ TEST(SendDtmf);
+ ANL();
+
+ // Fail tests
+ // Event
+ // the eventcode is changed to unsigned char, so -1 will be interpreted as
+ // 255, 256->0
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, -1, false, 160, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 16, false, 160, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ // Length
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 99, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 60001, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 20, true, -1, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ // Volume
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 160, -1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true, 160, 37));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ // Without sending
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(!dtmf->SendTelephoneEvent(0, 0, true));
+ MARK();
+ TEST_MUSTPASS(VE_NOT_SENDING != voe_base_->LastError());
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ // Testing Dtmf out-of-band: event, length and volume
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 16, true));
+ MARK();
+ SLEEP(500); // Flash, not audible
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 100, 10));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400, 10));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 160, 0));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 160, 36));
+ MARK();
+ SLEEP(500);
+
+ // Testing Dtmf inband: event, length and volume
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 15, false));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 100, 10));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 400, 10));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 160, 0));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, false, 160, 36));
+ MARK();
+ SLEEP(500);
+
+ // Testing other events out-of-band: event and length
+ // These are not audible
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 17, true, 100, 10));
+ MARK();
+ SLEEP(200);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 100, 10));
+ MARK();
+ SLEEP(200);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 78, true, 100, 10));
+ MARK();
+ SLEEP(200);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 255, true, 100, 10));
+ MARK();
+ SLEEP(200);
+ // the minimum length is 100 for the telephoneevent
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 100, 10));
+ MARK();
+ SLEEP(200);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 32, true, 1000, 10));
+ MARK();
+ SLEEP(1200);
+
+ AOK();
+ ANL();
+
+ // PlayDtmfTone
+ TEST(PlayDtmfTone);
+ ANL();
+ TEST_MUSTPASS(!dtmf->PlayDtmfTone(-1, 200, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->PlayDtmfTone(16, 200, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 9, 10));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 200, -1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!dtmf->PlayDtmfTone(0, 200, 37));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+
+ TEST_MUSTPASS(dtmf->PlayDtmfTone(0));
+ MARK();
+ SLEEP(500);
+ // the minimum length fo the DtmfTone is 100
+ TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 100, 10));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 2000, 10));
+ MARK();
+ SLEEP(2300);
+ TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 200, 0));
+ MARK();
+ SLEEP(500);
+ TEST_MUSTPASS(dtmf->PlayDtmfTone(0, 200, 36));
+ MARK();
+ SLEEP(500);
+
+ AOK();
+ ANL();
+
+ // SetTelephoneEventDetection
+ TEST(SetTelephoneEventDetection);
+ ANL();
+ AOK();
+ ANL();
+
+ // Testing sending Dtmf under VAD/CN
+ TEST(SendDtmf - with VAD enabled);
+ ANL();
+ // Mute mic
+ TEST_MUSTPASS(volume->SetInputMute(0, true));
+ MARK();
+ // Enable VAD
+ TEST_MUSTPASS(codec->SetVADStatus(0, true));
+ MARK();
+ // Send Dtmf
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+ MARK();
+ SLEEP(1000);
+ // Switch codec
+ CodecInst ci;
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ ci.channels = 1;
+ ci.pacsize = 480;
+ ci.plfreq = 16000;
+ strcpy(ci.plname, "ISAC");
+ ci.pltype = 103;
+ ci.rate = -1;
+#else
+ ci.pltype = 119;
+ strcpy(ci.plname, "isaclc");
+ ci.plfreq = 16000;
+ ci.pacsize = 320;
+ ci.channels = 1;
+ ci.rate = 40000;
+#endif
+ TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+ MARK();
+ // Send Dtmf
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 0, true, 400));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, 9, true, 400));
+ MARK();
+ SLEEP(1000);
+ SLEEP(4000);
+ // Disable VAD
+ TEST_MUSTPASS(codec->SetVADStatus(0, false));
+ MARK();
+ // Unmute
+ TEST_MUSTPASS(volume->SetInputMute(0, false));
+ MARK();
+
+ AOK();
+ ANL();
+
+ // SetSendTelephoneEventPayloadType
+ TEST(SetSendTelephoneEventPayloadType);
+ ANL();
+ TEST_MUSTPASS(!dtmf->SetSendTelephoneEventPayloadType(0, 128));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+
+ TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 96));
+ MARK();
+ TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 127));
+ MARK();
+ TEST_MUSTPASS(dtmf->SetSendTelephoneEventPayloadType(0, 106));
+ MARK(); // restore default
+
+ AOK();
+ ANL();
+
+#ifdef WEBRTC_DTMF_DETECTION
+ TEST(RegisterTelephoneEventDetection - several channels); ANL();
+
+ ci.channels = 1;
+ ci.pacsize = 160;
+ ci.plfreq = 8000;
+ ci.pltype = 0;
+ ci.rate = 64000;
+ strcpy(ci.plname, "PCMU");
+ TEST_MUSTPASS(codec->SetSendCodec(0, ci));
+
+ int ch2 = voe_base_->CreateChannel();
+ TEST_MUSTPASS(voe_base_->SetSendDestination(ch2, 8002, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(ch2, 8002));
+ TEST_MUSTPASS(voe_base_->StartReceive(ch2));
+ TEST_MUSTPASS(codec->SetSendCodec(ch2, ci));
+ TEST_MUSTPASS(voe_base_->StartPlayout(ch2));
+ TEST_MUSTPASS(voe_base_->StartSend(ch2));
+ MARK();
+
+ DtmfCallback *d = new DtmfCallback();
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(false));
+
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ // In-band
+ TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(0, kInBand, *d));
+ TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(ch2, kInBand, *d));
+ TEST_LOG("\nSending in-band telephone events:");
+ for(int i = 0; i < 16; i++)
+ {
+ TEST_LOG("\n %d ", i); fflush(NULL);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, false, 160, 10));
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(ch2, i, false, 160, 10));
+ SLEEP(500);
+ }
+ TEST_LOG("\nDetected %d events \n", d->counter);
+ TEST_MUSTPASS(d->counter != 32);
+ TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+ TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(ch2));
+
+ // Out-of-band
+ d->counter = 0;
+ TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(0, kOutOfBand, *d));
+ TEST_MUSTPASS(dtmf->RegisterTelephoneEventDetection(ch2, kOutOfBand, *d));
+ TEST_LOG("\nSending out-band telephone events:");
+ for(int i = 0; i < 16; i++)
+ {
+ TEST_LOG("\n %d ", i); fflush(NULL);
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(0, i, true, 160, 10));
+ TEST_MUSTPASS(dtmf->SendTelephoneEvent(ch2, i, true, 160, 10));
+ SLEEP(500);
+ }
+ TEST_LOG("\nDetected %d events \n", d->counter);
+ TEST_MUSTPASS(d->counter != 32);
+ TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(0));
+ TEST_MUSTPASS(dtmf->DeRegisterTelephoneEventDetection(ch2));
+ delete d;
+
+ AOK(); ANL();
+#endif
+
+ TEST_MUSTPASS(dtmf->SetDtmfFeedbackStatus(true, false));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestEncryption
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestEncryption() {
+ PrepareTest("Encryption");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoEEncryption* encrypt = _mgr.EncryptionPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ GetFilename("VoEEncryption_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ unsigned char key1[30] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
+
+#ifdef WEBRTC_SRTP
+ unsigned char key2[30]; // Different than key1 in first position
+ memcpy(key2, key1, 30);
+ key2[0] = 99;
+ unsigned char key3[30]; // Different than key1 in last position
+ memcpy(key3, key1, 30);
+ key3[29] = 99;
+ unsigned char key4[29]; // Same as key1 but shorter
+ memcpy(key4, key1, 29);
+
+ TEST(SRTP - Fail tests); ANL();
+
+ // Send
+ // Incorrect parameters when not all protection is enabled
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kNoProtection, key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kEncryption key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kAuthentication, key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ // Incorrect cipher key length
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 15,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 257,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 15, kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 257, kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // Incorrect auth key length
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 21, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 257, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // Incorrect auth tag length
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 21,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 20, 13,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+
+ // key NULL pointer
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, NULL));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+
+ // Same for receive
+ // Incorrect parameters when not all protection is enabled
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kNoProtection, key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kEncryption key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kAuthentication, key1));
+ TEST_MUSTPASS(VE_SRTP_ERROR != voe_base_->LastError());
+ MARK();
+ // Incorrect cipher key length
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 15,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 257,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 15,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 257,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // Incorrect auth key length
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode,
+ 30, kAuthHmacSha1, 21, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // it crashed the application
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 257, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // Incorrect auth tag length
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 21,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // it crashed the application
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 20, 13,
+ kEncryptionAndAuthentication,
+ key1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ // key NULL pointer
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ NULL));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ ANL();
+
+ TEST(SRTP - Should hear audio at all time); ANL();
+
+ // Authentication only
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+ 4, kAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+ ANL();
+
+ // No protection
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthNull, 0, 0,
+ kNoProtection, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthNull, 0, 0,
+ kNoProtection, key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+
+ // Encryption only
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0, kEncryption key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0,
+ kEncryption key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+
+ // Authentication only
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+ 4, kAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+ ANL();
+
+ // Switching between keys
+ TEST(SRTP - Different keys - should hear audio at all time); ANL();
+
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key2));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key2));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key2));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key2));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+ ANL();
+
+ // Testing different keys that should be silent
+ TEST(SRTP - Should be silent or garbage); ANL();
+
+ // key1 and key2
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key2));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key2));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0, kEncryption key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0,
+ kEncryption key2));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key2));
+ MARK(); SLEEP(2000);
+
+ // key1 and key3
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key3));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key3));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0, kEncryption key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0,
+ kEncryption key3));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+ 4, kAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key3));
+ MARK(); SLEEP(2000);
+
+ // key1 and key4
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key4));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication, key4));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1, 20, 4,
+ kEncryptionAndAuthentication,
+ key1));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0, kEncryption key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthNull, 0, 0,
+ kEncryption key4));
+ MARK(); SLEEP(2000);
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherNull, 0, kAuthHmacSha1, 20,
+ 4, kAuthentication, key1));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherNull, 0, kAuthHmacSha1,
+ 20, 4, kAuthentication, key4));
+ MARK(); SLEEP(2000);
+ ANL();
+
+ // Back to normal
+ TEST(SRTP - Back to normal - should hear audio); ANL();
+
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ MARK(); SLEEP(2000);
+ ANL();
+
+ // SRTCP tests
+ TEST(SRTCP - Ignore voice or not); ANL();
+ VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+ char tmpStr[32];
+
+ // First test that RTCP packet is received and OK without encryption
+
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik1"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik1", tmpStr));
+
+ // Enable SRTP and SRTCP send and receive
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1, true));
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1, true));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik2"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik2", tmpStr));
+
+ // Disable SRTP and SRTCP send
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik3"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+ // Enable SRTP send, but disable SRTCP send
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik4"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+ // Enable SRTP and SRTCP send, disable SRTP and SRTCP receive
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->EnableSRTPSend(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1, true));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik5"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+ // Enable SRTP receive, but disable SRTCP receive
+ TEST_MUSTPASS(encrypt->EnableSRTPReceive(0, kCipherAes128CounterMode, 30,
+ kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik6"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik2", tmpStr)); // Should not have changed
+
+ // Disable all
+ TEST_MUSTPASS(encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(rtp_rtcp->SetRTCP_CNAME(0, "Henrik7"));
+ MARK(); SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCP_CNAME(0, tmpStr));
+ TEST_MUSTPASS(_stricmp("Henrik7", tmpStr));
+ ANL();
+
+#else
+ TEST(SRTP disabled - Fail tests);
+ ANL();
+
+ TEST_MUSTPASS(!encrypt->EnableSRTPSend(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!encrypt->EnableSRTPReceive(0, kCipherNull, 30, kAuthHmacSha1,
+ 20, 4, kEncryptionAndAuthentication, key1));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!encrypt->DisableSRTPSend(0));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!encrypt->DisableSRTPReceive(0));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ ANL();
+#endif
+ AOK();
+
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestExternalMedia
+// ----------------------------------------------------------------------------
+
+// Integration test for the VoEExternalMedia interface. Sets up a local
+// loopback call on channel 0, then exercises external playout
+// (ExternalPlayoutGetData) and external recording
+// (ExternalRecordingInsertData), including the expected-failure paths:
+// calls made while the channel is still active, and invalid argument
+// combinations. Returns 0 on success, -1 if the interface is missing.
+int VoEExtendedTest::TestExternalMedia() {
+ PrepareTest("VoEExternalMedia");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEExternalMedia* xmedia = _mgr.ExternalMediaPtr();
+
+ // check if this interface is supported
+ if (!xmedia) {
+ TEST_LOG("VoEExternalMedia is not supported!");
+ return -1;
+ }
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ GetFilename("VoEExternalMedia_trace.txt").c_str()));
+ // NOTE(review): kTraceStateInfo is OR'ed in twice below; harmless
+ // (bitwise OR is idempotent) but possibly a typo for another
+ // kTrace* flag -- confirm against the intended trace mask.
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(
+ kTraceStateInfo | kTraceStateInfo | kTraceWarning |
+ kTraceError | kTraceCritical | kTraceApiCall |
+ kTraceMemory | kTraceInfo));
+#endif
+ // Local loopback: channel 0 sends to and receives from 127.0.0.1:12345.
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ // Zero-filled scratch buffer shared by the playout/recording calls.
+ int getLen = 0;
+ WebRtc_Word16 vector[32000];
+ memset(vector, 0, 32000 * sizeof(short));
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+
+ // ExternalPlayoutGetData
+ TEST(ExternalPlayoutGetData);
+ ANL();
+
+ // Enabling external playout while the channel is active must fail,
+ // and pulling data while external playout is disabled must fail.
+ TEST_MUSTPASS(!xmedia->SetExternalPlayoutStatus(true));
+ TEST_MUSTPASS(VE_ALREADY_SENDING != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, 100, getLen));
+ TEST_MUSTPASS(VE_INVALID_OPERATION != voe_base_->LastError());
+
+ // Playout must be stopped before external playout can be enabled.
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(true));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ // One 10 ms frame: 480 samples at 48 kHz, 160 samples at 16 kHz.
+ TEST_MUSTPASS(xmedia->ExternalPlayoutGetData(vector, 48000, 0, getLen));
+ TEST_MUSTPASS(480 != getLen);
+ SLEEP(10);
+ TEST_MUSTPASS(xmedia->ExternalPlayoutGetData(vector, 16000, 3000, getLen));
+ TEST_MUSTPASS(160 != getLen);
+ SLEEP(10);
+
+ // Argument combinations the API must reject (VE_INVALID_ARGUMENT).
+ TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 8000, 100, getLen));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, -1, getLen));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+
+ // Restore normal (internal) playout.
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(xmedia->SetExternalPlayoutStatus(false));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ // SetExternalRecording
+ TEST(SetExternalRecording);
+ ANL();
+
+ // Enabling external recording while sending must fail, and inserting
+ // data while it is disabled must fail.
+ TEST_MUSTPASS(!xmedia->SetExternalRecordingStatus(true));
+ TEST_MUSTPASS(VE_ALREADY_SENDING != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, 20));
+ TEST_MUSTPASS(VE_INVALID_OPERATION != voe_base_->LastError());
+
+ // Sending must be stopped before external recording can be enabled.
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ // Valid inserts: 480 samples = 10 ms @ 48 kHz; 640 = 40 ms @ 16 kHz.
+ TEST_MUSTPASS(xmedia->ExternalRecordingInsertData(vector, 480, 48000, 0));
+ SLEEP(10);
+ TEST_MUSTPASS(xmedia->ExternalRecordingInsertData(vector, 640, 16000, 0));
+ SLEEP(40);
+
+ // Argument combinations the API must reject (VE_INVALID_ARGUMENT):
+ // negative delay and sample-count/rate pairs it does not accept.
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, -1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 80, 8000, 20));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 0, 16000, 20));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 80, 16000, 20));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 500, 16000, 20));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+
+ // Restore normal (internal) recording.
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(xmedia->SetExternalRecordingStatus(false));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+#else // #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+ // Without the define, every external-media call must fail with
+ // VE_FUNC_NOT_SUPPORTED.
+ TEST_MUSTPASS(!xmedia->SetExternalPlayoutStatus(true));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalPlayoutGetData(vector, 16000, 100, getLen));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->SetExternalRecordingStatus(true));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+ TEST_MUSTPASS(!xmedia->ExternalRecordingInsertData(vector, 160, 16000, 20));
+ TEST_MUSTPASS(VE_FUNC_NOT_SUPPORTED != voe_base_->LastError());
+
+#endif // #ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+ // Teardown: stop streaming, delete the channel, shut the engine down.
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ ANL();
+ AOK();
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestFile
+// ----------------------------------------------------------------------------
+
+// Integration test for the VoEFile interface: local file playout
+// (including segments and invalid inputs), file-as-microphone over
+// nb/wb/swb codecs, recording playout and microphone to PCM/WAV/iLBC,
+// PCM<->WAV<->compressed conversions, and file duration / playback
+// position queries. Runs a local loopback call on channel 0 throughout;
+// several sections are interactive (they ask the operator to speak and
+// listen). Returns 0 on success; TEST_MUSTPASS aborts on the first
+// failing expectation.
+int VoEExtendedTest::TestFile() {
+ PrepareTest("File");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoECodec* codec = _mgr.CodecPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ GetFilename("VoEFile_trace.txt").c_str())); MARK();
+ // NOTE(review): kTraceStateInfo appears twice in this filter; harmless
+ // (bitwise OR is idempotent) but possibly a typo for another flag.
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ // Local loopback: channel 0 sends to and receives from 127.0.0.1:12345.
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ const int dT(100); // ms to let each file play before the next step
+
+ TEST(StartPlayingFileLocally);
+ ANL();
+ TEST(StopPlayingFileLocally);
+ ANL();
+
+ voe_base_->StopPlayout(0);
+ std::string output_path = webrtc::test::OutputPath();
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));MARK();
+ voe_base_->StartPlayout(0);
+ MARK(); // file should be mixed in and played out
+ SLEEP(dT);
+ // Starting again without stopping first must fail.
+ TEST_MUSTPASS(!file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));
+ MARK(); // should fail (must stop first)
+ TEST_MUSTPASS(voe_base_->LastError() != VE_ALREADY_PLAYING);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));
+ MARK(); // should work again (restarts file)
+ SLEEP(dT);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ // Explicit file-format variants: PCM 16 kHz, PCM 8 kHz, then WAV
+ // files opened with an 8 kHz PCM format argument.
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str(),
+ false, kFileFormatPcm16kHzFile));
+ MARK();
+ SLEEP(dT);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long8.pcm").c_str(),
+ false, kFileFormatPcm8kHzFile));
+ MARK();
+ SLEEP(dT);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.wav").c_str(),
+ false, kFileFormatPcm8kHzFile));
+ MARK();
+ SLEEP(dT);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long8mulaw.wav").c_str(), false,
+ kFileFormatPcm8kHzFile));
+ MARK();
+ SLEEP(dT);
+
+ // add compressed tests here...
+
+ // TEST_MUSTPASS(file->StopPlayingFileLocally(0)); MARK();
+ // TEST_MUSTPASS(file->StartPlayingFileLocally(
+ // 0, (output_path + "audio_short16.pcm").c_str(), true,
+ // kFileFormatPcm16kHzFile)); MARK(); // loop
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ // Segment playback (start/stop points in ms) plus invalid segments:
+ // stop before start, start beyond EOF, zero-length segment.
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_short16.pcm").c_str(), false,
+ kFileFormatPcm16kHzFile, 1.0, 0, 2000));
+ MARK(); // play segment
+ SLEEP(2500);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ TEST_MUSTPASS(!file->StartPlayingFileLocally(
+ 0, (output_path + "audio_short16.pcm").c_str(), false,
+ kFileFormatPcm16kHzFile, 1.0, 2000, 1000));
+ MARK(); // invalid segment
+ TEST_MUSTPASS(voe_base_->LastError() != VE_BAD_FILE);
+ TEST_MUSTPASS(!file->StartPlayingFileLocally(
+ 0, (output_path + "audio_short16.pcm").c_str(), false,
+ kFileFormatPcm16kHzFile, 1.0, 21000, 30000));
+ MARK(); // start > file size
+ TEST_MUSTPASS(voe_base_->LastError() != VE_BAD_FILE);
+ TEST_MUSTPASS(!file->StartPlayingFileLocally(
+ 0, (output_path + "audio_short16.pcm").c_str(), false,
+ kFileFormatPcm16kHzFile, 1.0, 100, 100));
+ MARK(); // invalid segment
+ TEST_MUSTPASS(voe_base_->LastError() != VE_BAD_FILE);
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));
+ MARK(); // should work again (restarts file)
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ // NULL stream handle must be rejected with VE_BAD_FILE.
+ TEST_MUSTPASS(!file->StartPlayingFileLocally(0, (InStream*)NULL));
+ MARK(); // just do it
+ TEST_MUSTPASS(voe_base_->LastError() != VE_BAD_FILE);
+
+ AOK();
+ ANL();
+
+ TEST(IsPlayingFileLocally);
+ ANL();
+
+ TEST_MUSTPASS(0 != file->IsPlayingFileLocally(0));
+ MARK(); // inactive
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));
+ MARK();
+ TEST_MUSTPASS(1 != file->IsPlayingFileLocally(0));
+ MARK(); // active
+ AOK();
+ ANL();
+
+ TEST(ScaleLocalFilePlayout);
+ ANL();
+ // Scale factors 1.0 -> 0.0 -> 0.5 -> 0.25; audible check only.
+ TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 1.0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.5));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleLocalFilePlayout(0, 0.25));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ MARK();
+ AOK();
+ ANL();
+
+ // Replace microphone with file and play out on remote side
+ // All channels, per channel
+ // Different mixing frequencies
+ TEST(StartPlayingFileAsMicrophone);
+ ANL();
+ TEST(IsPlayingFileAsMicrophone);
+ ANL();
+ TEST(ScaleFileAsMicrophonePlayout);
+ ANL();
+ CodecInst tempCodec;
+ for (int ch = -1; ch < 1; ++ch) // Channel -1 and 0
+ {
+ TEST_LOG("Testing channel = %d \n", ch);
+ for (int fs = 1; fs < 4; ++fs) // nb, wb and swb codecs
+ {
+ // Select the codec for this iteration; wb/swb cases fall back to
+ // 'continue' when the codec is not compiled in.
+ switch (fs) {
+ case 1: // nb
+ TEST_LOG("Testing with nb codec \n");
+ tempCodec.channels = 1;
+ tempCodec.pacsize = 160;
+ tempCodec.plfreq = 8000;
+ strcpy(tempCodec.plname, "PCMU");
+ tempCodec.pltype = 0;
+ tempCodec.rate = 64000;
+ break;
+ case 2: // wb
+#ifdef WEBRTC_CODEC_ISAC
+ TEST_LOG("Testing with wb codec \n");
+ tempCodec.channels = 1;
+ tempCodec.pacsize = 480;
+ tempCodec.plfreq = 16000;
+ strcpy(tempCodec.plname, "ISAC");
+ tempCodec.pltype = 103;
+ tempCodec.rate = 32000;
+ break;
+#else
+ TEST_LOG("NOT testing with wb codec - "
+ "WEBRTC_CODEC_ISAC not defined \n");
+ continue;
+#endif
+ case 3: // swb
+#ifdef WEBRTC_CODEC_PCM16
+ TEST_LOG("Testing with swb codec \n");
+ tempCodec.channels = 1;
+ tempCodec.pacsize = 640;
+ tempCodec.plfreq = 32000;
+ strcpy(tempCodec.plname, "L16");
+ tempCodec.pltype = 125;
+ tempCodec.rate = 512000;
+ break;
+#else
+ TEST_LOG("NOT testing with swb codec -"
+ " WEBRTC_CODEC_PCM16 not defined \n");
+ continue;
+#endif
+ }
+ // Restart streaming with the selected codec in both directions.
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, tempCodec));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(codec->SetSendCodec(0, tempCodec));
+
+ TEST_LOG("File 1 in 16 kHz no mix, 2 in 16 kHz mix,"
+ " 3 in 8 kHz no mix, 4 in 8 kHz mix \n");
+
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long16.pcm").c_str()));
+ MARK(); // don't mix
+ SLEEP(2000);
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long16.wav").c_str(), false, true,
+ kFileFormatWavFile));
+ MARK(); // mix
+ SLEEP(2000);
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long8.pcm").c_str(), false, false,
+ kFileFormatPcm8kHzFile));
+ MARK(); // don't mix
+ SLEEP(2000);
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ MARK();
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long8.pcm").c_str(), false, true,
+ kFileFormatPcm8kHzFile));
+ MARK(); // mix
+ SLEEP(2000);
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ MARK();
+ TEST_MUSTPASS(!file->StartPlayingFileAsMicrophone(
+ ch, (InStream*)NULL));
+ MARK(); // force error
+ AOK();
+ ANL();
+
+ // IsPlayingFileAsMicrophone: 1 while active, 0 after stop.
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long16.pcm").c_str()));
+ TEST_MUSTPASS(1 != file->IsPlayingFileAsMicrophone(ch));
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ TEST_MUSTPASS(0 != file->IsPlayingFileAsMicrophone(ch));
+ AOK();
+ ANL();
+
+ // ScaleFileAsMicrophonePlayout: 1.0 -> 0.5 -> 0.25 -> 0.0.
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ ch, (output_path + "audio_long16.pcm").c_str()));
+ TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 1.0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.5));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.25));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->ScaleFileAsMicrophonePlayout(ch, 0.0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(ch));
+ MARK();
+ AOK();
+ ANL();
+ }
+ }
+
+ // Record speaker signal to file
+
+ // Aggregate-init order {pltype, plname, plfreq, pacsize, channels,
+ // rate} -- matches the member assignments below; confirm against the
+ // CodecInst declaration.
+ CodecInst fcomp = { 0, "L16", 8000, 80, 1, 128000 };
+
+ TEST(StartRecordingPlayout);
+ ANL();
+ TEST(StopRecordingPlayout);
+ ANL();
+
+ // Default format (no CodecInst) to raw PCM.
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play16.pcm").c_str()));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // L16 @ 8 kHz to WAV.
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "L16");
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play8.wav").c_str(), &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // L16 @ 16 kHz to WAV.
+ fcomp.plfreq = 16000;
+ strcpy(fcomp.plname, "L16");
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play16.wav").c_str(), &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // PCMU @ 8 kHz to WAV.
+ fcomp.pltype = 0;
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "PCMU");
+ fcomp.rate = 64000;
+ fcomp.pacsize = 160;
+ fcomp.channels = 1;
+
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play_pcmu.wav").c_str(),
+ &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // PCMA @ 8 kHz to WAV.
+ fcomp.pltype = 8;
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "PCMA");
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play_pcma.wav").c_str(),
+ &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // iLBC to a compressed file.
+ fcomp.pltype = 97;
+ fcomp.pacsize = 240;
+ fcomp.rate = 13300;
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "ILBC");
+ TEST_MUSTPASS(file->StartRecordingPlayout(0,
+ (output_path + "rec_play.ilbc").c_str(),
+ &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(0));
+ MARK();
+
+ // Channel -1: record the mixed playout of all channels.
+ TEST_MUSTPASS(file->StartRecordingPlayout(
+ -1, (output_path + "rec_play16_mixed.pcm").c_str()));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingPlayout(-1));
+ MARK();
+
+ // TEST_MUSTPASS(file->StopPlayingFileLocally(0)); // Why should this work?
+ // Play back the iLBC recording made above (audible check).
+ TEST_LOG("\nplaying out...\n");
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "rec_play.ilbc").c_str(), false,
+ kFileFormatCompressedFile));
+ MARK();
+ SLEEP(2000);
+
+ AOK();
+ ANL();
+
+ // Record microphone signal to file
+ TEST(StartRecordingMicrophone);
+ ANL();
+ TEST(StopRecordingMicrophone);
+ ANL();
+
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_mic16.pcm").c_str()));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+
+ voe_base_->StopSend(0);
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_mic16.pcm").c_str()));
+ MARK(); // record without sending as well
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+ voe_base_->StartSend(0); // restore sending
+
+ // NOTE(review): fcomp still carries iLBC pltype/pacsize/rate from the
+ // section above; only plfreq/plname are reset here -- confirm the
+ // remaining fields are ignored for L16 WAV recording.
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "L16");
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_play8.wav").c_str(), &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+
+ fcomp.plfreq = 16000;
+ strcpy(fcomp.plname, "L16");
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_play16.wav").c_str(), &fcomp));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+
+ // FT#1810, the following test is to make sure StartRecordingCall will
+ // record both mic and file
+ TEST_LOG("StartRecordingCall, record both mic and file in specific"
+ " channels \n");
+ TEST_LOG("Create maxnumofchannels \n");
+ // Fill the engine: create every remaining channel and mute its playout.
+ for (int i = 1; i < voe_base_->MaxNumOfChannels(); i++) {
+ int ch = voe_base_->CreateChannel();
+ TEST_MUSTPASS(ch == -1);
+ TEST_MUSTPASS(voe_base_->StopPlayout(ch));
+ }
+
+ // Loopback for channel 1 on its own port.
+ TEST_MUSTPASS(voe_base_->SetSendDestination(1, 12356, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(1, 12356));
+ TEST_MUSTPASS(voe_base_->StartReceive(1));
+ TEST_MUSTPASS(voe_base_->StopPlayout(1));
+ TEST_MUSTPASS(voe_base_->StartSend(1));
+ TEST_MUSTPASS(voe_base_->StartPlayout(1));
+
+ TEST_LOG("ALways playing audio_long16.pcm for "
+ "channel 0 in background \n");
+ fcomp.plfreq = 16000;
+ strcpy(fcomp.plname, "L16");
+ TEST_LOG("Recording microphone to L16, please speak \n");
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ 0, (output_path + "audio_long16.pcm").c_str(), true , true));
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_play_ch.wav").c_str(), &fcomp));
+ MARK();
+ SLEEP(3000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_LOG("Playing recording file, you should only hear what you said \n");
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "rec_play_ch.wav").c_str(),
+ false, kFileFormatWavFile));
+ SLEEP(2500);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ TEST_LOG("Recording microphone 0 to L16, please speak \n");
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ -1, (output_path + "audio_long16.pcm").c_str(), true , true));
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_play_ch_0.wav").c_str(), &fcomp));
+ MARK();
+ SLEEP(3000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(-1));
+ TEST_LOG("Playing recording file, you should hear what you said and"
+ " audio_long16.pcm \n");
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "rec_play_ch_0.wav").c_str(),
+ false, kFileFormatWavFile));
+ SLEEP(2500);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ TEST_LOG("Recording microphone to ilbc, please speak \n");
+ strcpy(fcomp.plname, "ilbc");
+ fcomp.plfreq = 8000;
+ fcomp.pacsize = 160;
+ fcomp.rate = 15200;
+ fcomp.channels = 1;
+ fcomp.pltype = 97;
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(
+ 0, (output_path + "audio_long16.pcm").c_str(), true , true));
+ TEST_MUSTPASS(file->StartRecordingMicrophone(
+ (output_path + "rec_play_ch_0.ilbc").c_str(), &fcomp));
+ MARK();
+ SLEEP(3000);
+ TEST_MUSTPASS(file->StopRecordingMicrophone());
+ MARK();
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_LOG("Playing recording file, you should only hear what you said \n");
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "rec_play_ch_0.ilbc").c_str(), false,
+ kFileFormatCompressedFile));
+ SLEEP(2500);
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ // Tear down the extra channels created above (channel 0 stays up).
+ for (int i = 1; i < voe_base_->MaxNumOfChannels(); i++) {
+ TEST_MUSTPASS(voe_base_->DeleteChannel(i));
+ }
+
+ AOK();
+ ANL();
+
+ // Record mixed (speaker + microphone) signal to file
+
+
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+ TEST(StartRecordingSpeakerStereo);
+ ANL();
+ TEST(StopRecordingSpeakerStereo);
+ ANL();
+
+ VoEHardware* hardware = _mgr.HardwarePtr();
+ TEST_MUSTPASS(NULL == hardware);
+ // Re-select devices with streaming stopped, twice in a row. Index -1
+ // is presumably the default-device selector on Windows -- confirm.
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+#if defined(_WIN32)
+ TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ MARK();
+
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+#if defined(_WIN32)
+ TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ AOK();
+ ANL();
+#else
+ TEST_LOG("Skipping stereo record tests -"
+ " WEBRTC_IOS or WEBRTC_ANDROID is defined \n");
+#endif // #if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+ // Conversion between different file formats
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+#endif
+
+ TEST(ConvertPCMToWAV);
+ ANL();
+
+ TEST_MUSTPASS(file->ConvertPCMToWAV(
+ (output_path + "audio_long16.pcm").c_str(),
+ (output_path + "singleUserDemoConv.wav").c_str()));
+ MARK();
+ TEST_MUSTPASS(!file->ConvertPCMToWAV((InStream*)NULL,
+ (OutStream*)NULL));MARK(); // invalid stream handles
+ AOK();
+ ANL();
+
+ TEST(ConvertWAVToPCM);
+ ANL();
+
+ TEST_MUSTPASS(file->ConvertWAVToPCM(
+ (output_path + "audio_long16.wav").c_str(),
+ (output_path + "singleUserDemoConv.pcm").c_str()));
+ MARK();
+ TEST_MUSTPASS(!file->ConvertWAVToPCM((InStream*)NULL, (OutStream*)NULL));
+ MARK(); // invalid stream handles
+ AOK();
+ ANL();
+
+ TEST(ConvertPCMToCompressed);
+ ANL();
+
+ // L16 is not a compressed target -> the call must fail.
+ fcomp.plfreq = 16000;
+ strcpy(fcomp.plname, "L16");
+ TEST_MUSTPASS(!file->ConvertPCMToCompressed(
+ (output_path + "audio_long16.pcm").c_str(),
+ (output_path + "singleUserDemoConv16_dummy.wav").c_str(), &fcomp));
+ MARK(); // should not be supported
+
+ // iLBC conversion is expected to succeed.
+ fcomp.plfreq = 8000;
+ strcpy(fcomp.plname, "ilbc");
+ fcomp.pacsize = 160;
+ fcomp.rate = 15200;
+ fcomp.pltype = 97;
+ fcomp.channels = 1;
+ TEST_MUSTPASS(file->ConvertPCMToCompressed(
+ (output_path + "audio_long16.pcm").c_str(),
+ (output_path + "singleUserDemoConv.ilbc").c_str(), &fcomp));MARK();
+ AOK();ANL();
+
+ TEST(ConvertCompressedToPCM);
+ ANL();
+
+ TEST_MUSTPASS(file->ConvertCompressedToPCM(
+ (output_path + "singleUserDemoConv.ilbc").c_str(),
+ (output_path + "singleUserDemoConv_ilbc.pcm").c_str()));MARK();
+ // A raw PCM file is not valid compressed input -> must fail.
+ TEST_MUSTPASS(!file->ConvertCompressedToPCM(
+ (output_path + "audio_long16.pcm").c_str(),
+ (output_path + "singleUserDemoConv_dummy.pcm").c_str()));MARK();
+ AOK();ANL();
+
+#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID)
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+#endif
+
+ // Misc file functions
+ TEST(GetFileDuration);
+ ANL();
+
+ // Out-parameter for GetFileDuration (duration; units per the VoEFile
+ // API -- not asserted here).
+ int dur;
+
+ TEST_MUSTPASS(file->GetFileDuration(
+ (output_path + "audio_long16.pcm").c_str(), dur));
+ TEST_MUSTPASS(file->GetFileDuration(
+ (output_path + "audio_long8.pcm").c_str(),
+ dur, kFileFormatPcm8kHzFile));
+ TEST_MUSTPASS(file->GetFileDuration(
+ (output_path + "audio_long16.pcm").c_str(),
+ dur, kFileFormatPcm16kHzFile));
+ TEST_MUSTPASS(file->GetFileDuration(
+ (output_path + "audio_long16.wav").c_str(),
+ dur, kFileFormatPcm8kHzFile));
+ TEST_MUSTPASS(file->GetFileDuration(
+ (output_path + "singleUserDemoConv.ilbc").c_str(), dur,
+ kFileFormatCompressedFile));
+
+ AOK();
+ ANL();
+
+ TEST(GetPlaybackPosition);
+ ANL();
+
+ int pos;
+
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + "audio_long16.pcm").c_str()));
+ SLEEP(1000);
+ TEST_MUSTPASS(file->GetPlaybackPosition(0, pos));
+ MARK(); // position should be ~1000
+ SLEEP(1000);
+ TEST_MUSTPASS(file->GetPlaybackPosition(0, pos));
+ MARK(); // position should be ~2000
+ // SLEEP(70*1000);
+ // file is no longer playing
+ // TEST_MUSTPASS(file->GetPlaybackPosition(0, pos)); MARK();
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ AOK();
+ ANL();
+
+ // These tests are related to defect 5136
+ // They play .wav files with different sample freq for 5s
+ char localFiles[7][50] = { "audio_tiny8.wav", "audio_tiny11.wav",
+ "audio_tiny16.wav", "audio_tiny22.wav", "audio_tiny32.wav",
+ "audio_tiny44.wav", "audio_tiny48.wav" };
+ char freq[7][5] = { "8", "11", "16", "22", "32", "44.1", "48" };
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ for (int i = 0; i < 7; i++) {
+ TEST_LOG("Playing file %s, in %s KHz \n", localFiles[i], freq[i]);
+ TEST_MUSTPASS(file->StartPlayingFileLocally(
+ 0, (output_path + localFiles[i]).c_str(),
+ false, kFileFormatWavFile, 1));
+ SLEEP(4500); // The file should not end
+ TEST_MUSTPASS(file->StopPlayingFileLocally(0));
+ }
+
+ // TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0)); // Should not work
+ // Full teardown of the loopback call and the engine.
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ AOK();
+ ANL();
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestHardware
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestHardware() {
+ PrepareTest("Hardware");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEHardware* hardware = _mgr.HardwarePtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile((output_path +
+ "VoEHardware_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ // Set/GetAudioDeviceLayer
+ TEST(Set/GetAudioDeviceLayer);
+ ANL();
+ AudioLayers wantedLayer = kAudioPlatformDefault;
+ AudioLayers givenLayer;
+
+#if defined(_WIN32)
+ wantedLayer = kAudioWindowsCore;
+ hardware->SetAudioDeviceLayer(wantedLayer);
+ TEST_LOG("If you run on XP or below, CoreAudio "
+ "should not be able to set.\n");
+ TEST_LOG("If you run on Vista or above, CoreAudio "
+ "should be able to set.\n");
+ TEST_LOG("Verify that this is the case.\n");
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ if(givenLayer == kAudioWindowsCore)
+ {
+ TEST_LOG("CoreAudio was set\n");
+ }
+ else
+ {
+ TEST_LOG("CoreAudio was *not* set\n");
+ }
+
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ wantedLayer = kAudioWindowsWave;
+ TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_LOG("Wave audio should always be able to set.\n");
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ if(givenLayer == kAudioWindowsWave)
+ {
+ TEST_LOG("Wave audio was set\n");
+ }
+ else
+ {
+ TEST_LOG("Wave audio was not set\n");
+ }
+
+ TEST_MUSTPASS(voe_base_->Terminate());
+ // end _WIN32
+#elif defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+ wantedLayer = kAudioLinuxPulse;
+ TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_LOG("If you run on Linux with no/unsupported PA version, PulseAudio "
+ "7should not be able to set.\n");
+ TEST_LOG("If you run on Linux with supported PA version running, PulseAudio"
+ " should be able to set.\n");
+ TEST_LOG("Verify that this is the case.\n");
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ if(givenLayer == kAudioLinuxPulse)
+ {
+ TEST_LOG("\nPulseAudio was set\n");
+ }
+ else
+ {
+ TEST_LOG("\nPulseAudio was not set\n");
+ }
+
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ wantedLayer = kAudioLinuxAlsa;
+ TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_LOG("ALSA audio should always be able to set.\n");
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ if(givenLayer == kAudioLinuxAlsa)
+ {
+ TEST_LOG("\nALSA audio was set\n");
+ }
+ else
+ {
+ TEST_LOG("\nALSA audio was not set\n");
+ }
+
+ TEST_MUSTPASS(voe_base_->Terminate());
+#endif // defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+ // Invalid arguments should be ignored.
+ wantedLayer = (AudioLayers) 17;
+ TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ ASSERT_TRUE(givenLayer == kAudioPlatformDefault);
+ MARK();
+
+ // Basic usage
+ wantedLayer = kAudioPlatformDefault;
+ TEST_MUSTPASS(hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ TEST_MUSTPASS(givenLayer != wantedLayer);
+ MARK();
+
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ wantedLayer = kAudioPlatformDefault;
+ TEST_MUSTPASS(-1 != hardware->SetAudioDeviceLayer(wantedLayer));
+ TEST_MUSTPASS(VE_ALREADY_INITED != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(hardware->GetAudioDeviceLayer(givenLayer));
+ MARK();
+ switch (givenLayer) {
+ case kAudioPlatformDefault:
+ // already set above
+ break;
+ case kAudioWindowsCore:
+ TEST_LOG("\nRunning kAudioWindowsCore\n");
+ break;
+ case kAudioWindowsWave:
+ TEST_LOG("\nRunning kAudioWindowsWave\n");
+ break;
+ case kAudioLinuxAlsa:
+ TEST_LOG("\nRunning kAudioLinuxAlsa\n");
+ break;
+ case kAudioLinuxPulse:
+ TEST_LOG("\nRunning kAudioLinuxPulse\n");
+ break;
+ default:
+ TEST_LOG("\nERROR: Running unknown audio layer!!\n");
+ return -1;
+ }
+ ANL();
+
+#if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+ // GetRecording/PlayoutDeviceStatus
+ TEST(Getrecording/PlayoutDeviceStatus);
+ ANL();
+ bool isRecAvailable = false;
+ bool isPlayAvailable = false;
+ TEST_MUSTPASS(hardware->GetRecordingDeviceStatus(isRecAvailable));
+ TEST_MUSTPASS(!isRecAvailable);
+ MARK();
+ TEST_MUSTPASS(hardware->GetPlayoutDeviceStatus(isPlayAvailable));
+ TEST_MUSTPASS(!isPlayAvailable);
+ MARK();
+
+ ANL();
+
+ int nRec = 0, nPlay = 0;
+ char devName[128];
+ char guidName[128];
+ int idx;
+
+ TEST_MUSTPASS(hardware->GetNumOfPlayoutDevices(nPlay));
+
+ // GetPlayoutDeviceName
+ TEST(GetPlayoutDeviceName);
+ ANL();
+ TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(nPlay, devName,
+ guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(-2, devName, guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(nPlay+1, devName,
+ guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetPlayoutDeviceName(0, NULL, guidName));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(hardware->GetPlayoutDeviceName(0, devName, NULL));
+
+ // default tests
+ for (idx = 0; idx < nPlay; idx++) {
+ TEST_MUSTPASS(hardware->GetPlayoutDeviceName(idx, devName, guidName));
+ MARK();
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(idx));
+ }
+
+ ANL();
+
+ TEST_MUSTPASS(hardware->GetNumOfRecordingDevices(nRec));
+
+ // GetRecordingDeviceName
+ TEST(GetRecordingDeviceName);
+ ANL();
+ TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(nRec, devName,
+ guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(-2, devName, guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(nRec+1, devName,
+ guidName));
+ TEST_MUSTPASS(VE_CANNOT_RETRIEVE_DEVICE_NAME != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(-1 != hardware->GetRecordingDeviceName(0, NULL, guidName));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(hardware->GetRecordingDeviceName(0, devName, NULL));
+
+ // default tests
+ for (idx = 0; idx < nRec; idx++) {
+ TEST_MUSTPASS(hardware->GetRecordingDeviceName(idx, devName, guidName));
+ MARK();
+ TEST_MUSTPASS(hardware->SetRecordingDevice(idx));
+ }
+ ANL();
+
+ // SetRecordingDevice
+ TEST(SetRecordingDevice);
+ ANL();
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+ MARK();
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0, kStereoLeft));
+ MARK();
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0, kStereoRight));
+ MARK();
+ ANL();
+
+ // SetPlayoutDevice
+ TEST(SetPlayoutDevice);
+ ANL();
+#if defined(_WIN32)
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(-1)); MARK();
+#else
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+ MARK();
+#endif
+ ANL();
+#endif // #if !defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID)
+#if defined(WEBRTC_IOS)
+ TEST(ResetSoundDevice); ANL();
+
+ for (int p=0; p<=60; p+=20)
+ {
+ TEST_LOG("Resetting sound device several times with pause %d ms\n", p);
+ for (int l=0; l<50; ++l)
+ {
+ TEST_MUSTPASS(hardware->ResetAudioDevice()); MARK();
+ SLEEP(p);
+ }
+ ANL();
+ }
+
+ TEST_LOG("Start streaming - verify the audio after each batch of resets \n");
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0,8000));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(2000);
+
+ SLEEP(2000);
+ for (int p=0; p<=60; p+=20)
+ {
+ TEST_LOG("Resetting sound device several time with pause %d ms\n", p);
+ for (int l=0; l<20; ++l)
+ {
+ TEST_MUSTPASS(hardware->ResetAudioDevice()); MARK();
+ SLEEP(p);
+ }
+ ANL();
+ SLEEP(2000);
+ }
+
+ TEST_LOG("Stop streaming \n");
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+#endif // defined(WEBRTC_IOS)
+#ifdef WEBRTC_IOS
+ TEST_LOG("\nNOTE: Always run hardware tests also without extended tests "
+ "enabled,\nsince the extended tests are pre-streaming tests only.\n");
+#endif
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ ANL();
+ AOK();
+
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestNetEqStats
+// ----------------------------------------------------------------------------
+
+// Placeholder test for the VoENetEqStats sub-API. There are currently no
+// extended (pre/post-streaming) tests for NetEQ statistics; the "(!EMPTY!)"
+// tag in the banner makes the intentional emptiness visible in the test log.
+int VoEExtendedTest::TestNetEqStats() {
+  PrepareTest("NetEqStats (!EMPTY!)");
+
+  AOK();  // log an overall "OK" for this (empty) section
+  ANL();  // append a newline to keep the log readable
+
+  // 0 signals success to the extended-test driver.
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestNetwork
+//
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestNetwork() {
+ PrepareTest("Network");
+
+#ifdef WEBRTC_ANDROID
+ int sleepTime = 200;
+ int sleepTime2 = 250;
+#elif defined(WEBRTC_IOS) // WEBRTC_IOS needs more delay for getSourceInfo()
+ int sleepTime = 150;
+ int sleepTime2 = 200;
+#else
+ int sleepTime = 100;
+ int sleepTime2 = 200;
+#endif
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoENetwork* netw = _mgr.NetworkPtr();
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile((output_path +
+ "VoENetwork_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ TEST_MUSTPASS(voe_base_->Init());
+
+ // ------------------------------------------------------------------------
+ // >> GetLocalIP
+ //
+ // State: VE initialized, no existing channels
+ TEST(GetLocalIP);
+ ANL();
+
+#ifdef WEBRTC_IOS
+ // Should fail
+ TEST_MUSTPASS(!netw->GetLocalIP(NULL, 0)); MARK();
+ TEST_ERROR(VE_FUNC_NOT_SUPPORTED);
+
+ ANL();
+ printf("NOTE: Local IP must be set in source code (line %d) \n",
+ __LINE__ + 1);
+ const char* localIP = "192.168.1.4";
+
+#else
+ // Must be big enough so that we can print an IPv6 address.
+ char localIP[256] = {0};
+
+ // invalid parameter
+ TEST_MUSTPASS(!netw->GetLocalIP(NULL));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+
+ // default function calls (IPv4)
+ TEST_MUSTPASS(netw->GetLocalIP(localIP));
+ MARK();
+ TEST_LOG("[local IPv4: %s]\n", localIP);
+ TEST_MUSTPASS(netw->GetLocalIP(localIP));
+ MARK();
+
+#if !defined(WEBRTC_MAC) && !defined(WEBRTC_ANDROID)
+ // default function calls (IPv6)
+ TEST_MUSTPASS(netw->GetLocalIP(localIP, true));
+ MARK();
+ TEST_LOG("[local IPv6: %s]\n", localIP);
+ TEST_MUSTPASS(netw->GetLocalIP(localIP, true));
+ MARK();
+#endif
+
+ // one last call to ensure that local
+ TEST_MUSTPASS(netw->GetLocalIP(localIP));
+ MARK();
+#endif
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of GetLocalIP
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> GetSourceInfo
+ //
+ // - VE initialized
+ // - no existing channels
+ TEST(GetSourceInfo);
+ ANL();
+
+ int rtpPort(0);
+ int rtcpPort(0);
+ char ipaddr[64] = { 0 };
+ ExtendedTestTransport* ptrTransport(NULL);
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // NULL as input string
+ TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, NULL));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+
+ // call when external transport is enabled
+ ptrTransport = new ExtendedTestTransport(netw);
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ delete ptrTransport;
+
+ // call when external transport is disabled (no packet received yet)
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(rtcpPort != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+ // send and receive packets with default settings for a while
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime2); // does not guarantee RTCP
+
+ // verify remote parameters (exclude RTCP)
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 8000);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // ensure that valid results are maintained after StopListen
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 8000);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // verify that results are maintained after new call to SetLocalReceiver
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 8000);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // STATE: not listening, not sending
+ // send and receive packets with other settings for a while
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 9005));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 9005, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ // STATE: listening, sending
+
+ // verify new remote parameters
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 9005);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // restart sending to and from local IP
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 9005, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 9005, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ // verify new remote parameters
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 9005);
+ TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0); // should not be "127.0.0.1"
+
+ // use non-default source port in outgoing packets
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 9005));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 9005, "127.0.0.1", 9010));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ // verify new remote parameters
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 9010);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // STATE: listening and sending using an extra local socket
+
+ // stop/start sending
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ // verify that the unique source port is maintained for the extra socket
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 9010);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // set new source port for outgoing packets (9010 -> 9020)
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 9005, "127.0.0.1", 9020));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+#ifdef WEBRTC_IOS
+ SLEEP(500); // Need extra pause for some reason
+#endif
+
+ // verify that the unique source port is set for the new extra socket
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 9020);
+ // STATE: listening and sending using an extra local socket
+
+ // remove extra send socket and restart call again
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0)); // delete channel => destroys the
+ // extra socket
+ TEST_MUSTPASS(voe_base_->CreateChannel()); // new channel uses one socket only
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000)); // use new port as well
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ // verify that remote info is correct
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 8000);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // STATE: listening and sending using shared socket
+
+ // use non-default source port in outgoing packets to create extra send
+ // socket
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 7000));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 7000, "127.0.0.1", 7010));
+ // RTP src is 7010 => RTCP src = 7011
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ // verify new remote parameters
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 7010);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+
+ // check RTCP port as well (should be 7010 + 1 = 7011)
+ Sleep(8000, true);
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 7010);
+ TEST_MUSTPASS(rtcpPort != 7011);
+ TEST_MUSTPASS(strcmp(ipaddr, "127.0.0.1") != 0);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of GetSourceInfo
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetExternalTransport
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ //
+ TEST(SetExternalTransport);
+ ANL();
+
+ ptrTransport = new ExtendedTestTransport(netw);
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->DeRegisterExternalTransport(0));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // different valid call combinations
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK(); // must deregister first
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+
+ // STATE: external transport is disabled
+
+ // initialize sending and ensure that external transport can't be enabled
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 1234, "127.0.0.2"));
+ TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_ERROR(VE_SEND_SOCKETS_CONFLICT);
+
+ // restart channel to ensure that "initialized sender" state is cleared
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // initialize receiving and ensure that external transport can't be enabled
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 5678));
+ TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_ERROR(VE_RECEIVE_SOCKETS_CONFLICT);
+
+ // restart channel to ensure that "initialized receiver" state is cleared
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // enable external transport and verify that "emulated loopback" works
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartSend(0)); // should only start recording
+ TEST_MUSTPASS(!netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK(); // should fail
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ Play(0, 2000, true, true); // play file as mic and verify loopback audio
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+
+ // STATE: external transport is disabled
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+ int testError = VE_FUNC_NOT_SUPPORTED;
+#else
+ int testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+
+ // check all APIs that should fail when external transport is enabled
+ int DSCP, priority, serviceType, overrideDSCP, nBytes(0);
+ bool useSetSockopt, enabled;
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ MARK();
+ TEST_MUSTPASS(!voe_base_->SetLocalReceiver(0, 12345));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!voe_base_->GetLocalReceiver(0, rtpPort, rtcpPort, ipaddr));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!voe_base_->GetSendDestination(0, rtpPort, ipaddr, rtpPort,
+ rtcpPort));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!netw->EnableIPv6(0))
+ TEST_ERROR(testError);
+ TEST_MUSTPASS(netw->IPv6IsEnabled(0) != false)
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 12346));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+
+ // modified in VoE 3.4 (can also be called with external transport enabled)
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+#if (!defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)) || \
+ defined(WEBRTC_EXTERNAL_TRANSPORT)
+ testError = VE_FUNC_NOT_SUPPORTED;
+#else
+ testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 0));
+ TEST_ERROR(testError);
+ TEST_MUSTPASS(!netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_ERROR(testError);
+#if !defined(_WIN32) || defined(WEBRTC_EXTERNAL_TRANSPORT)
+ testError = VE_FUNC_NOT_SUPPORTED;
+#else
+ testError = VE_EXTERNAL_TRANSPORT_ENABLED;
+#endif
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, false, 0));
+ TEST_ERROR(testError);
+ TEST_MUSTPASS(!netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_ERROR(testError);
+ char dummy[1] = { 'a' };
+ TEST_MUSTPASS(!netw->SendUDPPacket(0, dummy, 1, nBytes));
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+
+ // always disable external transport before deleting the Transport object;
+ // will lead to crash for RTCP transmission otherwise
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ MARK();
+ delete ptrTransport;
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetExternalTransport
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> EnableIPv6
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ // - NOTE: set _ENABLE_IPV6_TESTS_ to include these tests
+ // - http://www.microsoft.com/resources/documentation/windows/xp/all/
+ // proddocs/en-us/sag_ip_v6_pro_rt_enable.mspx?mfr=true
+ // >> ipv6 install
+ // >> ipv6 [-v] if [IfIndex]
+ // >> ping6 ::1
+ // >> ping6 fe80::1
+
+#ifdef _ENABLE_IPV6_TESTS_
+
+ TEST(EnableIPv6); ANL();
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // call with enabled external transport
+ ptrTransport = new ExtendedTestTransport(netw);
+ TEST_MUSTPASS(netw->RegisterExternalTransport(0, *ptrTransport));
+ TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK();
+ TEST_ERROR(VE_EXTERNAL_TRANSPORT_ENABLED);
+ TEST_MUSTPASS(netw->DeRegisterExternalTransport(0));
+ delete ptrTransport;
+
+ // Test "locking" to IPv4
+ TEST_MUSTPASS(netw->IPv6IsEnabled(0)); MARK(); // After this call we cannot
+ // enable IPv6
+ TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK(); // Should fail
+
+ // Check that IPv6 address is invalid
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(0, 8000, "::1")); MARK(); // fail
+
+ // New channel
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // valid default call
+ TEST_MUSTPASS(netw->EnableIPv6(0)); MARK();
+ TEST_MUSTPASS(netw->GetLocalIP(localIP)); MARK(); // should still read IPv4
+ TEST_LOG("[local IPv4: %s]", localIP);
+
+ // ensure that Ipv6 is enabled
+ TEST_MUSTPASS(netw->IPv6IsEnabled(0) != true);
+
+ // check that IPv4 address is invalid
+ TEST_MUSTPASS(!voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_ERROR(VE_INVALID_IP_ADDRESS);
+
+ // verify usage of IPv6 loopback address
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ // IPv6 loopback address is 0:0:0:0:0:0:0:1
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "::1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(!netw->EnableIPv6(0)); MARK(); // Should fail
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true);
+ ANL();
+
+ // Restart channel
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ TEST_MUSTPASS(netw->EnableIPv6(0)); MARK();
+ // ensure that Ipv6 is enabled
+ TEST_MUSTPASS(netw->IPv6IsEnabled(0) != true);
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ // IPv6 loopback address is 0:0:0:0:0:0:0:1
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "::1"));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(), true,
+ true);
+ SLEEP(500); // ensure that we receive some packets
+
+ // SetSourceFilter and GetSourceFilter
+ TEST(SetSourceFilter and GetSourceFilter for IPv6); ANL();
+ char sourceIp[64] =
+ { 0};
+ char filterIp[64] =
+ { 0};
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, sourceIp));
+ TEST_LOG("Source port: %d \n", rtpPort);
+ TEST_LOG("Source RTCP port: %d \n", rtcpPort);
+ TEST_LOG("Source IP: %s \n", sourceIp);
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_LOG("Filter port RTP: %d \n", rtpPort);
+ TEST_LOG("Filter port RTCP: %d \n", rtcpPort);
+ TEST_LOG("Filter IP: %s \n", filterIp);
+ TEST_MUSTPASS(0 != rtpPort);
+ TEST_MUSTPASS(0 != rtcpPort);
+ TEST_MUSTPASS(filterIp[0] != '\0');
+ TEST_LOG("Set filter IP to %s => should hear audio\n", sourceIp);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, sourceIp));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_MUSTPASS(0 != rtpPort);
+ TEST_MUSTPASS(0 != rtcpPort);
+ TEST_MUSTPASS(_stricmp(filterIp, sourceIp));
+ SLEEP(1500);
+ TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_MUSTPASS(_stricmp(filterIp, "::10:10:10"));
+ SLEEP(1500);
+ TEST_LOG("Disable IP filter => should hear audio again\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::0"));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_MUSTPASS(_stricmp(filterIp, "::"));
+ SLEEP(1500);
+ TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+ SLEEP(1500);
+ TEST_LOG("Disable IP filter => should hear audio again\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_MUSTPASS(filterIp[0] != '\0');
+ SLEEP(1500);
+ TEST_LOG("Set filter IP to ::10:10:10 => should *not* hear audio\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::10:10:10"));
+ SLEEP(1500);
+ TEST_LOG("Disable IP filter => should hear audio again\n");
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "::"));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, filterIp));
+ TEST_MUSTPASS(_stricmp(filterIp, "::"));
+ SLEEP(1500);
+
+ file->StopPlayingFileAsMicrophone(0);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+#endif // #ifdef _ENABLE_IPV6_TESTS_
+ // >> end of EnableIPv6
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetSourceFilter
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ TEST(SetSourceFilter);
+ ANL();
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // invalid parameters
+ TEST_MUSTPASS(!netw->SetSourceFilter(0, 65536));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR);
+ TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 65536));
+ MARK();
+ TEST_ERROR(VE_INVALID_PORT_NMBR);
+ TEST_MUSTPASS(!netw->SetSourceFilter(0, 12345, 12346, "300.300.300.300"));
+ MARK();
+ TEST_ERROR(VE_INVALID_IP_ADDRESS);
+
+ // STATE: RTP filter port is 12345, RTCP filter port is 12346
+
+ // disable all filters and ensure that media is received
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 2000, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 2000);
+ TEST_MUSTPASS(rtcpPort != 2001);
+ TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+ // clear states and restart loopback session
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0)); // clear source info state
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // set RTP filter to port 2002 and verify that source 2000 is blocked
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 2002, 0, NULL));;
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 2000, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ // ensure that received packets originates from 2002 and that they now pass
+ // the filter
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ // RTP source is 2002
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 2002, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 2002, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 2002);
+ TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+ // clear states and restart loopback session
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0)); // clear source info state
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // set IP filter to local IP and verify that default loopback stream is
+ // blocked
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, localIP));;
+ MARK();
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 2000));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 2000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ // ensure that received packets originates from the local IP and that they
+ // now pass the filter
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ // should pass the filter
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 2000, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 2000, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ TEST_MUSTPASS(netw->GetSourceInfo(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 2000);
+ TEST_MUSTPASS(strcmp(ipaddr, localIP) != 0);
+
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+
+ // STATE: no active media, IP filter is active
+
+ // disable all filters
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));;
+ MARK();
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(rtcpPort != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetSourceFilter
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> GetSourceFilter
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ TEST(GetSourceFilter);
+ ANL();
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // invalid input parameters
+ TEST_MUSTPASS(!netw->GetSourceFilter(0, rtpPort, rtcpPort, NULL));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+
+ // valid call without any filter set
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(rtcpPort != 0);
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ // STATE: no active media and no enabled filters
+
+ // set different filters and verify that they "bite"
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 54321, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 54321);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtpPort != 0);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 15425, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtcpPort != 15425);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(rtcpPort != 0);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "192.168.199.19"));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(strcmp(ipaddr, "192.168.199.19") != 0);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, "0.0.0.0"));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(strcmp(ipaddr, "0.0.0.0") != 0);
+ TEST_MUSTPASS(netw->SetSourceFilter(0, 0, 0, NULL));
+ TEST_MUSTPASS(netw->GetSourceFilter(0, rtpPort, rtcpPort, ipaddr));
+ MARK();
+ TEST_MUSTPASS(strcmp(ipaddr, "") != 0);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of GetSourceFilter
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> RegisterDeadOrAliveObserver
+ // >> DeRegisterDeadOrAliveObserver
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ TEST(RegisterDeadOrAliveObserver);
+ ANL();
+ TEST(DeRegisterDeadOrAliveObserver);
+ ANL();
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->RegisterDeadOrAliveObserver(0, *this));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+ MARK();
+ TEST_MUSTPASS(!netw->RegisterDeadOrAliveObserver(0, *this));
+ MARK(); // already registered
+ TEST_ERROR(VE_INVALID_OPERATION);
+ TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+ MARK();
+ TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+ MARK(); // OK to do it again
+ TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+ MARK();
+ TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+ MARK();
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+ // STATE: dead-or-alive observer is disabled
+
+ // >> end of RegisterDeadOrAliveObserver
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetPeriodicDeadOrAliveStatus
+ // >> GetPeriodicDeadOrAliveStatus
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+
+ // call without valid channel
+ TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, false));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // Invalid parameters
+ TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, true, 0));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(0, true, 151));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetPeriodicDeadOrAliveStatus(1, true, 10));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ int sampleTime(0);
+
+ // Valid parameters
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+ MARK();
+ TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(sampleTime != 1);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 150));
+ MARK();
+ TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(sampleTime != 150);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+ MARK();
+ TEST_MUSTPASS(netw->GetPeriodicDeadOrAliveStatus(0, enabled, sampleTime));
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(sampleTime != 150); // ensure last set time isnt modified
+
+ StartMedia(0, 2000, true, true, true);
+
+ // STATE: full duplex media is active
+
+ // test the dead-or-alive mechanism
+ TEST_MUSTPASS(netw->RegisterDeadOrAliveObserver(0, *this));
+ MARK();
+ TEST_LOG("\nVerify that Alive callbacks are received (dT=2sec): ");
+ fflush(NULL);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 2));
+ SLEEP(6000);
+ TEST_LOG("\nChange dT to 1 second: ");
+ fflush(NULL);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 1));
+ SLEEP(6000);
+ TEST_LOG("\nDisable dead-or-alive callbacks: ");
+ fflush(NULL);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+ SLEEP(6000);
+ TEST_LOG("\nStop sending and enable callbacks again.\n");
+ TEST_LOG("Verify that Dead callbacks are received (dT=2sec): ");
+ fflush(NULL);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, true, 2));
+ SLEEP(6000);
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_LOG("\nRestart sending.\n");
+ TEST_LOG("Verify that Alive callbacks are received again (dT=2sec): ");
+ fflush(NULL);
+ SLEEP(6000);
+ TEST_LOG("\nDisable dead-or-alive callbacks.");
+ fflush(NULL);
+ TEST_MUSTPASS(netw->SetPeriodicDeadOrAliveStatus(0, false));
+ TEST_MUSTPASS(netw->DeRegisterDeadOrAliveObserver(0));
+ MARK();
+
+ StopMedia(0);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetPeriodicDeadOrAliveStatus
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetPacketTimeoutNotification
+ // >> GetPacketTimeoutNotification
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ // - NOTE: dynamic tests are performed in standard test
+
+ int timeOut(0);
+
+ TEST(SetPacketTimeoutNotification);
+ ANL();
+ TEST(GetPacketTimeoutNotification);
+ ANL();
+
+ // call without existing valid channel
+ TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, false));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // invalid function calls
+ TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, true, 0));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetPacketTimeoutNotification(0, true, 151));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+
+ // valid function calls (no active media)
+ TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+ MARK();
+ TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(timeOut != 2);
+ TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, false));
+ MARK();
+ TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+ MARK();
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 10));
+ MARK();
+ TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(timeOut != 10);
+ TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, true, 2));
+ MARK();
+ TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(timeOut != 2);
+ TEST_MUSTPASS(netw->SetPacketTimeoutNotification(0, false));
+ MARK();
+ TEST_MUSTPASS(netw->GetPacketTimeoutNotification(0, enabled, timeOut));
+ MARK();
+ TEST_MUSTPASS(enabled != false);
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL();
+ AOK();
+ ANL();
+ ANL();
+
+ // >> end of SetPacketTimeoutNotification
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SendUDPPacket
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+
+
+ // >> end of SendUDPPacket
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetSendTOS
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ TEST(SetSendTOS);
+ ANL();
+#if defined(_WIN32) || defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+
+ // call without existing valid channel
+
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 0)); MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // trivial invalid function calls
+ TEST_MUSTPASS(!netw->SetSendTOS(0, -1)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 64)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -2)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 1, 8)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 1)); MARK();
+ TEST_ERROR(VE_SOCKET_ERROR); // must create sockets first
+
+#ifdef _WIN32
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 3000));
+
+ // enable ToS using SetSockopt (should work without local binding)
+ TEST_MUSTPASS(netw->SetSendTOS(0, 1, -1, true)); MARK();
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+ TEST_MUSTPASS(DSCP != 1);
+ TEST_MUSTPASS(priority != 0);
+ TEST_MUSTPASS(useSetSockopt != true);
+
+ // try to disable SetSockopt while ToS is enabled (should fail)
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -1, false)); MARK();
+ TEST_ERROR(VE_TOS_INVALID); // must disable ToS before enabling SetSockopt
+
+ // disable ToS to be able to stop using SetSockopt
+ TEST_MUSTPASS(netw->SetSendTOS(0, 0, -1, true)); MARK(); // disable ToS
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+ TEST_MUSTPASS(DSCP != 0);
+ TEST_MUSTPASS(priority != 0);
+ TEST_MUSTPASS(useSetSockopt != true);
+
+ // to use the "non-SetSockopt" method, local binding is required,
+ // trying without it should fail
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 1, -1, false)); MARK();
+ TEST_ERROR(VE_TOS_ERROR); // must bind to local IP first
+
+ // bind to local IP and try again (should work this time)
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345, kVoEDefault, localIP));
+ TEST_LOG("\nThis test needs to be run as administrator\n");
+ TEST_MUSTPASS(netw->SetSendTOS(0, 1, -1, false)); MARK();
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+ TEST_MUSTPASS(DSCP != 1);
+ TEST_MUSTPASS(priority != 0);
+ TEST_MUSTPASS(useSetSockopt != false);
+
+ // STATE: bound to local IP, local port is 12345 and DSCP is 1 (not using
+ // SetSockopt)
+
+ // verify loopback audio with the current settings
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+ // Send to remote destination and verify the DSCP using Wireshark.
+ // Use filter ip.src == "RemoteIP".
+ TEST_LOG("\nUse Wireshark and verify a correctly received DSCP at the "
+ "remote side!\n");
+ TEST_LOG("Sending approx. 5 packets to %s:%d for each DSCP below:\n",
+ RemoteIP, RemotePort);
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, RemotePort, RemoteIP));
+ TEST_LOG(" DSCP is set to 0x%02x\n", 1);
+ SLEEP(100);
+
+ // Change the DSCP while sending is active and verify on remote side.
+ TEST_MUSTPASS(netw->SetSendTOS(0, 2));
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x\n", DSCP);
+ SLEEP(100);
+
+ // Change the DSCP while sending is active and verify on remote side.
+ TEST_MUSTPASS(netw->SetSendTOS(0, 63));
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x\n", DSCP);
+ SLEEP(100);
+
+ // stop and resume sending
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x\n", DSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 0));
+#endif // _SEND_TO_REMOTE_IP_
+ // Windows priority tests (priority cannot be set using setsockopt on Win)
+ TEST_LOG("Testing priority\n");
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 0, 3, true)); // Should fail
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(netw->SetSendTOS(0, 0, 3, false));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 1, 3, false));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+#endif // _WIN32
+ // STATE: no media, disabled ToS, no defined receiver
+
+ // Repeat tests above but using setsockopt() this time.
+ // Binding to local IP should not be required.
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345, kVoEDefault));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 10, -1, true)); MARK();
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt)); MARK();
+ TEST_MUSTPASS(DSCP != 10);
+ TEST_MUSTPASS(priority != 0);
+ TEST_MUSTPASS(useSetSockopt != true);
+
+ // STATE: *not* bound to local IP, local port is 12345 and DSCP is 10
+ // (using SetSockopt)
+
+ // verify loopback audio with the current settings
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+ // Send to remote destination and verify the DSCP using Wireshark.
+ // Use filter ip.src == "RemoteIP".
+ TEST_LOG("\nUse Wireshark and verify a correctly received DSCP at the"
+ " remote side!\n");
+ TEST_LOG("Sending approx. 5 packets to %s:%d for each DSCP below:\n",
+ RemoteIP, RemotePort);
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, RemotePort, RemoteIP));
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+ SLEEP(100);
+
+ // Change the DSCP while sending is active and verify on remote side.
+ TEST_MUSTPASS(netw->SetSendTOS(0, 20, -1, true)); // use setsockopt()
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+ SLEEP(100);
+
+ // Change the DSCP while sending is active and verify on remote side.
+ TEST_MUSTPASS(netw->SetSendTOS(0, 61, -1, true)); // use setsockopt()
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+ SLEEP(100);
+
+ // stop and resume sending
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(netw->GetSendTOS(0, DSCP, priority, useSetSockopt));
+ TEST_LOG(" DSCP is set to 0x%02x (setsockopt)\n", DSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 0, -1, true));
+#endif // _SEND_TO_REMOTE_IP_
+#if defined(WEBRTC_LINUX)
+ // Linux priority tests (using setsockopt)
+ TEST_LOG("Testing priority\n");
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, localIP));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 0, 3, true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(netw->SetSendTOS(0, 1, 3, true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ Play(0, 2000, true, true); // file should be played out here...
+#endif // #if defined(WEBRTC_LINUX)
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+ // Fail tests for platforms other than Windows and Linux
+ TEST_MUSTPASS(!netw->SetSendTOS(0, 0, 3, false)); // Should fail
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+#endif // #if !defined(_WIN32) && !defined(WEBRTC_LINUX)
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL(); AOK(); ANL(); ANL();
+
+ // END #if defined(_WIN32) || defined(WEBRTC_MAC) || defined(WEBRTC_LINUX)
+#else
+ TEST_LOG("Skipping ToS tests - _WIN32, LINUX, MAC is not defined or "
+ "WEBRTC_ANDROID is defined");
+#endif
+
+ // >> end of SetSendTOS
+ // ------------------------------------------------------------------------
+
+ // ------------------------------------------------------------------------
+ // >> SetSendGQoS (Windows only)
+ //
+ // - VE initialized
+ // - no existing channels
+ // - no media
+ //
+ // From qos.h:
+ //
+ // #define SERVICETYPE_NOTRAFFIC 0x00000000
+ // #define SERVICETYPE_BESTEFFORT 0x00000001 (*)
+ // #define SERVICETYPE_CONTROLLEDLOAD 0x00000002 (*)
+ // #define SERVICETYPE_GUARANTEED 0x00000003 (*)
+ // #define SERVICETYPE_NETWORK_UNAVAILABLE 0x00000004
+ // #define SERVICETYPE_GENERAL_INFORMATION 0x00000005
+ // #define SERVICETYPE_NOCHANGE 0x00000006
+ // #define SERVICETYPE_NONCONFORMING 0x00000009
+ // #define SERVICETYPE_NETWORK_CONTROL 0x0000000A
+ // #define SERVICETYPE_QUALITATIVE 0x0000000D (*)
+ //
+ // #define SERVICE_BESTEFFORT 0x80010000
+ // #define SERVICE_CONTROLLEDLOAD 0x80020000
+ // #define SERVICE_GUARANTEED 0x80040000
+ // #define SERVICE_QUALITATIVE 0x80200000
+ //
+ // (*) supported in WEBRTC VoE
+ TEST(SetSendGQoS);
+ ANL();
+#ifdef _WIN32
+
+ // call without existing valid channel
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, false, 0)); MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+
+ // supported service type but no sockets
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+ TEST_ERROR(VE_SOCKETS_NOT_INITED);
+
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+
+ // supported service type but sender is not initialized
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+ TEST_ERROR(VE_DESTINATION_NOT_INITED);
+
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+
+ // invalid service types
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NOTRAFFIC)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NETWORK_UNAVAILABLE));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_GENERAL_INFORMATION));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NOCHANGE)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NONCONFORMING));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_NETWORK_CONTROL));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_BESTEFFORT)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_CONTROLLEDLOAD)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_GUARANTEED)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICE_QUALITATIVE)); MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+
+ // Is ToS enabled here?
+
+ // Settings which don't require binding to local IP
+
+ // set SERVICETYPE_BESTEFFORT
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(serviceType != SERVICETYPE_BESTEFFORT);
+ TEST_MUSTPASS(overrideDSCP != false);
+
+ // set SERVICETYPE_CONTROLLEDLOAD
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD));
+ MARK();
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(serviceType != SERVICETYPE_CONTROLLEDLOAD);
+ TEST_MUSTPASS(overrideDSCP != false);
+
+ // set SERVICETYPE_GUARANTEED
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED)); MARK();
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(serviceType != SERVICETYPE_GUARANTEED);
+ TEST_MUSTPASS(overrideDSCP != false);
+
+ // set SERVICETYPE_QUALITATIVE
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE)); MARK();
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ MARK();
+ TEST_MUSTPASS(enabled != true);
+ TEST_MUSTPASS(serviceType != SERVICETYPE_QUALITATIVE);
+ TEST_MUSTPASS(overrideDSCP != false);
+
+ // disable GQoS
+ TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0)); MARK();
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ MARK();
+ TEST_MUSTPASS(enabled != false);
+ TEST_MUSTPASS(serviceType != SERVICETYPE_QUALITATIVE);
+ TEST_MUSTPASS(overrideDSCP != false);
+
+ // STATE: disabled GQoS, sockets exist, sending side is initialized, no media
+
+ // Loopback tests using the four different GQoS settings
+
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT)); MARK();
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ ANL();
+ TEST_LOG("[SERVICETYPE_BESTEFFORT]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD)); MARK();
+ ANL();
+ TEST_LOG("[SERVICETYPE_CONTROLLEDLOAD]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED)); MARK();
+ ANL();
+ TEST_LOG("[SERVICETYPE_GUARANTEED]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE)); MARK();
+ ANL();
+ TEST_LOG("[SERVICETYPE_QUALITATIVE]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+#ifdef _SEND_TO_REMOTE_IP_
+ // Send to remote destination and verify the DSCP mapping using Wireshark.
+ // Use filter ip.src == "RemoteIP".
+
+ // Modify the send destination on the fly
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, RemotePort, RemoteIP));
+
+ TEST_LOG("\nUse Wireshark and verify a correctly received DSCP mapping at"
+ " the remote side!\n");
+ TEST_LOG("Sending approx. 5 packets to %s:%d for each GQoS setting below:\n",
+ RemoteIP, RemotePort);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_BESTEFFORT (0x%02x), should "
+ "be mapped to DSCP = 0x00\n", serviceType);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_CONTROLLEDLOAD));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_CONTROLLEDLOAD (0x%02x), "
+ "should be mapped to DSCP = 0x18\n", serviceType);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+ TEST_LOG(" QoS is disabled, should give DSCP = 0x%02x\n", 0);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_GUARANTEED));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_GUARANTEED (0x%02x), should "
+ "be mapped to DSCP = 0x28\n", serviceType);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_QUALITATIVE));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_QUALITATIVE (0x%02x), should"
+ " be mapped to DSCP = 0x00\n", serviceType);
+ SLEEP(100);
+#endif // _SEND_TO_REMOTE_IP_
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+
+ // STATE: sockets exists, sending side is initialized, no media
+
+ // Repeat test above but this time using overrideDSCP.
+
+ // Some initial loopback tests.
+ // NOTE - override DSCP requires binding to local IP.
+
+ // should not work since QoS is enabled
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+ MARK();
+ TEST_ERROR(VE_TOS_GQOS_CONFLICT);
+
+ // disable QoS and try to override again (should fail again since local
+ // binding is not done yet)
+ TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+ TEST_MUSTPASS(!netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+ MARK();
+ TEST_ERROR(VE_GQOS_ERROR);
+
+ // make proper settings and try again (should work this time)
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345, kVoEDefault, localIP));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, localIP));
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 3));
+ MARK();
+
+ // Now, let's try some loopback tests using override DSCP
+
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ ANL();
+ TEST_LOG("[overrideDSCP=3]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 17));
+ MARK();
+ ANL();
+ TEST_LOG("[overrideDSCP=17]");
+ Play(0, 2000, true, true); // file should be played out here...
+
+ // And finally, send to remote side as well to verify that the new mapping
+ // works as it should.
+
+#ifdef _SEND_TO_REMOTE_IP_
+ // Modify the send destination on the fly
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, RemotePort, RemoteIP));
+
+ TEST_LOG("\nUse Wireshark and verify a correctly received DSCP mapping at"
+ " the remote side!\n");
+ TEST_LOG("Sending approx. 5 packets to %s:%d for each GQoS setting below:\n",
+ RemoteIP, RemotePort);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 18));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+ "overrided to DSCP = 0x%02x\n", overrideDSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 62));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+ "overrided to DSCP = 0x%02x\n", overrideDSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 32));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+ "overrided to DSCP = 0x%02x\n", overrideDSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, true, SERVICETYPE_BESTEFFORT, 1));
+ TEST_MUSTPASS(netw->GetSendGQoS(0, enabled, serviceType, overrideDSCP));
+ TEST_LOG(" serviceType is set to SERVICETYPE_BESTEFFORT, should be "
+ "overrided to DSCP = 0x%02x\n", overrideDSCP);
+ SLEEP(100);
+ TEST_MUSTPASS(netw->SetSendGQoS(0, false, 0));
+ TEST_LOG(" QoS is disabled, should give DSCP = 0x%02x\n", 0);
+ SLEEP(100);
+#endif // _SEND_TO_REMOTE_IP_
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ ANL(); AOK(); ANL(); ANL();
+
+#else
+ TEST_LOG("Skipping GQoS tests - _WIN32 is not defined");
+#endif // #ifdef _WIN32
+ // >> end of SetSendGQoS
+ // ------------------------------------------------------------------------
+
+ if (file) {
+ file->StopPlayingFileAsMicrophone(0);
+ }
+ voe_base_->StopSend(0);
+ voe_base_->StopPlayout(0);
+ voe_base_->StopReceive(0);
+ voe_base_->DeleteChannel(0);
+ voe_base_->Terminate();
+
+ ANL();
+ AOK();
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestRTP_RTCP
+// ----------------------------------------------------------------------------
+
+// Used to validate packets during the RTP audio level indication test.
+class RTPAudioTransport: public Transport {
+public:
+
+ RTPAudioTransport() :
+ mute_(false) {
+ }
+
+ virtual ~RTPAudioTransport() {
+ }
+
+ // When true, SendPacket() requires every packet to carry the "muted"
+ // audio-level encoding: VAD flag 0 and level 127 (printed as -127).
+ void set_mute(bool mute) {
+ mute_ = mute;
+ }
+ bool mute() const {
+ return mute_;
+ }
+
+ // Validates that each outgoing RTP packet carries a one-byte header
+ // extension block (two-byte profile marker 0xBE 0xDE, length 1) whose
+ // element has user-defined ID 1 and a single data byte, then prints
+ // the VAD flag and level; channel 1 output is indented past channel 0.
+ // NOTE(review): assumes a fixed 12-byte RTP header, i.e. no CSRCs —
+ // holds for packets produced by this test's single-source channels.
+ // TODO(andrew): use proper error checks here rather than asserts.
+ virtual int SendPacket(int channel, const void* data, int length) {
+ const uint8_t* packet = static_cast<const uint8_t*> (data);
+
+ // Extension bit.
+ assert(packet[0] & 0x10);
+ int index = 12; // Assume standard RTP header.
+ // Header extension ID
+ assert(packet[index++] == 0xBE);
+ assert(packet[index++] == 0xDE);
+ // Header extension length
+ assert(packet[index++] == 0x00);
+ assert(packet[index++] == 0x01);
+
+ // User-defined ID.
+ assert(((packet[index] & 0xf0) >> 4) == 1);
+ // Length
+ assert((packet[index++] & 0x0f) == 0);
+
+ // Data byte: MSB is the VAD flag, low 7 bits the level (0..127,
+ // printed with a leading '-' since it represents -dBov).
+ int vad = packet[index] >> 7;
+ int level = packet[index] & 0x7f;
+ if (channel == 0) {
+ printf("%d -%d\n", vad, level);
+ } else if (channel == 1) {
+ printf(" %d -%d\n", vad, level);
+ } else {
+ assert(false);
+ }
+
+ if (mute_) {
+ assert(vad == 0);
+ assert(level == 127);
+ } else {
+ assert(vad == 0 || vad == 1);
+ assert(level >= 0 && level <= 127);
+ }
+
+ return 0;
+ }
+
+ // RTCP is not inspected by this test; discard and report success.
+ virtual int SendRTCPPacket(int /*channel*/, const void* /*data*/,
+ int /*length*/) {
+ return 0;
+ }
+
+private:
+ bool mute_; // Expectation toggle checked by SendPacket(); see set_mute().
+};
+
+int VoEExtendedTest::TestRTP_RTCP() {
+ PrepareTest("RTP_RTCP");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoERTP_RTCP* rtp_rtcp = _mgr.RTP_RTCPPtr();
+ VoENetwork* network = _mgr.NetworkPtr();
+ VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+ VoECodec* codec = _mgr.CodecPtr();
+
+ XRTPObserver rtpObserver;
+
+#ifdef WEBRTC_ANDROID
+ int sleepTime = 200;
+#else
+ int sleepTime = 100;
+#endif
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile((output_path +
+ "VoERTP_RTCP_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ // ------------------------------------------------------------------------
+ // >> Set/GetRTPAudioLevelIndicationStatus
+ TEST(SetRTPAudioLevelIndicationStatus);
+ ANL();
+ TEST(GetRTPAudioLevelIndicationStatus);
+
+ // test invalid input parameters
+ TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 0));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, 15));
+ MARK();
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, 15));
+ MARK();
+ TEST_MUSTPASS(-1 != rtp_rtcp->SetRTPAudioLevelIndicationStatus(1, true, 5));
+ MARK();
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+
+ // test complete valid input range [1,14]
+ bool audioLevelEnabled(false);
+ unsigned char ID(0);
+ for (int id = 1; id < 15; id++) {
+ TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true, id));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->GetRTPAudioLevelIndicationStatus(
+ 0, audioLevelEnabled, ID));
+ MARK();
+ TEST_MUSTPASS(audioLevelEnabled != true);
+ TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, false, id));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->GetRTPAudioLevelIndicationStatus(
+ 0, audioLevelEnabled, ID));
+ MARK();
+ TEST_MUSTPASS(audioLevelEnabled != false);
+ TEST_MUSTPASS(ID != id);
+ }
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+ RTPAudioTransport rtpAudioTransport;
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(network->RegisterExternalTransport(0, rtpAudioTransport));
+ TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(0, true));
+ TEST_MUSTPASS(codec->SetVADStatus(0, true));
+
+ printf("\n\nReceving muted packets (expect VAD = 0, Level = -127)...\n");
+ printf("VAD Level [dbFS]\n");
+ SLEEP(2000);
+ rtpAudioTransport.set_mute(true);
+ TEST_MUSTPASS(volume->SetInputMute(0, true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(5000);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ rtpAudioTransport.set_mute(false);
+ TEST_MUSTPASS(volume->SetInputMute(0, false));
+
+ printf("\nReceiving packets from mic (should respond to mic level)...\n");
+ printf("VAD Level [dbFS]\n");
+ SLEEP(2000);
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(5000);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+
+ printf("\nReceiving packets from file (expect mostly VAD = 1)...\n");
+ printf("VAD Level [dbFS]\n");
+ SLEEP(2000);
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(5000);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+
+ printf("\nMuted and mic on independent channels...\n");
+ printf("Muted Mic\n");
+ SLEEP(2000);
+ ASSERT_TRUE(1 == voe_base_->CreateChannel());
+ TEST_MUSTPASS(network->RegisterExternalTransport(1, rtpAudioTransport));
+ TEST_MUSTPASS(rtp_rtcp->SetRTPAudioLevelIndicationStatus(1, true));
+ TEST_MUSTPASS(codec->SetVADStatus(1, true));
+ TEST_MUSTPASS(volume->SetInputMute(0, true));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartSend(1));
+ SLEEP(5000);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopSend(1));
+
+ TEST_MUSTPASS(network->DeRegisterExternalTransport(0));
+ TEST_MUSTPASS(network->DeRegisterExternalTransport(1));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(1));
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+
+ MARK();
+ ANL();
+
+ // ------------------------------------------------------------------------
+ // >> InsertExtraRTPPacket
+
+ int i(0);
+
+ TEST(SetLocalSSRC);
+ TEST_MUSTPASS(!rtp_rtcp->SetLocalSSRC(0, 5678));
+ MARK();
+ TEST_MUSTPASS(VE_ALREADY_SENDING != voe_base_->LastError());
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 5678)); // force send SSRC to 5678
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ MARK();
+ ANL();
+
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+
+ // ------------------------------------------------------------------------
+ // >> InsertExtraRTPPacket
+ TEST(InsertExtraRTPPacket);
+ ANL();
+
+ const char payloadData[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
+
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(-1, 0, false,
+ payloadData, 8));
+ MARK(); // invalid channel
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, -1, false,
+ payloadData, 8));
+ MARK(); // invalid payload type
+ TEST_ERROR(VE_INVALID_PLTYPE);
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 128, false,
+ payloadData, 8));
+ MARK(); // invalid payload type
+ TEST_ERROR(VE_INVALID_PLTYPE);
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+ NULL, 8));
+ MARK(); // invalid pointer
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+ payloadData, 1500-28+1));
+ MARK(); // invalid size
+ TEST_ERROR(VE_INVALID_ARGUMENT);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(-1 != rtp_rtcp->InsertExtraRTPPacket(0, 99, false,
+ payloadData, 8));
+ MARK(); // not sending
+ TEST_ERROR(VE_NOT_SENDING);
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+
+ SLEEP(1000);
+ for (int p = 0; p < 128; p++) {
+ TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0, p, false,
+ payloadData, 8));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->InsertExtraRTPPacket(0, p, true,
+ payloadData, 8));
+ MARK();
+ }
+
+ // Ensure we have sent all extra packets before we move forward to avoid
+ // an incorrect error code.
+ SLEEP(1000);
+
+ ANL();
+
+ // ------------------------------------------------------------------------
+ // >> RTP dump APIs
+ TEST(Start/StopRtpDump);
+ ANL();
+ TEST(Start/RTPDumpIsActive);
+
+ TEST_MUSTPASS(-1 != rtp_rtcp->RTPDumpIsActive(-1, kRtpIncoming));
+ MARK(); // invalid channel
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(false != rtp_rtcp->RTPDumpIsActive(0, kRtpIncoming));
+ MARK(); // should be off by default
+ TEST_MUSTPASS(false != rtp_rtcp->RTPDumpIsActive(0, kRtpOutgoing));
+ MARK(); // should be off by default
+
+ TEST_MUSTPASS(-1 != rtp_rtcp->StartRTPDump(-1, NULL));
+ MARK(); // invalid channel
+ TEST_ERROR(VE_CHANNEL_NOT_VALID);
+ TEST_MUSTPASS(-1 != rtp_rtcp->StartRTPDump(0, NULL));
+ MARK(); // invalid file name
+ TEST_ERROR(VE_BAD_FILE);
+
+ // Create two RTP dump files:
+
+ // - dump_in_1sec.rtp <=> ~1 sec recording of input side
+ // - dump_out_2sec.rtp <=> ~2 sec recording of output side
+ //
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpIncoming));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpOutgoing));
+ MARK();
+ std::string output_path = webrtc::test::OutputPath();
+ TEST_MUSTPASS(rtp_rtcp->StartRTPDump(
+ 0, (output_path + "dump_in_1sec.rtp").c_str(), kRtpIncoming));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->StartRTPDump(
+ 0, (output_path + "dump_out_2sec.rtp").c_str(), kRtpOutgoing));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpIncoming));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0, kRtpOutgoing));
+ MARK();
+
+ // Start/Stop tests:
+ //
+ // - only one file (called dump_in_200ms.rtp) should exist after this test
+ //
+ for (i = 0; i < 10; i++) {
+ TEST_MUSTPASS(rtp_rtcp->StartRTPDump(0,
+ (output_path + "dump_in_200ms.rtp").c_str()));
+ MARK();
+ SLEEP(200);
+ TEST_MUSTPASS(rtp_rtcp->StopRTPDump(0));
+ MARK();
+ }
+
+ // >> end of RTP dump APIs
+ // ------------------------------------------------------------------------
+ ANL();
+
+ TEST(GetRTCPStatus);
+ bool enabled;
+ TEST_MUSTPASS(!rtp_rtcp->GetRTCPStatus(-1, enabled));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+ MARK(); // should be on by default
+ TEST_MUSTPASS(enabled != true);
+ ANL();
+
+ TEST(SetRTCPStatus);
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+ TEST_MUSTPASS(enabled != false);
+ MARK();
+ SLEEP(2000);
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatus(0, enabled));
+ TEST_MUSTPASS(enabled != true);
+ MARK();
+ SLEEP(6000); // Make sure we get an RTCP packet
+ ANL();
+
+ TEST(CNAME);
+ TEST_MUSTPASS(!rtp_rtcp->SetRTCP_CNAME(0, NULL));
+ MARK();
+ TEST_MUSTPASS(VE_RTP_RTCP_MODULE_ERROR != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!rtp_rtcp->GetRemoteRTCP_CNAME(0, NULL));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ ANL();
+
+ TEST(GetRemoteSSRC);
+ unsigned int ssrc(0);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteSSRC(0, ssrc));
+ MARK();
+ TEST_MUSTPASS(ssrc != 5678);
+ ANL();
+
+ TEST(GetRemoteCSRC); // only trivial tests added
+ unsigned int csrcs[2];
+ int n(0);
+ TEST_MUSTPASS(!rtp_rtcp->GetRemoteCSRCs(1, csrcs));
+ MARK();
+ n = rtp_rtcp->GetRemoteCSRCs(0, csrcs);
+ MARK();
+ TEST_MUSTPASS(n != 0); // should be empty
+ ANL();
+
+ TEST(SetRTPObserver);
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(0, rtpObserver));
+ TEST_MUSTPASS(rtp_rtcp->DeRegisterRTPObserver(0));
+ TEST_MUSTPASS(rtp_rtcp->RegisterRTPObserver(0, rtpObserver));
+ TEST_MUSTPASS(rtp_rtcp->SetLocalSSRC(0, 7777)); // force send SSRC to 7777
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(sleepTime);
+ // verify that the new SSRC has been detected by the observer
+ TEST_MUSTPASS(rtpObserver._SSRC != 7777);
+ TEST_MUSTPASS(rtp_rtcp->DeRegisterRTPObserver(0));
+ ANL();
+
+ // Make fresh restart (ensures that SSRC is randomized)
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+
+ SLEEP(100);
+
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+
+ SLEEP(8000);
+
+ TEST(GetRemoteRTCPData);
+ // Statistics based on received RTCP reports (i.e. statistics on the remote
+ // side sent to us).
+ unsigned int NTPHigh(0), NTPLow(0), timestamp(0), playoutTimestamp(0),
+ jitter(0);
+ unsigned short fractionLost(0);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh, NTPLow,
+ timestamp, playoutTimestamp));
+ TEST_LOG("\n NTPHigh = %u \n NTPLow = %u \n timestamp = %u \n "
+ " playoutTimestamp = %u \n jitter = %u \n fractionLost = %hu \n",
+ NTPHigh, NTPLow, timestamp, playoutTimestamp, jitter, fractionLost);
+
+ unsigned int NTPHigh2(0), NTPLow2(0), timestamp2(0);
+ unsigned int playoutTimestamp2(0), jitter2(0);
+ unsigned short fractionLost2(0);
+
+ TEST_LOG("take a new sample and ensure that the playout timestamp is "
+ "maintained");
+ SLEEP(100);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2, timestamp2,
+ playoutTimestamp2, &jitter2,
+ &fractionLost2));
+ TEST_LOG("\n NTPHigh = %u \n NTPLow = %u \n timestamp = %u \n "
+ " playoutTimestamp = %u \n jitter = %u \n fractionLost = %hu \n",
+ NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2, fractionLost2);
+ TEST_MUSTPASS(playoutTimestamp != playoutTimestamp2);
+
+ TEST_LOG("wait for 8 seconds and ensure that the RTCP statistics is"
+ " updated...");
+ SLEEP(8000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2,
+ timestamp2, playoutTimestamp2,
+ &jitter2, &fractionLost2));
+ TEST_LOG("\n NTPHigh = %u \n NTPLow = %u \n timestamp = %u \n "
+ " playoutTimestamp = %u \n jitter = %u \n fractionLost = %hu \n",
+ NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2, fractionLost2);
+ TEST_MUSTPASS((NTPHigh == NTPHigh2) && (NTPLow == NTPLow2));
+ TEST_MUSTPASS(timestamp == timestamp2);
+ TEST_MUSTPASS(playoutTimestamp == playoutTimestamp2);
+
+#ifdef WEBRTC_CODEC_RED
+ //The following test is related to defect 4985 and 4986
+ TEST_LOG("Turn FEC and VAD on and wait for 4 seconds and ensure that "
+ "the jitter is still small...");
+ CodecInst cinst;
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ cinst.pltype = 104;
+ strcpy(cinst.plname, "isac");
+ cinst.plfreq = 32000;
+ cinst.pacsize = 960;
+ cinst.channels = 1;
+ cinst.rate = 45000;
+#else
+ cinst.pltype = 119;
+ strcpy(cinst.plname, "isaclc");
+ cinst.plfreq = 16000;
+ cinst.pacsize = 320;
+ cinst.channels = 1;
+ cinst.rate = 40000;
+#endif
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ TEST_MUSTPASS(codec->SetSendCodec(0, cinst));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, true, -1));
+ MARK();
+ TEST_MUSTPASS(codec->SetVADStatus(0,true));
+ SLEEP(4000);
+ TEST_MUSTPASS(rtp_rtcp->GetRemoteRTCPData(0, NTPHigh2, NTPLow2, timestamp2,
+ playoutTimestamp2, &jitter2,
+ &fractionLost2));
+ TEST_LOG("\n NTPHigh = %u \n NTPLow = %u \n timestamp = %u \n "
+ " playoutTimestamp = %u \n jitter = %u \n fractionLost = %hu \n",
+ NTPHigh2, NTPLow2, timestamp2, playoutTimestamp2, jitter2, fractionLost2);
+ TEST_MUSTPASS(jitter2 > 1000)
+ TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, false));
+ MARK();
+ //4985 and 4986 end
+#endif // #ifdef WEBRTC_CODEC_RED
+ TEST(GetRTPStatistics);
+ ANL();
+ // Statistics summarized on local side based on received RTP packets.
+ CallStatistics stats;
+ // Call GetRTPStatistics over a longer period than 7.5 seconds
+ // (=dT RTCP transmissions).
+ unsigned int averageJitterMs, maxJitterMs, discardedPackets;
+ SLEEP(1000);
+ for (i = 0; i < 8; i++) {
+ TEST_MUSTPASS(rtp_rtcp->GetRTPStatistics(0, averageJitterMs,
+ maxJitterMs,
+ discardedPackets));
+ TEST_LOG(" %i) averageJitterMs = %u \n maxJitterMs = %u \n "
+ " discardedPackets = %u \n", i, averageJitterMs, maxJitterMs,
+ discardedPackets);
+ SLEEP(1000);
+ }
+
+ TEST(RTCPStatistics #1);
+ ANL();
+ unsigned int packetsSent(0);
+ unsigned int packetsReceived(0);
+ for (i = 0; i < 8; i++)
+ {
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+ TEST_LOG(" %i) fractionLost = %hu \n cumulativeLost = %u \n "
+ " extendedMax = %u \n jitterSamples = %u \n rttMs = %d \n",
+ i, stats.fractionLost, stats.cumulativeLost,
+ stats.extendedMax, stats.jitterSamples, stats.rttMs);
+ TEST_LOG( " bytesSent = %d \n packetsSent = %d \n "
+ " bytesReceived = %d \n packetsReceived = %d \n",
+ stats.bytesSent, stats.packetsSent, stats.bytesReceived,
+ stats.packetsReceived);
+ if (i > 0)
+ {
+ TEST_LOG(" diff sent packets : %u (~50)\n",
+ stats.packetsSent - packetsSent);
+ TEST_LOG(" diff received packets: %u (~50)\n",
+ stats.packetsReceived - packetsReceived);
+ }
+ packetsSent = stats.packetsSent;
+ packetsReceived = stats.packetsReceived;
+ SLEEP(1000);
+ }
+
+ TEST(RTCPStatistics #2);
+ ANL();
+ TEST_LOG("restart sending and ensure that the statistics is reset");
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ SLEEP(50); // ensures approx. two received packets
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+ TEST_LOG("\n fractionLost = %hu \n cumulativeLost = %u \n "
+ " extendedMax = %u \n jitterSamples = %u \n rttMs = %d \n",
+ stats.fractionLost, stats.cumulativeLost,
+ stats.extendedMax, stats.jitterSamples, stats.rttMs);
+ TEST_LOG( " bytesSent = %d \n packetsSent = %d \n "
+ " bytesReceived = %d \n packetsReceived = %d \n",
+ stats.bytesSent, stats.packetsSent, stats.bytesReceived,
+ stats.packetsReceived);
+
+ TEST(RTCPStatistics #3);
+ ANL();
+ TEST_LOG("disable RTCP and verify that statistics is not corrupt");
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+ SLEEP(250);
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+ TEST_LOG("\n fractionLost = %hu \n cumulativeLost = %u \n "
+ " extendedMax = %u \n jitterSamples = %u \n rttMs = %d \n",
+ stats.fractionLost, stats.cumulativeLost,
+ stats.extendedMax, stats.jitterSamples, stats.rttMs);
+ TEST_LOG(" bytesSent = %d \n packetsSent = %d \n "
+ "bytesReceived = %d \n packetsReceived = %d \n",
+ stats.bytesSent, stats.packetsSent,
+ stats.bytesReceived, stats.packetsReceived);
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+
+ TEST(RTCPStatistics #4);
+ ANL();
+ TEST_LOG("restart receiving and check RX statistics");
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ SLEEP(50); // ensures approx. two received packets
+ TEST_MUSTPASS(rtp_rtcp->GetRTCPStatistics(0, stats));
+ TEST_LOG("\n fractionLost = %hu \n cumulativeLost = %u \n "
+ " extendedMax = %u \n jitterSamples = %u \n rttMs = %d \n",
+ stats.fractionLost, stats.cumulativeLost,
+ stats.extendedMax, stats.jitterSamples,
+ stats.rttMs);
+ TEST_LOG(" bytesSent = %d \n packetsSent = %d \n "
+ " bytesReceived = %d \n packetsReceived = %d \n",
+ stats.bytesSent, stats.packetsSent,
+ stats.bytesReceived, stats.packetsReceived);
+
+ TEST(SendApplicationDefinedRTCPPacket);
+ // just do some fail tests here
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ // should fail since sending is off
+ TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+ 0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+ MARK();
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(
+ 0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, false));
+ // should fail since RTCP is off
+ TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+ 0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+ MARK();
+ TEST_MUSTPASS(rtp_rtcp->SetRTCPStatus(0, true));
+ TEST_MUSTPASS(rtp_rtcp->SendApplicationDefinedRTCPPacket(
+ 0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabcd", 32));
+ MARK();
+ // invalid data length
+ TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(
+ 0, 0, 0, "abcdabcdabcdabcdabcdabcdabcdabc", 31));
+ MARK();
+ // invalid data vector
+ TEST_MUSTPASS(!rtp_rtcp->SendApplicationDefinedRTCPPacket(0, 0, 0, NULL, 0));
+ MARK();
+ ANL();
+
+#ifdef WEBRTC_CODEC_RED
+ TEST(SetFECStatus);
+ ANL();
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ cinst.pltype = 126;
+ strcpy(cinst.plname, "red");
+ cinst.plfreq = 8000;
+ cinst.pacsize = 0;
+ cinst.channels = 1;
+ cinst.rate = 0;
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ cinst.pltype = 104;
+ strcpy(cinst.plname, "isac");
+ cinst.plfreq = 32000;
+ cinst.pacsize = 960;
+ cinst.channels = 1;
+ cinst.rate = 45000;
+#else
+ cinst.pltype = 119;
+ strcpy(cinst.plname, "isaclc");
+ cinst.plfreq = 16000;
+ cinst.pacsize = 320;
+ cinst.channels = 1;
+ cinst.rate = 40000;
+#endif
+ // We have to re-register the audio codec payload type as stopReceive will
+ // clean the database
+ TEST_MUSTPASS(codec->SetRecPayloadType(0, cinst));
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 8000));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 8000, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ TEST_LOG("Start playing a file as microphone again \n");
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+ TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, true, 126));
+ MARK();
+ TEST_LOG("Should sound OK with FEC enabled\n");
+ SLEEP(4000);
+ TEST_MUSTPASS(rtp_rtcp->SetFECStatus(0, false));
+ MARK();
+#endif // #ifdef WEBRTC_CODEC_RED
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ ANL();
+ AOK();
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestVideoSync
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestVideoSync()
+{
+ PrepareTest("VideoSync");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEVideoSync* vsync = _mgr.VideoSyncPtr();
+
+ // check if this interface is supported
+ if (!vsync)
+ {
+ TEST_LOG("VoEVideoSync is not supported!");
+ return -1;
+ }
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile((output_path +
+ "VoEVideoSync_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+
+ ///////////////////////////
+ // Actual test starts here
+
+ TEST(SetInitTimestamp);
+ ANL();
+ TEST_MUSTPASS(!vsync->SetInitTimestamp(0, 12345));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(vsync->SetInitTimestamp(0, 12345));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ MARK();
+ SLEEP(1000);
+ AOK();
+ ANL();
+
+ TEST(SetInitSequenceNumber);
+ ANL();
+ TEST_MUSTPASS(!vsync->SetInitSequenceNumber(0, 123));
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ MARK();
+ SLEEP(1000);
+ TEST_MUSTPASS(vsync->SetInitSequenceNumber(0, 123));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+ MARK();
+ SLEEP(1000);
+ AOK();
+ ANL();
+
+ unsigned int timeStamp;
+ TEST(GetPlayoutTimestamp);
+ ANL();
+ TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+ TEST_LOG("GetPlayoutTimestamp: %u", timeStamp);
+ SLEEP(1000);
+ TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+ TEST_LOG(" %u", timeStamp);
+ SLEEP(1000);
+ TEST_MUSTPASS(vsync->GetPlayoutTimestamp(0, timeStamp));
+ TEST_LOG(" %u\n", timeStamp);
+ AOK();
+ ANL();
+
+ TEST(SetMinimumPlayoutDelay);
+ ANL();
+ TEST_MUSTPASS(!vsync->SetMinimumPlayoutDelay(0, -1));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+ TEST_MUSTPASS(!vsync->SetMinimumPlayoutDelay(0, 5000));
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ MARK();
+
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ AOK();
+ ANL();
+ return 0;
+}
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest::TestVolumeControl
+// ----------------------------------------------------------------------------
+
+int VoEExtendedTest::TestVolumeControl()
+{
+ PrepareTest("TestVolumeControl");
+
+ VoEBase* voe_base_ = _mgr.BasePtr();
+ VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+#ifdef _TEST_FILE_
+ VoEFile* file = _mgr.FilePtr();
+#endif
+#ifdef _TEST_HARDWARE_
+ VoEHardware* hardware = _mgr.HardwarePtr();
+#endif
+
+#ifdef _USE_EXTENDED_TRACE_
+ TEST_MUSTPASS(VoiceEngine::SetTraceFile(
+ (output_path + "VoEVolumeControl_trace.txt").c_str()));
+ TEST_MUSTPASS(VoiceEngine::SetTraceFilter(kTraceStateInfo |
+ kTraceStateInfo |
+ kTraceWarning |
+ kTraceError |
+ kTraceCritical |
+ kTraceApiCall |
+ kTraceMemory |
+ kTraceInfo));
+#endif
+
+ TEST_MUSTPASS(voe_base_->Init());
+ TEST_MUSTPASS(voe_base_->CreateChannel());
+#if (defined _TEST_HARDWARE_ && (!defined(WEBRTC_IOS)))
+#if defined(_WIN32)
+ TEST_MUSTPASS(hardware->SetRecordingDevice(-1));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(-1));
+#else
+ TEST_MUSTPASS(hardware->SetRecordingDevice(0));
+ TEST_MUSTPASS(hardware->SetPlayoutDevice(0));
+#endif
+#endif
+ TEST_MUSTPASS(voe_base_->SetLocalReceiver(0, 12345));
+ TEST_MUSTPASS(voe_base_->SetSendDestination(0, 12345, "127.0.0.1"));
+ TEST_MUSTPASS(voe_base_->StartReceive(0));
+ TEST_MUSTPASS(voe_base_->StartPlayout(0));
+ TEST_MUSTPASS(voe_base_->StartSend(0));
+#ifdef _TEST_FILE_
+ TEST_MUSTPASS(file->StartPlayingFileAsMicrophone(0, _mgr.AudioFilename(),
+ true, true));
+#endif
+
+ ////////////////////////////
+ // Actual test starts here
+
+#if !defined(WEBRTC_IOS)
+ TEST(SetSpeakerVolume);
+ ANL();
+ TEST_MUSTPASS(-1 != volume->SetSpeakerVolume(256));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ ANL();
+#endif // #if !defined(WEBRTC_IOS)
+
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ TEST(SetMicVolume); ANL();
+ TEST_MUSTPASS(-1 != volume->SetMicVolume(256)); MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ ANL();
+#endif // #if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+
+#if !defined(WEBRTC_IOS)
+ TEST(SetChannelOutputVolumeScaling);
+ ANL();
+ TEST_MUSTPASS(-1 != volume->SetChannelOutputVolumeScaling(0, (float)-0.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetChannelOutputVolumeScaling(0, (float)10.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ ANL();
+#endif // #if !defined(WEBRTC_IOS)
+#if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+ TEST(SetOutputVolumePan);
+ ANL();
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)-0.1,
+ (float)1.0));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.1,
+ (float)1.0));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.0,
+ (float)-0.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(-1, (float)1.0,
+ (float)1.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ ANL();
+
+ TEST(SetChannelOutputVolumePan);
+ ANL();
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)-0.1,
+ (float)1.0));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.1,
+ (float)1.0));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.0,
+ (float)-0.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ TEST_MUSTPASS(-1 != volume->SetOutputVolumePan(0, (float)1.0,
+ (float)1.1));
+ MARK();
+ TEST_MUSTPASS(VE_INVALID_ARGUMENT != voe_base_->LastError());
+ ANL();
+#endif // #if (!defined(WEBRTC_IOS) && !defined(WEBRTC_ANDROID))
+#ifdef _TEST_FILE_
+ TEST_MUSTPASS(file->StopPlayingFileAsMicrophone(0));
+#endif
+ TEST_MUSTPASS(voe_base_->StopSend(0));
+ TEST_MUSTPASS(voe_base_->StopPlayout(0));
+ TEST_MUSTPASS(voe_base_->StopReceive(0));
+ TEST_MUSTPASS(voe_base_->DeleteChannel(0));
+ TEST_MUSTPASS(voe_base_->Terminate());
+
+ AOK();
+ ANL();
+ return 0;
+}
+
+} // namespace voetest
diff --git a/voice_engine/test/auto_test/voe_extended_test.h b/voice_engine/test/auto_test/voe_extended_test.h
new file mode 100644
index 0000000..c685d88
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_extended_test.h
@@ -0,0 +1,462 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
+
+#include "voe_standard_test.h"
+#include "modules/audio_device/include/audio_device.h"
+
+namespace voetest {
+
+class VoETestManager;
+
+// ----------------------------------------------------------------------------
+// AudioDeviceModule
+//
+// Implementation of the ADM to be used as external ADM in VoiceEngine.
+// This implementation is only a mock class, i.e., it does not provide
+// any real audio support.
+// ----------------------------------------------------------------------------
+
+class AudioDeviceModuleImpl : public AudioDeviceModule {
+ public:
+ // Factory methods
+ static AudioDeviceModuleImpl* Create();
+ static bool Destroy(AudioDeviceModuleImpl* adm);
+
+ // Helper methods which allows us to get some handy information about
+ // this mock implementation.
+ int32_t ReferenceCounter() const {
+ return _ref_count;
+ }
+
+ // RefCountedModule implementation (mocks default implementation)
+ virtual int32_t AddRef();
+ virtual int32_t Release();
+
+ // Module implementation
+ virtual int32_t Version(char* version,
+ uint32_t& remaining_buffer_in_bytes,
+ uint32_t& position) const {
+ return 0;
+ }
+ virtual int32_t ChangeUniqueId(const int32_t id) {
+ return 0;
+ }
+ virtual int32_t TimeUntilNextProcess() {
+ return -1;
+ }
+ virtual int32_t Process() {
+ return 0;
+ }
+
+ // AudioDeviceModule implementation
+ virtual int32_t ActiveAudioLayer(AudioLayer* audioLayer) const {
+ return 0;
+ }
+
+ virtual ErrorCode LastError() const {
+ return static_cast<ErrorCode> (0);
+ }
+ virtual int32_t RegisterEventObserver(AudioDeviceObserver* eventCallback) {
+ return 0;
+ }
+
+ virtual int32_t RegisterAudioCallback(AudioTransport* audioCallback) {
+ return 0;
+ }
+
+ virtual int32_t Init() {
+ return 0;
+ }
+ virtual int32_t Terminate() {
+ return 0;
+ }
+ virtual bool Initialized() const {
+ return true;
+ }
+
+ virtual int16_t PlayoutDevices() {
+ return -1;
+ }
+ virtual int16_t RecordingDevices() {
+ return -1;
+ }
+ virtual int32_t PlayoutDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+ }
+ virtual int32_t RecordingDeviceName(uint16_t index,
+ char name[kAdmMaxDeviceNameSize],
+ char guid[kAdmMaxGuidSize]) {
+ return -1;
+ }
+
+ virtual int32_t SetPlayoutDevice(uint16_t index) {
+ return 0;
+ }
+ virtual int32_t SetPlayoutDevice(WindowsDeviceType device) {
+ return 0;
+ }
+ virtual int32_t SetRecordingDevice(uint16_t index) {
+ return 0;
+ }
+ virtual int32_t SetRecordingDevice(WindowsDeviceType device) {
+ return 0;
+ }
+
+ virtual int32_t PlayoutIsAvailable(bool* available) {
+ *available = true;
+ return 0;
+ }
+ virtual int32_t InitPlayout() {
+ return 0;
+ }
+ virtual bool PlayoutIsInitialized() const {
+ return true;
+ }
+ virtual int32_t RecordingIsAvailable(bool* available) {
+ *available = true;
+ return 0;
+ }
+ virtual int32_t InitRecording() {
+ return 0;
+ }
+ virtual bool RecordingIsInitialized() const {
+ return true;
+ }
+
+ virtual int32_t StartPlayout() {
+ return 0;
+ }
+ virtual int32_t StopPlayout() {
+ return 0;
+ }
+ virtual bool Playing() const {
+ return true;
+ }
+ virtual int32_t StartRecording() {
+ return 0;
+ }
+ virtual int32_t StopRecording() {
+ return 0;
+ }
+ virtual bool Recording() const {
+ return true;
+ }
+
+ virtual int32_t SetAGC(bool enable) {
+ return -1;
+ }
+ virtual bool AGC() const {
+ return false;
+ }
+
+ virtual int32_t SetWaveOutVolume(uint16_t volumeLeft,
+ uint16_t volumeRight) {
+ return -1;
+ }
+ virtual int32_t WaveOutVolume(uint16_t* volumeLeft,
+ uint16_t* volumeRight) const {
+ return -1;
+ }
+
+ virtual int32_t SpeakerIsAvailable(bool* available) {
+ *available = true;
+ return 0;
+ }
+ virtual int32_t InitSpeaker() {
+ return 0;
+ }
+ virtual bool SpeakerIsInitialized() const {
+ return true;
+ }
+ virtual int32_t MicrophoneIsAvailable(bool* available) {
+ *available = true;
+ return 0;
+ }
+ virtual int32_t InitMicrophone() {
+ return 0;
+ }
+ virtual bool MicrophoneIsInitialized() const {
+ return true;
+ }
+
+ virtual int32_t SpeakerVolumeIsAvailable(bool* available) {
+ return -1;
+ }
+ virtual int32_t SetSpeakerVolume(uint32_t volume) {
+ return -1;
+ }
+ virtual int32_t SpeakerVolume(uint32_t* volume) const {
+ return -1;
+ }
+ virtual int32_t MaxSpeakerVolume(uint32_t* maxVolume) const {
+ return -1;
+ }
+ virtual int32_t MinSpeakerVolume(uint32_t* minVolume) const {
+ return -1;
+ }
+ virtual int32_t SpeakerVolumeStepSize(uint16_t* stepSize) const {
+ return -1;
+ }
+
+ virtual int32_t MicrophoneVolumeIsAvailable(bool* available) {
+ return -1;
+ }
+ virtual int32_t SetMicrophoneVolume(uint32_t volume) {
+ return -1;
+ }
+ virtual int32_t MicrophoneVolume(uint32_t* volume) const {
+ return -1;
+ }
+ virtual int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const {
+ return -1;
+ }
+ virtual int32_t MinMicrophoneVolume(uint32_t* minVolume) const {
+ return -1;
+ }
+ virtual int32_t MicrophoneVolumeStepSize(uint16_t* stepSize) const {
+ return -1;
+ }
+
+ virtual int32_t SpeakerMuteIsAvailable(bool* available) {
+ return -1;
+ }
+ virtual int32_t SetSpeakerMute(bool enable) {
+ return -1;
+ }
+ virtual int32_t SpeakerMute(bool* enabled) const {
+ return -1;
+ }
+
+ virtual int32_t MicrophoneMuteIsAvailable(bool* available) {
+ return -1;
+ }
+ virtual int32_t SetMicrophoneMute(bool enable) {
+ return -1;
+ }
+ virtual int32_t MicrophoneMute(bool* enabled) const {
+ return -1;
+ }
+
+ virtual int32_t MicrophoneBoostIsAvailable(bool* available) {
+ return -1;
+ }
+ virtual int32_t SetMicrophoneBoost(bool enable) {
+ return -1;
+ }
+ virtual int32_t MicrophoneBoost(bool* enabled) const {
+ return -1;
+ }
+
+ virtual int32_t StereoPlayoutIsAvailable(bool* available) const {
+ return -1;
+ }
+ virtual int32_t SetStereoPlayout(bool enable) {
+ return -1;
+ }
+ virtual int32_t StereoPlayout(bool* enabled) const {
+ return -1;
+ }
+ virtual int32_t StereoRecordingIsAvailable(bool* available) const {
+ return -1;
+ }
+ virtual int32_t SetStereoRecording(bool enable) {
+ return -1;
+ }
+ virtual int32_t StereoRecording(bool* enabled) const {
+ return -1;
+ }
+ virtual int32_t SetRecordingChannel(const ChannelType channel) {
+ return -1;
+ }
+ virtual int32_t RecordingChannel(ChannelType* channel) const {
+ return -1;
+ }
+
+ virtual int32_t SetPlayoutBuffer(const BufferType type, uint16_t sizeMS = 0) {
+ return -1;
+ }
+ virtual int32_t PlayoutBuffer(BufferType* type, uint16_t* sizeMS) const {
+ return -1;
+ }
+ virtual int32_t PlayoutDelay(uint16_t* delayMS) const {
+ return -1;
+ }
+ virtual int32_t RecordingDelay(uint16_t* delayMS) const {
+ return -1;
+ }
+
+ virtual int32_t CPULoad(uint16_t* load) const {
+ return -1;
+ }
+
+ virtual int32_t StartRawOutputFileRecording(
+ const char pcmFileNameUTF8[kAdmMaxFileNameSize]) {
+ return -1;
+ }
+ virtual int32_t StopRawOutputFileRecording() {
+ return -1;
+ }
+ virtual int32_t StartRawInputFileRecording(
+ const char pcmFileNameUTF8[kAdmMaxFileNameSize]) {
+ return -1;
+ }
+ virtual int32_t StopRawInputFileRecording() {
+ return -1;
+ }
+
+ virtual int32_t SetRecordingSampleRate(const uint32_t samplesPerSec) {
+ return -1;
+ }
+ virtual int32_t RecordingSampleRate(uint32_t* samplesPerSec) const {
+ return -1;
+ }
+ virtual int32_t SetPlayoutSampleRate(const uint32_t samplesPerSec) {
+ return -1;
+ }
+ virtual int32_t PlayoutSampleRate(uint32_t* samplesPerSec) const {
+ return -1;
+ }
+
+ virtual int32_t ResetAudioDevice() {
+ return -1;
+ }
+ virtual int32_t SetLoudspeakerStatus(bool enable) {
+ return -1;
+ }
+ virtual int32_t GetLoudspeakerStatus(bool* enabled) const {
+ return -1;
+ }
+
+ protected:
+ AudioDeviceModuleImpl();
+ ~AudioDeviceModuleImpl();
+
+ private:
+ volatile int32_t _ref_count;
+};
+
+// ----------------------------------------------------------------------------
+// Transport
+// ----------------------------------------------------------------------------
+
+class ExtendedTestTransport : public Transport {
+ public:
+ ExtendedTestTransport(VoENetwork* ptr);
+ ~ExtendedTestTransport();
+ VoENetwork* myNetw;
+
+ protected:
+ virtual int SendPacket(int channel, const void *data, int len);
+ virtual int SendRTCPPacket(int channel, const void *data, int len);
+
+ private:
+ static bool Run(void* ptr);
+ bool Process();
+
+ private:
+ ThreadWrapper* _thread;
+ CriticalSectionWrapper* _lock;
+ EventWrapper* _event;
+
+ private:
+ unsigned char _packetBuffer[1612];
+ int _length;
+ int _channel;
+};
+
+class XTransport : public Transport {
+ public:
+ XTransport(VoENetwork* netw, VoEFile* file);
+ VoENetwork* _netw;
+ VoEFile* _file;
+
+ public:
+ virtual int SendPacket(int channel, const void *data, int len);
+ virtual int SendRTCPPacket(int channel, const void *data, int len);
+};
+
+class XRTPObserver : public VoERTPObserver {
+ public:
+ XRTPObserver();
+ ~XRTPObserver();
+ virtual void OnIncomingCSRCChanged(const int channel,
+ const unsigned int CSRC,
+ const bool added);
+ virtual void OnIncomingSSRCChanged(const int channel,
+ const unsigned int SSRC);
+ public:
+ unsigned int _SSRC;
+};
+
+// ----------------------------------------------------------------------------
+// VoEExtendedTest
+// ----------------------------------------------------------------------------
+
+class VoEExtendedTest : public VoiceEngineObserver,
+ public VoEConnectionObserver {
+ public:
+ VoEExtendedTest(VoETestManager& mgr);
+ ~VoEExtendedTest();
+ int PrepareTest(const char* str) const;
+ int TestPassed(const char* str) const;
+ int TestBase();
+ int TestCallReport();
+ int TestCodec();
+ int TestDtmf();
+ int TestEncryption();
+ int TestExternalMedia();
+ int TestFile();
+ int TestMixing();
+ int TestHardware();
+ int TestNetEqStats();
+ int TestNetwork();
+ int TestRTP_RTCP();
+ int TestVideoSync();
+ int TestVolumeControl();
+ public:
+ int ErrorCode() const {
+ return _errCode;
+ }
+ void ClearErrorCode() {
+ _errCode = 0;
+ }
+ protected:
+ // from VoiceEngineObserver
+ void CallbackOnError(const int errCode, const int channel);
+ void CallbackOnTrace(const TraceLevel level, const char* message, const int length);
+ protected:
+ // from VoEConnectionObserver
+ void OnPeriodicDeadOrAlive(const int channel, const bool alive);
+ private:
+ void Play(int channel, unsigned int timeMillisec, bool addFileAsMicrophone = false,
+ bool addTimeMarker = false);
+ void Sleep(unsigned int timeMillisec, bool addMarker = false);
+ void StartMedia(int channel, int rtpPort, bool listen, bool playout, bool send);
+ void StopMedia(int channel);
+ int RunMixingTest(int num_remote_channels, int num_local_channels,
+ int16_t input_value, int16_t max_output_value,
+ int16_t min_output_value);
+ private:
+ VoETestManager& _mgr;
+ private:
+ int _errCode;
+ bool _alive;
+ bool _listening[32];
+ bool _playing[32];
+ bool _sending[32];
+};
+
+} // namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_EXTENDED_TEST_H
diff --git a/voice_engine/test/auto_test/voe_standard_test.cc b/voice_engine/test/auto_test/voe_standard_test.cc
new file mode 100644
index 0000000..9977e98
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_standard_test.cc
@@ -0,0 +1,604 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "engine_configurations.h"
+#include "system_wrappers/interface/event_wrapper.h"
+#include "voice_engine/include/voe_neteq_stats.h"
+#include "voice_engine/test/auto_test/automated_mode.h"
+#include "voice_engine/test/auto_test/voe_cpu_test.h"
+#include "voice_engine/test/auto_test/voe_extended_test.h"
+#include "voice_engine/test/auto_test/voe_stress_test.h"
+#include "voice_engine/test/auto_test/voe_unit_test.h"
+#include "voice_engine/voice_engine_defines.h"
+
+DEFINE_bool(include_timing_dependent_tests, true,
+ "If true, we will include tests / parts of tests that are known "
+ "to break in slow execution environments (such as valgrind).");
+DEFINE_bool(automated, false,
+ "If true, we'll run the automated tests we have in noninteractive "
+ "mode.");
+
+using namespace webrtc;
+
+namespace voetest {
+
+int dummy = 0; // Dummy used in different functions to avoid warnings
+
+// Logs two lists: the sub-APIs that were compiled in (flags captured in the
+// constructor from the WEBRTC_VOICE_ENGINE_*_API defines) and, below it, the
+// sub-APIs that were excluded from the build.
+// Fix: the excluded-list entry for ExternalMedia was misspelled
+// ("ExternamMedia"), making the log inconsistent with the supported list.
+void SubAPIManager::DisplayStatus() const {
+ TEST_LOG("Supported sub APIs:\n\n");
+ if (_base)
+ TEST_LOG(" Base\n");
+ if (_callReport)
+ TEST_LOG(" CallReport\n");
+ if (_codec)
+ TEST_LOG(" Codec\n");
+ if (_dtmf)
+ TEST_LOG(" Dtmf\n");
+ if (_encryption)
+ TEST_LOG(" Encryption\n");
+ if (_externalMedia)
+ TEST_LOG(" ExternalMedia\n");
+ if (_file)
+ TEST_LOG(" File\n");
+ if (_hardware)
+ TEST_LOG(" Hardware\n");
+ if (_netEqStats)
+ TEST_LOG(" NetEqStats\n");
+ if (_network)
+ TEST_LOG(" Network\n");
+ if (_rtp_rtcp)
+ TEST_LOG(" RTP_RTCP\n");
+ if (_videoSync)
+ TEST_LOG(" VideoSync\n");
+ if (_volumeControl)
+ TEST_LOG(" VolumeControl\n");
+ if (_apm)
+ TEST_LOG(" AudioProcessing\n");
+ ANL();
+ TEST_LOG("Excluded sub APIs:\n\n");
+ if (!_base)
+ TEST_LOG(" Base\n");
+ if (!_callReport)
+ TEST_LOG(" CallReport\n");
+ if (!_codec)
+ TEST_LOG(" Codec\n");
+ if (!_dtmf)
+ TEST_LOG(" Dtmf\n");
+ if (!_encryption)
+ TEST_LOG(" Encryption\n");
+ if (!_externalMedia)
+ TEST_LOG(" ExternalMedia\n");
+ if (!_file)
+ TEST_LOG(" File\n");
+ if (!_hardware)
+ TEST_LOG(" Hardware\n");
+ if (!_netEqStats)
+ TEST_LOG(" NetEqStats\n");
+ if (!_network)
+ TEST_LOG(" Network\n");
+ if (!_rtp_rtcp)
+ TEST_LOG(" RTP_RTCP\n");
+ if (!_videoSync)
+ TEST_LOG(" VideoSync\n");
+ if (!_volumeControl)
+ TEST_LOG(" VolumeControl\n");
+ if (!_apm)
+ TEST_LOG(" AudioProcessing\n");
+ ANL();
+}
+
+// Interactive menu: prints the list of extended tests (marking unavailable
+// sub-APIs with "(NA)"), reads a selection from stdin, and maps it to an
+// ExtendedSelection. Returns true and stores the choice in |sel| (and in
+// _xsel) when the selection is valid and available; otherwise returns false.
+bool SubAPIManager::GetExtendedMenuSelection(ExtendedSelection& sel) {
+ printf("------------------------------------------------\n");
+ printf("Select extended test\n\n");
+ printf(" (0) None\n");
+ printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+ printf(" (1) Base");
+ if (_base)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (2) CallReport");
+ if (_callReport)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (3) Codec");
+ if (_codec)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (4) Dtmf");
+ if (_dtmf)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (5) Encryption");
+ if (_encryption)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (6) VoEExternalMedia");
+ if (_externalMedia)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (7) File");
+ if (_file)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (8) Hardware");
+ if (_hardware)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (9) NetEqStats");
+ if (_netEqStats)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (10) Network");
+ if (_network)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (11) RTP_RTCP");
+ if (_rtp_rtcp)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (12) VideoSync");
+ if (_videoSync)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf(" (13) VolumeControl");
+ if (_volumeControl)
+ printf("\n");
+ else
+ printf(" (NA)\n");
+ printf("\n: ");
+
+ ExtendedSelection xsel(XSEL_Invalid);
+ int selection(0);
+ // scanf result assigned to the file-level |dummy| only to silence
+ // unused-result warnings; an unparsable entry leaves |selection| at 0.
+ dummy = scanf("%d", &selection);
+
+ // A number whose sub-API was compiled out falls through with
+ // xsel == XSEL_Invalid, same as an out-of-range entry.
+ switch (selection) {
+ case 0:
+ xsel = XSEL_None;
+ break;
+ case 1:
+ if (_base)
+ xsel = XSEL_Base;
+ break;
+ case 2:
+ if (_callReport)
+ xsel = XSEL_CallReport;
+ break;
+ case 3:
+ if (_codec)
+ xsel = XSEL_Codec;
+ break;
+ case 4:
+ if (_dtmf)
+ xsel = XSEL_DTMF;
+ break;
+ case 5:
+ if (_encryption)
+ xsel = XSEL_Encryption;
+ break;
+ case 6:
+ if (_externalMedia)
+ xsel = XSEL_ExternalMedia;
+ break;
+ case 7:
+ if (_file)
+ xsel = XSEL_File;
+ break;
+ case 8:
+ if (_hardware)
+ xsel = XSEL_Hardware;
+ break;
+ case 9:
+ if (_netEqStats)
+ xsel = XSEL_NetEqStats;
+ break;
+ case 10:
+ if (_network)
+ xsel = XSEL_Network;
+ break;
+ case 11:
+ if (_rtp_rtcp)
+ xsel = XSEL_RTP_RTCP;
+ break;
+ case 12:
+ if (_videoSync)
+ xsel = XSEL_VideoSync;
+ break;
+ case 13:
+ if (_volumeControl)
+ xsel = XSEL_VolumeControl;
+ break;
+ default:
+ xsel = XSEL_Invalid;
+ break;
+ }
+ if (xsel == XSEL_Invalid)
+ printf("Invalid selection!\n");
+
+ sel = xsel;
+ _xsel = xsel;
+
+ return (xsel != XSEL_Invalid);
+}
+
+// Zero-initializes all interface pointers; the engine and interfaces are
+// acquired later via Init() and GetInterfaces().
+VoETestManager::VoETestManager()
+ : initialized_(false),
+ voice_engine_(NULL),
+ voe_base_(0),
+ voe_call_report_(0),
+ voe_codec_(0),
+ voe_dtmf_(0),
+ voe_encrypt_(0),
+ voe_xmedia_(0),
+ voe_file_(0),
+ voe_hardware_(0),
+ voe_network_(0),
+#ifdef _TEST_NETEQ_STATS_
+ voe_neteq_stats_(NULL),
+#endif
+ voe_rtp_rtcp_(0),
+ voe_vsync_(0),
+ voe_volume_control_(0),
+ voe_apm_(0)
+{
+}
+
+// Empty: callers are expected to run ReleaseInterfaces() (which also deletes
+// the engine) before destruction, as run_auto_test() does.
+VoETestManager::~VoETestManager() {
+}
+
+// Creates the VoiceEngine instance (idempotent). First verifies that trace
+// methods fail before the engine exists. Returns true on success.
+// Fix: initialized_ was never set to true, so the early-return guard was dead
+// and a second Init() call would leak the first engine and re-run the
+// pre-creation trace check against an already-created engine.
+bool VoETestManager::Init() {
+ if (initialized_)
+ return true;
+
+ if (VoiceEngine::SetTraceFile(NULL) != -1) {
+ // should not be possible to call a Trace method before the VoE is
+ // created
+ TEST_LOG("\nError at line: %i (VoiceEngine::SetTraceFile()"
+ "should fail)!\n", __LINE__);
+ return false;
+ }
+
+ voice_engine_ = VoiceEngine::Create();
+ if (!voice_engine_) {
+ TEST_LOG("Failed to create VoiceEngine\n");
+ return false;
+ }
+
+ initialized_ = true;
+ return true;
+}
+
+// Acquires all sub-API interfaces from the engine (no-op if Init() has not
+// created one). Conditionally compiled interfaces are only fetched when the
+// corresponding _TEST_*_ macro is defined. Also selects the audio device
+// layer (TESTED_AUDIO_LAYER) used by all subsequent tests.
+void VoETestManager::GetInterfaces() {
+ if (voice_engine_) {
+ voe_base_ = VoEBase::GetInterface(voice_engine_);
+ voe_codec_ = VoECodec::GetInterface(voice_engine_);
+ voe_volume_control_ = VoEVolumeControl::GetInterface(voice_engine_);
+ voe_dtmf_ = VoEDtmf::GetInterface(voice_engine_);
+ voe_rtp_rtcp_ = VoERTP_RTCP::GetInterface(voice_engine_);
+ voe_apm_ = VoEAudioProcessing::GetInterface(voice_engine_);
+ voe_network_ = VoENetwork::GetInterface(voice_engine_);
+ voe_file_ = VoEFile::GetInterface(voice_engine_);
+#ifdef _TEST_VIDEO_SYNC_
+ voe_vsync_ = VoEVideoSync::GetInterface(voice_engine_);
+#endif
+ voe_encrypt_ = VoEEncryption::GetInterface(voice_engine_);
+ voe_hardware_ = VoEHardware::GetInterface(voice_engine_);
+ // Set the audio layer to use in all tests
+ if (voe_hardware_) {
+ int res = voe_hardware_->SetAudioDeviceLayer(TESTED_AUDIO_LAYER);
+ if (res < 0) {
+ // Non-fatal: tests continue with the engine's default layer.
+ printf("\nERROR: failed to set audio layer to use in "
+ "testing\n");
+ } else {
+ printf("\nAudio layer %d will be used in testing\n",
+ TESTED_AUDIO_LAYER);
+ }
+ }
+#ifdef _TEST_XMEDIA_
+ voe_xmedia_ = VoEExternalMedia::GetInterface(voice_engine_);
+#endif
+#ifdef _TEST_CALL_REPORT_
+ voe_call_report_ = VoECallReport::GetInterface(voice_engine_);
+#endif
+#ifdef _TEST_NETEQ_STATS_
+ voe_neteq_stats_ = VoENetEqStats::GetInterface(voice_engine_);
+#endif
+ }
+}
+
+// Releases every acquired sub-API interface (nulling the pointers), deletes
+// the engine, and re-checks that trace calls fail once the engine is gone.
+// Returns 0 on success, -1 if VoiceEngine::Delete() failed. The trailing
+// SetTraceFile check only logs; it does not affect the return value.
+int VoETestManager::ReleaseInterfaces() {
+ bool releaseOK(true);
+
+ if (voe_base_) {
+ voe_base_->Release();
+ voe_base_ = NULL;
+ }
+ if (voe_codec_) {
+ voe_codec_->Release();
+ voe_codec_ = NULL;
+ }
+ if (voe_volume_control_) {
+ voe_volume_control_->Release();
+ voe_volume_control_ = NULL;
+ }
+ if (voe_dtmf_) {
+ voe_dtmf_->Release();
+ voe_dtmf_ = NULL;
+ }
+ if (voe_rtp_rtcp_) {
+ voe_rtp_rtcp_->Release();
+ voe_rtp_rtcp_ = NULL;
+ }
+ if (voe_apm_) {
+ voe_apm_->Release();
+ voe_apm_ = NULL;
+ }
+ if (voe_network_) {
+ voe_network_->Release();
+ voe_network_ = NULL;
+ }
+ if (voe_file_) {
+ voe_file_->Release();
+ voe_file_ = NULL;
+ }
+#ifdef _TEST_VIDEO_SYNC_
+ if (voe_vsync_) {
+ voe_vsync_->Release();
+ voe_vsync_ = NULL;
+ }
+#endif
+ if (voe_encrypt_) {
+ voe_encrypt_->Release();
+ voe_encrypt_ = NULL;
+ }
+ if (voe_hardware_) {
+ voe_hardware_->Release();
+ voe_hardware_ = NULL;
+ }
+#ifdef _TEST_XMEDIA_
+ if (voe_xmedia_) {
+ voe_xmedia_->Release();
+ voe_xmedia_ = NULL;
+ }
+#endif
+#ifdef _TEST_CALL_REPORT_
+ if (voe_call_report_) {
+ voe_call_report_->Release();
+ voe_call_report_ = NULL;
+ }
+#endif
+#ifdef _TEST_NETEQ_STATS_
+ if (voe_neteq_stats_) {
+ voe_neteq_stats_->Release();
+ voe_neteq_stats_ = NULL;
+ }
+#endif
+ if (false == VoiceEngine::Delete(voice_engine_)) {
+ TEST_LOG("\n\nVoiceEngine::Delete() failed. \n");
+ releaseOK = false;
+ }
+
+ // With the engine deleted, trace calls must fail again (mirrors the
+ // pre-creation check in Init()). Logged only; not treated as an error.
+ if (VoiceEngine::SetTraceFile(NULL) != -1) {
+ TEST_LOG("\nError at line: %i (VoiceEngine::SetTraceFile()"
+ "should fail)!\n", __LINE__);
+ }
+
+ return (releaseOK == true) ? 0 : -1;
+}
+
+// Runs the selected non-standard suite (Extended loops over the chosen
+// sub-API tests and re-prompts; Stress/Unit/CPU delegate to their test
+// classes), then releases all engine interfaces.
+// Fix: the function printed "*** Test failed! ***" but unconditionally
+// returned 0, so a failing run still produced a success exit code via
+// RunInManualMode()/main(). It now returns 1 on any test or teardown failure.
+int run_auto_test(TestType test_type, ExtendedSelection ext_selection) {
+ // Standard tests are routed to RunInAutomatedMode() by the caller.
+ assert(test_type != Standard);
+
+ SubAPIManager api_manager;
+ api_manager.DisplayStatus();
+
+ ////////////////////////////////////
+ // Create VoiceEngine and sub API:s
+
+ voetest::VoETestManager test_manager;
+ if (!test_manager.Init()) {
+ return -1;
+ }
+ test_manager.GetInterfaces();
+
+ int result(-1);
+ if (test_type == Extended) {
+ VoEExtendedTest xtend(test_manager);
+
+ result = 0;
+ while (ext_selection != XSEL_None) {
+ if (ext_selection == XSEL_Base || ext_selection == XSEL_All) {
+ if ((result = xtend.TestBase()) == -1)
+ break;
+ xtend.TestPassed("Base");
+ }
+ if (ext_selection == XSEL_CallReport || ext_selection == XSEL_All) {
+ if ((result = xtend.TestCallReport()) == -1)
+ break;
+ xtend.TestPassed("CallReport");
+ }
+ if (ext_selection == XSEL_Codec || ext_selection == XSEL_All) {
+ if ((result = xtend.TestCodec()) == -1)
+ break;
+ xtend.TestPassed("Codec");
+ }
+ if (ext_selection == XSEL_DTMF || ext_selection == XSEL_All) {
+ if ((result = xtend.TestDtmf()) == -1)
+ break;
+ xtend.TestPassed("Dtmf");
+ }
+ if (ext_selection == XSEL_Encryption || ext_selection == XSEL_All) {
+ if ((result = xtend.TestEncryption()) == -1)
+ break;
+ xtend.TestPassed("Encryption");
+ }
+ if (ext_selection == XSEL_ExternalMedia || ext_selection == XSEL_All) {
+ if ((result = xtend.TestExternalMedia()) == -1)
+ break;
+ xtend.TestPassed("ExternalMedia");
+ }
+ if (ext_selection == XSEL_File || ext_selection == XSEL_All) {
+ if ((result = xtend.TestFile()) == -1)
+ break;
+ xtend.TestPassed("File");
+ }
+ if (ext_selection == XSEL_Hardware || ext_selection == XSEL_All) {
+ if ((result = xtend.TestHardware()) == -1)
+ break;
+ xtend.TestPassed("Hardware");
+ }
+ if (ext_selection == XSEL_NetEqStats || ext_selection == XSEL_All) {
+ if ((result = xtend.TestNetEqStats()) == -1)
+ break;
+ xtend.TestPassed("NetEqStats");
+ }
+ if (ext_selection == XSEL_Network || ext_selection == XSEL_All) {
+ if ((result = xtend.TestNetwork()) == -1)
+ break;
+ xtend.TestPassed("Network");
+ }
+ if (ext_selection == XSEL_RTP_RTCP || ext_selection == XSEL_All) {
+ if ((result = xtend.TestRTP_RTCP()) == -1)
+ break;
+ xtend.TestPassed("RTP_RTCP");
+ }
+ if (ext_selection == XSEL_VideoSync || ext_selection == XSEL_All) {
+ if ((result = xtend.TestVideoSync()) == -1)
+ break;
+ xtend.TestPassed("VideoSync");
+ }
+ if (ext_selection == XSEL_VolumeControl || ext_selection == XSEL_All) {
+ if ((result = xtend.TestVolumeControl()) == -1)
+ break;
+ xtend.TestPassed("VolumeControl");
+ }
+ // Prompt for the next extended suite; selecting "None" exits.
+ api_manager.GetExtendedMenuSelection(ext_selection);
+ } // while (extendedSel != XSEL_None)
+ } else if (test_type == Stress) {
+ VoEStressTest stressTest(test_manager);
+ result = stressTest.DoTest();
+ } else if (test_type == Unit) {
+ VoEUnitTest unitTest(test_manager);
+ result = unitTest.DoTest();
+ } else if (test_type == CPU) {
+ VoECpuTest cpuTest(test_manager);
+ result = cpuTest.DoTest();
+ } else {
+ // Should never end up here
+ assert(false);
+ }
+
+ //////////////////
+ // Release/Delete
+
+ int release_ok = test_manager.ReleaseInterfaces();
+
+ const bool passed = (0 == result) && (release_ok != -1);
+ if (passed) {
+ TEST_LOG("\n\n*** All tests passed *** \n\n");
+ } else {
+ TEST_LOG("\n\n*** Test failed! *** \n");
+ }
+
+ // Propagate the outcome so the process exit code reflects failures.
+ return passed ? 0 : 1;
+}
+} // namespace voetest
+
+// Interactive entry point: prompts for a test category on stdin. Standard
+// tests are delegated to the googletest-based RunInAutomatedMode(); all other
+// categories go through run_auto_test(). Returns the chosen runner's result,
+// or 0 for quit/invalid input.
+int RunInManualMode() {
+ using namespace voetest;
+
+ SubAPIManager api_manager;
+ api_manager.DisplayStatus();
+
+ printf("----------------------------\n");
+ printf("Select type of test\n\n");
+ printf(" (0) Quit\n");
+ printf(" (1) Standard test\n");
+ printf(" (2) Extended test(s)...\n");
+ printf(" (3) Stress test(s)...\n");
+ printf(" (4) Unit test(s)...\n");
+ printf(" (5) CPU & memory reference test [Windows]...\n");
+ printf("\n: ");
+
+ int selection(0);
+
+ // Result stored in |dummy| only to silence unused-result warnings.
+ dummy = scanf("%d", &selection);
+
+ ExtendedSelection ext_selection = XSEL_Invalid;
+ TestType test_type = Invalid;
+
+ switch (selection) {
+ case 0:
+ return 0;
+ case 1:
+ test_type = Standard;
+ break;
+ case 2:
+ test_type = Extended;
+ // Re-prompt until a valid extended-test selection is made.
+ while (!api_manager.GetExtendedMenuSelection(ext_selection))
+ continue;
+ break;
+ case 3:
+ test_type = Stress;
+ break;
+ case 4:
+ test_type = Unit;
+ break;
+ case 5:
+ test_type = CPU;
+ break;
+ default:
+ TEST_LOG("Invalid selection!\n");
+ return 0;
+ }
+
+ if (test_type == Standard) {
+ TEST_LOG("\n\n+++ Running standard tests +++\n\n");
+
+ // Currently, all googletest-rewritten tests are in the "automated" suite.
+ return RunInAutomatedMode();
+ }
+
+ // Function that can be called from other entry functions.
+ return run_auto_test(test_type, ext_selection);
+}
+
+// ----------------------------------------------------------------------------
+// main
+// ----------------------------------------------------------------------------
+
+#if !defined(WEBRTC_IOS)
+// Process entry point (not built on iOS). Initializes googletest and gflags,
+// then dispatches to the automated (--automated) or manual/interactive mode.
+int main(int argc, char** argv) {
+ // This function and RunInAutomatedMode is defined in automated_mode.cc
+ // to avoid macro clashes with googletest (for instance ASSERT_TRUE).
+ InitializeGoogleTest(&argc, argv);
+ google::ParseCommandLineFlags(&argc, &argv, true);
+
+ if (FLAGS_automated) {
+ return RunInAutomatedMode();
+ }
+
+ return RunInManualMode();
+}
+#endif //#if !defined(WEBRTC_IOS)
diff --git a/voice_engine/test/auto_test/voe_standard_test.h b/voice_engine/test/auto_test/voe_standard_test.h
new file mode 100644
index 0000000..6a58d7b
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_standard_test.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
+
+#include <stdio.h>
+#include <string>
+
+#include "gflags/gflags.h"
+#include "resource_manager.h"
+#include "voe_audio_processing.h"
+#include "voe_base.h"
+#include "voe_dtmf.h"
+#include "voe_errors.h"
+#include "voe_file.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_test_defines.h"
+#include "voe_test_interface.h"
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#include "voe_call_report.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+#include "voe_codec.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#include "voe_encryption.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#include "voe_external_media.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#include "voe_hardware.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+#include "voe_network.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#include "voe_video_sync.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#include "voe_volume_control.h"
+#endif
+
+#ifdef _TEST_NETEQ_STATS_
+namespace webrtc {
+class CriticalSectionWrapper;
+class ThreadWrapper;
+class VoENetEqStats;
+}
+#endif
+
+#if defined(WEBRTC_ANDROID)
+extern char mobileLogMsg[640];
+#endif
+
+DECLARE_bool(include_timing_dependent_tests);
+
+namespace voetest {
+
+// Records, at construction time, which VoiceEngine sub-APIs were compiled
+// into this build (one bool per WEBRTC_VOICE_ENGINE_*_API define; Base is
+// always available) and drives the extended-test selection menu.
+class SubAPIManager {
+ public:
+ SubAPIManager()
+ : _base(true),
+ _callReport(false),
+ _codec(false),
+ _dtmf(false),
+ _encryption(false),
+ _externalMedia(false),
+ _file(false),
+ _hardware(false),
+ _netEqStats(false),
+ _network(false),
+ _rtp_rtcp(false),
+ _videoSync(false),
+ _volumeControl(false),
+ _apm(false),
+ _xsel(XSEL_Invalid) {
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+ _callReport = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+ _codec = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+ _dtmf = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+ _encryption = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+ _externalMedia = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+ _file = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+ _hardware = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+ _netEqStats = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+ _network = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+ _rtp_rtcp = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+ _videoSync = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+ _volumeControl = true;
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+ _apm = true;
+#endif
+ }
+
+ // Logs the supported and excluded sub-API lists.
+ void DisplayStatus() const;
+ // Interactive menu; returns true and fills |sel| on a valid selection.
+ bool GetExtendedMenuSelection(ExtendedSelection& sel);
+
+ private:
+ bool _base, _callReport, _codec, _dtmf, _encryption;
+ bool _externalMedia, _file, _hardware;
+ bool _netEqStats, _network, _rtp_rtcp, _videoSync, _volumeControl, _apm;
+ // Last selection made through GetExtendedMenuSelection().
+ ExtendedSelection _xsel;
+};
+
+// Owns the VoiceEngine instance and one pointer per sub-API interface for the
+// duration of a test run. Lifecycle: construct, Init(), GetInterfaces(), run
+// tests via the *Ptr() accessors, then ReleaseInterfaces().
+class VoETestManager {
+ public:
+ VoETestManager();
+ ~VoETestManager();
+
+ // Must be called after construction.
+ bool Init();
+
+ void GetInterfaces();
+ int ReleaseInterfaces();
+
+ // Path to the long audio input file, or "" (with an error log) when the
+ // resource could not be located.
+ // NOTE(review): if long_audio_file_path() returns by value, the returned
+ // c_str() points into a temporary that dies at function exit — confirm it
+ // returns a reference to storage owned by resource_manager_.
+ const char* AudioFilename() const {
+ const std::string& result = resource_manager_.long_audio_file_path();
+ if (result.length() == 0) {
+ TEST_LOG("ERROR: Failed to open input file!");
+ }
+ return result.c_str();
+ }
+
+ // Accessors below return the raw interface pointers (NULL until
+ // GetInterfaces() has run); ownership stays with this manager.
+ VoiceEngine* VoiceEnginePtr() const {
+ return voice_engine_;
+ }
+ VoEBase* BasePtr() const {
+ return voe_base_;
+ }
+ VoECodec* CodecPtr() const {
+ return voe_codec_;
+ }
+ VoEVolumeControl* VolumeControlPtr() const {
+ return voe_volume_control_;
+ }
+ VoEDtmf* DtmfPtr() const {
+ return voe_dtmf_;
+ }
+ VoERTP_RTCP* RTP_RTCPPtr() const {
+ return voe_rtp_rtcp_;
+ }
+ VoEAudioProcessing* APMPtr() const {
+ return voe_apm_;
+ }
+
+ VoENetwork* NetworkPtr() const {
+ return voe_network_;
+ }
+
+ VoEFile* FilePtr() const {
+ return voe_file_;
+ }
+
+ VoEHardware* HardwarePtr() const {
+ return voe_hardware_;
+ }
+
+ VoEVideoSync* VideoSyncPtr() const {
+ return voe_vsync_;
+ }
+
+ VoEEncryption* EncryptionPtr() const {
+ return voe_encrypt_;
+ }
+
+ VoEExternalMedia* ExternalMediaPtr() const {
+ return voe_xmedia_;
+ }
+
+ VoECallReport* CallReportPtr() const {
+ return voe_call_report_;
+ }
+
+#ifdef _TEST_NETEQ_STATS_
+ VoENetEqStats* NetEqStatsPtr() const {
+ return voe_neteq_stats_;
+ }
+
+#endif
+
+ private:
+ // True once Init() has created the engine.
+ bool initialized_;
+
+ VoiceEngine* voice_engine_;
+ VoEBase* voe_base_;
+ VoECallReport* voe_call_report_;
+ VoECodec* voe_codec_;
+ VoEDtmf* voe_dtmf_;
+ VoEEncryption* voe_encrypt_;
+ VoEExternalMedia* voe_xmedia_;
+ VoEFile* voe_file_;
+ VoEHardware* voe_hardware_;
+ VoENetwork* voe_network_;
+#ifdef _TEST_NETEQ_STATS_
+ VoENetEqStats* voe_neteq_stats_;
+#endif
+ VoERTP_RTCP* voe_rtp_rtcp_;
+ VoEVideoSync* voe_vsync_;
+ VoEVolumeControl* voe_volume_control_;
+ VoEAudioProcessing* voe_apm_;
+
+ ResourceManager resource_manager_;
+};
+
+} // namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_STANDARD_TEST_H
diff --git a/voice_engine/test/auto_test/voe_stress_test.cc b/voice_engine/test/auto_test/voe_stress_test.cc
new file mode 100644
index 0000000..5bae92e
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_stress_test.cc
@@ -0,0 +1,409 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Some ideas of improvements:
+// Break out common init and maybe terminate to separate function(s).
+// How much trace should we have enabled?
+// API error counter, to print info and return -1 if any error.
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+#include <conio.h>
+#endif
+
+#include "voe_stress_test.h"
+#include "voe_standard_test.h"
+
+#include "voice_engine/voice_engine_defines.h" // defines build macros
+#include "thread_wrapper.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#define VALIDATE_STRESS(expr) \
+ if (expr) \
+ { \
+ printf("Error at line: %i, %s \n", __LINE__, #expr); \
+ printf("Error code: %i \n", base->LastError()); \
+ }
+
+#ifdef _WIN32
+// Pause if supported
+#define PAUSE_OR_SLEEP(x) PAUSE;
+#else
+// Sleep a bit instead if pause not supported
+#define PAUSE_OR_SLEEP(x) SLEEP(x);
+#endif
+
+const char* VoEStressTest::_key = "====YUtFWRAAAAADBtIHgAAAAAEAAAAcAAAAAQBHU0ds"
+ "b2JhbCBJUCBTb3VuZAAC\nAAAAIwAAAExpY2Vuc2VkIHRvIE5vcnRlbCBOZXR3cm9rcwAAAAA"
+ "xAAAAZxZ7/u0M\niFYyTwSwko5Uutf7mh8S0O4rYZYTFidbzQeuGonuL17F/2oD/2pfDp3jL4"
+ "Rf3z/A\nnlJsEJgEtASkDNFuwLILjGY0pzjjAYQp3pCl6z6k2MtE06AirdjGLYCjENpq/opX"
+ "\nOrs3sIuwdYK5va/aFcsjBDmlsGCUM48RDYG9s23bIHYafXUC4ofOaubbZPWiPTmL\nEVJ8WH"
+ "4F9pgNjALc14oJXfON7r/3\n=EsLx";
+
+// Main loop of the stress suite: keeps showing the menu and running the
+// chosen test(s) until the user picks 0 (quit). Always returns 0; individual
+// test failures are only logged via VALIDATE_STRESS.
+int VoEStressTest::DoTest() {
+ int test(-1);
+ while (test != 0) {
+ test = MenuSelection();
+ switch (test) {
+ case 0:
+ // Quit stress test
+ break;
+ case 1:
+ // All tests
+ StartStopTest();
+ CreateDeleteChannelsTest();
+ MultipleThreadsTest();
+ break;
+ case 2:
+ StartStopTest();
+ break;
+ case 3:
+ CreateDeleteChannelsTest();
+ break;
+ case 4:
+ MultipleThreadsTest();
+ break;
+ default:
+ // Should not be possible
+ printf("Invalid selection! (Test code error)\n");
+ assert(false);
+ } // switch
+ } // while
+
+ return 0;
+}
+
+// Prints the stress-test menu and reads selections from stdin until one is in
+// range [0, maxMenuSelection]; returns that selection. Checks the scanf
+// return value, so non-numeric input re-prompts instead of looping on stale
+// data.
+int VoEStressTest::MenuSelection() {
+ printf("------------------------------------------------\n");
+ printf("Select stress test\n\n");
+ printf(" (0) Quit\n");
+ printf(" (1) All\n");
+ printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+ printf(" (2) Start/stop\n");
+ printf(" (3) Create/delete channels\n");
+ printf(" (4) Multiple threads\n");
+
+ const int maxMenuSelection = 4;
+ int selection(-1);
+
+ while ((selection < 0) || (selection > maxMenuSelection)) {
+ printf("\n: ");
+ int retval = scanf("%d", &selection);
+ if ((retval != 1) || (selection < 0) || (selection > maxMenuSelection)) {
+ printf("Invalid selection!\n");
+ }
+ }
+
+ return selection;
+}
+
+// Stress test: repeatedly starts and stops receive/playout/send on channel 0
+// over loopback (127.0.0.1:4800), then does one longer run for a manual audio
+// check. Errors are logged via VALIDATE_STRESS; always returns 0.
+// Fix: corrected the typo "thest" in the user-visible notice.
+int VoEStressTest::StartStopTest() {
+ printf("------------------------------------------------\n");
+ printf("Running start/stop test\n");
+ printf("------------------------------------------------\n");
+
+ printf("\nNOTE: this test will fail after a while if Core audio is used\n");
+ printf("because MS returns AUDCLNT_E_CPUUSAGE_EXCEEDED (VoE Error 10013).\n");
+
+ // Get sub-API pointers
+ VoEBase* base = _mgr.BasePtr();
+
+ // Set trace
+ // VALIDATE_STRESS(base->SetTraceFileName(
+ // GetFilename("VoEStressTest_StartStop_trace.txt")));
+ // VALIDATE_STRESS(base->SetDebugTraceFileName(
+ // GetFilename("VoEStressTest_StartStop_trace_debug.txt")));
+ // VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+ // kTraceWarning | kTraceError |
+ // kTraceCritical | kTraceApiCall |
+ // kTraceMemory | kTraceInfo));
+ VALIDATE_STRESS(base->Init());
+ VALIDATE_STRESS(base->CreateChannel());
+
+ ///////////// Start test /////////////
+
+ int numberOfLoops(2000);
+ int loopSleep(200);
+ int i(0);
+ int markInterval(20);
+
+ printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+ numberOfLoops, loopSleep, markInterval);
+ printf("Test will take approximately %d minutes. \n",
+ numberOfLoops * loopSleep / 1000 / 60 + 1);
+
+ for (i = 0; i < numberOfLoops; ++i) {
+ VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
+ VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
+ VALIDATE_STRESS(base->StartReceive(0));
+ VALIDATE_STRESS(base->StartPlayout(0));
+ VALIDATE_STRESS(base->StartSend(0));
+ if (!(i % markInterval))
+ MARK();
+ SLEEP(loopSleep);
+ VALIDATE_STRESS(base->StopSend(0));
+ VALIDATE_STRESS(base->StopPlayout(0));
+ VALIDATE_STRESS(base->StopReceive(0));
+ }
+ ANL();
+
+ // One final, longer run so the operator can verify audio quality.
+ VALIDATE_STRESS(base->SetLocalReceiver(0, 4800));
+ VALIDATE_STRESS(base->SetSendDestination(0, 4800, "127.0.0.1"));
+ VALIDATE_STRESS(base->StartReceive(0));
+ VALIDATE_STRESS(base->StartPlayout(0));
+ VALIDATE_STRESS(base->StartSend(0));
+ printf("Verify that audio is good. \n");
+ PAUSE_OR_SLEEP(20000);
+ VALIDATE_STRESS(base->StopSend(0));
+ VALIDATE_STRESS(base->StopPlayout(0));
+ VALIDATE_STRESS(base->StopReceive(0));
+
+ ///////////// End test /////////////
+
+
+ // Terminate
+ VALIDATE_STRESS(base->DeleteChannel(0));
+ VALIDATE_STRESS(base->Terminate());
+
+ printf("Test finished \n");
+
+ return 0;
+}
+
+// Stress test: seeds the engine with maxChannels/2 channels, then for
+// numberOfLoops iterations randomly creates or deletes a channel, tracking
+// liveness in a heap-allocated bool array. Errors are logged via
+// VALIDATE_STRESS; always returns 0.
+int VoEStressTest::CreateDeleteChannelsTest() {
+ printf("------------------------------------------------\n");
+ printf("Running create/delete channels test\n");
+ printf("------------------------------------------------\n");
+
+ // Get sub-API pointers
+ VoEBase* base = _mgr.BasePtr();
+
+ // Set trace
+ // VALIDATE_STRESS(base->SetTraceFileName(
+ // GetFilename("VoEStressTest_CreateChannels_trace.txt")));
+ // VALIDATE_STRESS(base->SetDebugTraceFileName(
+ // GetFilename("VoEStressTest_CreateChannels_trace_debug.txt")));
+ // VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+ // kTraceWarning | kTraceError |
+ // kTraceCritical | kTraceApiCall |
+ // kTraceMemory | kTraceInfo));
+ VALIDATE_STRESS(base->Init());
+
+ ///////////// Start test /////////////
+
+ int numberOfLoops(10000);
+ int loopSleep(10);
+ int i(0);
+ int markInterval(200);
+
+ printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+ numberOfLoops, loopSleep, markInterval);
+ printf("Test will take approximately %d minutes. \n",
+ numberOfLoops * loopSleep / 1000 / 60 + 1);
+
+ // Some possible extensions include:
+ // Different sleep times (fixed or random) or zero.
+ // Start call on all or some channels.
+ // Two parts: first have a slight overweight to creating channels,
+ // then to deleting. (To ensure we hit max channels and go to zero.)
+ // Make sure audio is OK after test has finished.
+
+ // Set up, start with maxChannels/2 channels
+ const int maxChannels = base->MaxNumOfChannels();
+ VALIDATE_STRESS(maxChannels < 1); // Should always have at least one channel
+ // channelState[id] == true means channel |id| currently exists.
+ bool* channelState = new bool[maxChannels];
+ memset(channelState, 0, maxChannels * sizeof(bool));
+ int channel(0);
+ int noOfActiveChannels(0);
+ for (i = 0; i < (maxChannels / 2); ++i) {
+ channel = base->CreateChannel();
+ VALIDATE_STRESS(channel < 0);
+ if (channel >= 0) {
+ channelState[channel] = true;
+ ++noOfActiveChannels;
+ }
+ }
+ srand((unsigned int) time(NULL));
+ bool action(false);
+ double rnd(0.0);
+ int res(0);
+
+ // Create/delete channels with slight
+ for (i = 0; i < numberOfLoops; ++i) {
+ // Randomize action (create or delete channel)
+ action = rand() <= (RAND_MAX / 2);
+ if (action) {
+ if (noOfActiveChannels < maxChannels) {
+ // Create new channel
+ channel = base->CreateChannel();
+ VALIDATE_STRESS(channel < 0);
+ if (channel >= 0) {
+ channelState[channel] = true;
+ ++noOfActiveChannels;
+ }
+ }
+ } else {
+ if (noOfActiveChannels > 0) {
+ // Delete random channel that's created [0, maxChannels - 1]
+ do {
+ rnd = static_cast<double> (rand());
+ channel = static_cast<int> (rnd /
+ (static_cast<double> (RAND_MAX) + 1.0f) *
+ maxChannels);
+ } while (!channelState[channel]); // Must find a created channel
+
+ res = base->DeleteChannel(channel);
+ VALIDATE_STRESS(0 != res);
+ if (0 == res) {
+ channelState[channel] = false;
+ --noOfActiveChannels;
+ }
+ }
+ }
+
+ if (!(i % markInterval))
+ MARK();
+ SLEEP(loopSleep);
+ }
+ ANL();
+
+ delete[] channelState;
+
+ ///////////// End test /////////////
+
+
+ // Terminate
+ VALIDATE_STRESS(base->Terminate()); // Deletes all channels
+
+ printf("Test finished \n");
+
+ return 0;
+}
+
+// Stress test: this thread and a second thread (RunExtraApi/ProcessExtraApi)
+// both randomly call StartPlayout/StopPlayout on channel 0, exercising the
+// engine's API from two threads concurrently. Errors are logged via
+// VALIDATE_STRESS; always returns 0.
+int VoEStressTest::MultipleThreadsTest() {
+ printf("------------------------------------------------\n");
+ printf("Running multiple threads test\n");
+ printf("------------------------------------------------\n");
+
+ // Get sub-API pointers
+ VoEBase* base = _mgr.BasePtr();
+
+ // Set trace
+ // VALIDATE_STRESS(base->SetTraceFileName(
+ // GetFilename("VoEStressTest_MultipleThreads_trace.txt")));
+ // VALIDATE_STRESS(base->SetDebugTraceFileName(
+ // GetFilename("VoEStressTest_MultipleThreads_trace_debug.txt")));
+ // VALIDATE_STRESS(base->SetTraceFilter(kTraceStateInfo |
+ // kTraceWarning | kTraceError |
+ // kTraceCritical | kTraceApiCall |
+ // kTraceMemory | kTraceInfo));
+
+ // Init
+ VALIDATE_STRESS(base->Init());
+ VALIDATE_STRESS(base->CreateChannel());
+
+ ///////////// Start test /////////////
+
+ int numberOfLoops(10000);
+ int loopSleep(0);
+ int i(0);
+ int markInterval(1000);
+
+ printf("Running %d loops with %d ms sleep. Mark every %d loop. \n",
+ numberOfLoops, loopSleep, markInterval);
+ printf("Test will take approximately %d minutes. \n",
+ numberOfLoops * loopSleep / 1000 / 60 + 1);
+
+ srand((unsigned int) time(NULL));
+ int rnd(0);
+
+ // Start extra thread
+ const char* threadName = "StressTest Extra API Thread";
+ _ptrExtraApiThread = ThreadWrapper::CreateThread(RunExtraApi, this,
+ kNormalPriority, threadName);
+ unsigned int id(0);
+ // Start() returns success; VALIDATE_STRESS logs when it returns false.
+ VALIDATE_STRESS(!_ptrExtraApiThread->Start(id));
+
+ // Some possible extensions include:
+ // Add more API calls to randomize
+ // More threads
+ // Different sleep times (fixed or random).
+ // Make sure audio is OK after test has finished.
+
+ // Call random API functions here and in extra thread, ignore any error
+ for (i = 0; i < numberOfLoops; ++i) {
+ // This part should be equal to the marked part in the extra thread
+ // --- BEGIN ---
+ rnd = rand();
+ if (rnd < (RAND_MAX / 2)) {
+ // Start playout
+ base->StartPlayout(0);
+ } else {
+ // Stop playout
+ base->StopPlayout(0);
+ }
+ // --- END ---
+
+ if (!(i % markInterval))
+ MARK();
+ SLEEP(loopSleep);
+ }
+ ANL();
+
+ // Stop extra thread
+ VALIDATE_STRESS(!_ptrExtraApiThread->Stop());
+ delete _ptrExtraApiThread;
+
+ ///////////// End test /////////////
+
+ // Terminate
+ VALIDATE_STRESS(base->Terminate()); // Deletes all channels
+
+ printf("Test finished \n");
+
+ return 0;
+}
+
+// Thread functions
+
+// ThreadWrapper trampoline: |ptr| is the VoEStressTest instance passed to
+// CreateThread(); forwards to ProcessExtraApi() on each iteration.
+bool VoEStressTest::RunExtraApi(void* ptr) {
+ return static_cast<VoEStressTest*> (ptr)->ProcessExtraApi();
+}
+
+// Body of the extra API thread: randomly starts or stops playout on channel
+// 0, mirroring the marked section in MultipleThreadsTest(). Returns true so
+// the thread keeps running until Stop() is called.
+bool VoEStressTest::ProcessExtraApi() {
+ // Prepare
+ VoEBase* base = _mgr.BasePtr();
+ int rnd(0);
+
+ // Call random API function, ignore any error
+
+ // This part should be equal to the marked part in the main thread
+ // --- BEGIN ---
+ rnd = rand();
+ if (rnd < (RAND_MAX / 2)) {
+ // Start playout
+ base->StartPlayout(0);
+ } else {
+ // Stop playout
+ base->StopPlayout(0);
+ }
+ // --- END ---
+
+ return true;
+}
+
+} // namespace voetest
diff --git a/voice_engine/test/auto_test/voe_stress_test.h b/voice_engine/test/auto_test/voe_stress_test.h
new file mode 100644
index 0000000..b3a418c
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_stress_test.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
+
+namespace webrtc {
+class ThreadWrapper;
+}
+
+namespace voetest {
+// TODO(andrew): using directives are not permitted.
+using namespace webrtc;
+
+class VoETestManager;
+
+// Driver for the VoiceEngine stress tests (start/stop, channel churn,
+// multi-threaded API hammering). Holds a reference to the shared test
+// manager; does not own it.
+class VoEStressTest {
+ public:
+  // Keeps a reference to |mgr|; the caller keeps ownership.
+  VoEStressTest(VoETestManager& mgr) :
+      _mgr(mgr), _ptrExtraApiThread(NULL) {
+  }
+  ~VoEStressTest() {
+  }
+  // Entry point: runs the interactive stress-test menu loop.
+  int DoTest();
+
+ private:
+  int MenuSelection();
+  int StartStopTest();
+  int CreateDeleteChannelsTest();
+  int MultipleThreadsTest();
+
+  // Static thread entry point and the per-iteration body it forwards to.
+  static bool RunExtraApi(void* ptr);
+  bool ProcessExtraApi();
+
+  VoETestManager& _mgr;
+  static const char* _key;
+
+  // Extra API thread created by MultipleThreadsTest(); started, stopped
+  // and deleted within that test.
+  ThreadWrapper* _ptrExtraApiThread;
+};
+
+} // namespace voetest
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_STRESS_TEST_H
diff --git a/voice_engine/test/auto_test/voe_test_defines.h b/voice_engine/test/auto_test/voe_test_defines.h
new file mode 100644
index 0000000..9fff35b
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_test_defines.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
+#define WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
+
+// Read WEBRTC_VOICE_ENGINE_XXX_API compiler flags
+#include "engine_configurations.h"
+
+#ifdef WEBRTC_ANDROID
+#include <android/log.h>
+#define ANDROID_LOG_TAG "VoiceEngine Auto Test"
+#define TEST_LOG(...) \
+ __android_log_print(ANDROID_LOG_DEBUG, ANDROID_LOG_TAG, __VA_ARGS__)
+#define TEST_LOG_ERROR(...) \
+ __android_log_print(ANDROID_LOG_ERROR, ANDROID_LOG_TAG, __VA_ARGS__)
+#define TEST_LOG_FLUSH
+#else
+#define TEST_LOG printf
+#define TEST_LOG_ERROR printf
+#define TEST_LOG_FLUSH fflush(NULL)
+#endif
+
+// Select the tests to execute, list order below is same as they will be
+// executed. Note that all settings below will be overridden by sub-API
+// settings in engine_configurations.h.
+#define _TEST_BASE_
+#define _TEST_RTP_RTCP_
+#define _TEST_HARDWARE_
+#define _TEST_CODEC_
+#define _TEST_DTMF_
+#define _TEST_VOLUME_
+#define _TEST_AUDIO_PROCESSING_
+#define _TEST_FILE_
+#define _TEST_NETWORK_
+#define _TEST_CALL_REPORT_
+#define _TEST_VIDEO_SYNC_
+#define _TEST_ENCRYPT_
+#define _TEST_NETEQ_STATS_
+#define _TEST_XMEDIA_
+
+#define TESTED_AUDIO_LAYER kAudioPlatformDefault
+//#define TESTED_AUDIO_LAYER kAudioLinuxPulse
+
+// #define _ENABLE_VISUAL_LEAK_DETECTOR_ // Enables VLD to find memory leaks
+// #define _ENABLE_IPV6_TESTS_ // Enables IPv6 tests in network xtest
+// #define _USE_EXTENDED_TRACE_ // Adds unique trace files for extended test
+// #define _MEMORY_TEST_
+
+// Enable this when running instrumentation of some kind to exclude tests
+// that will not pass due to slowed down execution.
+// #define _INSTRUMENTATION_TESTING_
+
+// Exclude (override) API tests given preprocessor settings in
+// engine_configurations.h
+#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
+#undef _TEST_CODEC_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#undef _TEST_VOLUME_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
+#undef _TEST_DTMF_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+#undef _TEST_RTP_RTCP_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+#undef _TEST_AUDIO_PROCESSING_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_FILE_API
+#undef _TEST_FILE_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#undef _TEST_VIDEO_SYNC_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#undef _TEST_ENCRYPT_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#undef _TEST_HARDWARE_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#undef _TEST_XMEDIA_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
+#undef _TEST_NETWORK_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+#undef _TEST_NETEQ_STATS_
+#endif
+#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#undef _TEST_CALL_REPORT_
+#endif
+
+// Some parts can cause problems while running Insure
+#ifdef __INSURE__
+#define _INSTRUMENTATION_TESTING_
+#undef WEBRTC_SRTP
+#endif
+
+// Time in ms to test each packet size for each codec
+#define CODEC_TEST_TIME 400
+
+#define MARK() TEST_LOG("."); fflush(NULL); // Add test marker
+#define ANL() TEST_LOG("\n") // Add New Line
+#define AOK() TEST_LOG("[Test is OK]"); fflush(NULL); // Add OK
+#if defined(_WIN32)
+#define PAUSE \
+ { \
+ TEST_LOG("Press any key to continue..."); \
+ _getch(); \
+ TEST_LOG("\n"); \
+ }
+#else
+#define PAUSE \
+ { \
+ TEST_LOG("Continuing (pause not supported)\n"); \
+ }
+#endif
+
+#define TEST(s) \
+ { \
+ TEST_LOG("Testing: %s", #s); \
+ } \
+
+#ifdef _INSTRUMENTATION_TESTING_
+// Don't stop execution if error occurs
+#define TEST_MUSTPASS(expr) \
+ { \
+ if ((expr)) \
+ { \
+ TEST_LOG_ERROR("Error at line:%i, %s \n",__LINE__, #expr); \
+ TEST_LOG_ERROR("Error code: %i\n",voe_base_->LastError()); \
+ } \
+ }
+#define TEST_ERROR(code) \
+ { \
+ int err = voe_base_->LastError(); \
+ if (err != code) \
+ { \
+ TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n",
+ code, err, __LINE__);
+}
+}
+#else
+#define ASSERT_TRUE(expr) TEST_MUSTPASS(!(expr))
+#define ASSERT_FALSE(expr) TEST_MUSTPASS(expr)
+#define TEST_MUSTFAIL(expr) TEST_MUSTPASS(!((expr) == -1))
+#define TEST_MUSTPASS(expr) \
+ { \
+ if ((expr)) \
+ { \
+ TEST_LOG_ERROR("\nError at line:%i, %s \n",__LINE__, #expr); \
+ TEST_LOG_ERROR("Error code: %i\n", voe_base_->LastError()); \
+ PAUSE \
+ return -1; \
+ } \
+ }
+#define TEST_ERROR(code) \
+ { \
+ int err = voe_base_->LastError(); \
+ if (err != code) \
+ { \
+ TEST_LOG_ERROR("Invalid error code (%d, should be %d) at line %d\n", \
+ err, code, __LINE__); \
+ PAUSE \
+ return -1; \
+ } \
+ }
+#endif // #ifdef _INSTRUMENTATION_TESTING_
+#define EXCLUDE() \
+ { \
+ TEST_LOG("\n>>> Excluding test at line: %i <<<\n\n",__LINE__); \
+ }
+
+#define INCOMPLETE() \
+ { \
+ TEST_LOG("\n>>> Incomplete test at line: %i <<<\n\n",__LINE__); \
+ }
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_TEST_DEFINES_H
diff --git a/voice_engine/test/auto_test/voe_test_interface.h b/voice_engine/test/auto_test/voe_test_interface.h
new file mode 100644
index 0000000..9926f1e
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_test_interface.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Interface for starting test
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
+#define WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
+
+#include "common_types.h"
+
+namespace voetest {
+// TODO(andrew): Using directives not permitted.
+using namespace webrtc;
+
+// TestType enumerator: selects which top-level auto-test suite to run.
+enum TestType {
+  Invalid = -1, Standard = 0, Extended = 1, Stress = 2, Unit = 3, CPU = 4
+};
+
+// ExtendedSelection enumerator: selects which sub-API the extended test
+// exercises (XSEL_All covers every sub-API, XSEL_None skips them).
+enum ExtendedSelection {
+  XSEL_Invalid = -1,
+  XSEL_None = 0,
+  XSEL_All,
+  XSEL_Base,
+  XSEL_CallReport,
+  XSEL_Codec,
+  XSEL_DTMF,
+  XSEL_Encryption,
+  XSEL_ExternalMedia,
+  XSEL_File,
+  XSEL_Hardware,
+  XSEL_NetEqStats,
+  XSEL_Network,
+  XSEL_RTP_RTCP,
+  XSEL_VideoSync,
+  XSEL_VolumeControl,
+};
+
+// Main test function
+int runAutoTest(TestType testType, ExtendedSelection extendedSel);
+
+} // namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_TEST_INTERFACE_H
diff --git a/voice_engine/test/auto_test/voe_unit_test.cc b/voice_engine/test/auto_test/voe_unit_test.cc
new file mode 100644
index 0000000..d76c448
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_unit_test.cc
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_unit_test.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <cassert>
+#if defined(_WIN32)
+#include <conio.h>
+#endif
+
+#include "system_wrappers/interface/thread_wrapper.h"
+#include "testsupport/fileutils.h"
+#include "voice_engine/voice_engine_defines.h"
+#include "voice_engine/test/auto_test/fakes/fake_media_process.h"
+
+using namespace webrtc;
+
+namespace voetest {
+
+#define CHECK(expr) \
+ if (expr) \
+ { \
+ printf("Error at line: %i, %s \n", __LINE__, #expr); \
+ printf("Error code: %i \n", base->LastError()); \
+ PAUSE \
+ return -1; \
+ }
+
+// ----------------------------------------------------------------------------
+// >>> R E A D M E F I R S T <<<
+// ----------------------------------------------------------------------------
+
+// 1) The user must ensure that the following codecs are included in VoE:
+//
+// - L16
+// - G.729
+// - G.722.1C
+
+// 2) It is also possible to modify the simulation time for each individual test
+//
+const int dTBetweenEachTest = 4000;
+
+// ----------------------------------------------------------------------------
+// Encrypt
+// ----------------------------------------------------------------------------
+
+// External-encryption callback used here to emulate a stereo sender.
+// Depending on the state set by SetStereoExternalEncryption(), the RTP
+// payload is either passed through untouched or duplicated into a
+// two-channel layout: sample-interleaved for 16 or 8 bits per sample, or
+// frame-based (payload copied twice back-to-back) when
+// _extBitsPerSample == -1. The 12-byte RTP header is always copied
+// through unchanged. |bytes_out| receives the output packet length;
+// out_data must be large enough for 12 + 2 * payload bytes.
+void VoEUnitTest::encrypt(int channel_no, unsigned char * in_data,
+                          unsigned char * out_data, int bytes_in,
+                          int * bytes_out) {
+  int i;
+
+  if (!_extOnOff) {
+    // no stereo emulation <=> pure bypass
+    for (i = 0; i < bytes_in; i++)
+      out_data[i] = in_data[i];
+    *bytes_out = bytes_in;
+  } else if (_extOnOff && (_extBitsPerSample == 16)) {
+    // stereo emulation (sample based, 2 bytes per sample)
+
+    const int nBytesPayload = bytes_in - 12;
+
+    // RTP header (first 12 bytes)
+    memcpy(out_data, in_data, 12);
+
+    // skip RTP header
+    short* ptrIn = (short*) &in_data[12];
+    short* ptrOut = (short*) &out_data[12];
+
+    // duplicate each 16-bit sample into a left/right pair
+    for (i = 0; i < nBytesPayload / 2; i++) {
+      // produce two output samples for each input sample
+      *ptrOut++ = *ptrIn; // left sample
+      *ptrOut++ = *ptrIn; // right sample
+      ptrIn++;
+    }
+
+    *bytes_out = 12 + 2 * nBytesPayload;
+  } else if (_extOnOff && (_extBitsPerSample == 8)) {
+    // stereo emulation (sample based, 1 byte per sample)
+
+    const int nBytesPayload = bytes_in - 12;
+
+    // RTP header (first 12 bytes)
+    memcpy(out_data, in_data, 12);
+
+    // skip RTP header
+    unsigned char* ptrIn = (unsigned char*) &in_data[12];
+    unsigned char* ptrOut = (unsigned char*) &out_data[12];
+
+    // duplicate each 8-bit sample into a left/right pair
+    for (i = 0; i < nBytesPayload; i++) {
+      // produce two output samples for each input sample
+      *ptrOut++ = *ptrIn; // left sample
+      *ptrOut++ = *ptrIn; // right sample
+      ptrIn++;
+    }
+
+    *bytes_out = 12 + 2 * nBytesPayload;
+  } else if (_extOnOff && (_extBitsPerSample == -1)) {
+    // stereo emulation (frame based): whole payload written twice,
+    // first copy = left channel, second copy = right channel
+
+    const int nBytesPayload = bytes_in - 12;
+
+    // RTP header (first 12 bytes)
+    memcpy(out_data, in_data, 12);
+
+    // skip RTP header
+    unsigned char* ptrIn = (unsigned char*) &in_data[12];
+    unsigned char* ptrOut = (unsigned char*) &out_data[12];
+
+    // left channel
+    for (i = 0; i < nBytesPayload; i++) {
+      *ptrOut++ = *ptrIn++;
+    }
+
+    // rewind to the start of the payload for the second copy
+    ptrIn = (unsigned char*) &in_data[12];
+
+    // right channel
+    for (i = 0; i < nBytesPayload; i++) {
+      *ptrOut++ = *ptrIn++;
+    }
+
+    *bytes_out = 12 + 2 * nBytesPayload;
+  }
+}
+
+// External-decryption callback: a pure bypass that copies the packet
+// through unchanged and reports the same length.
+void VoEUnitTest::decrypt(int channel_no, unsigned char * in_data,
+                          unsigned char * out_data, int bytes_in,
+                          int * bytes_out) {
+  for (int idx = 0; idx < bytes_in; ++idx) {
+    out_data[idx] = in_data[idx];
+  }
+  *bytes_out = bytes_in;
+}
+
+// External-encryption callback for RTCP: bypass, the packet is copied
+// through unchanged.
+void VoEUnitTest::encrypt_rtcp(int channel_no, unsigned char * in_data,
+                               unsigned char * out_data, int bytes_in,
+                               int * bytes_out) {
+  int pos = 0;
+  while (pos < bytes_in) {
+    out_data[pos] = in_data[pos];
+    ++pos;
+  }
+  *bytes_out = bytes_in;
+}
+
+// External-decryption callback for RTCP: pure bypass, the packet is
+// copied through unchanged and |bytes_out| reports the same length.
+void VoEUnitTest::decrypt_rtcp(int channel_no, unsigned char * in_data,
+                               unsigned char * out_data, int bytes_in,
+                               int * bytes_out) {
+  int i;
+  for (i = 0; i < bytes_in; i++)
+    out_data[i] = in_data[i];
+  *bytes_out = bytes_in;
+}
+
+// Configures the stereo emulation performed by encrypt(): |onOff| toggles
+// it and |bitsPerSample| selects the scheme (16 or 8 for sample-based
+// duplication, -1 for frame-based). |channel| is only recorded in
+// _extChannel; encrypt() does not consult it.
+void VoEUnitTest::SetStereoExternalEncryption(int channel, bool onOff,
+                                              int bitsPerSample) {
+  _extOnOff = onOff;
+  _extChannel = channel;
+  _extBitsPerSample = bitsPerSample;
+}
+
+// VoEVEMediaProcess
+FakeMediaProcess mpobj;
+
+// ----------------------------------------------------------------------------
+// VoEUnitTest
+// ----------------------------------------------------------------------------
+
+// Stores a reference to the shared test manager and clears the
+// per-channel listen/play/send bookkeeping.
+// NOTE(review): the 32 entries presumably correspond to the maximum
+// number of VoE channels the tests exercise -- confirm against the
+// engine's channel limit.
+VoEUnitTest::VoEUnitTest(VoETestManager& mgr) :
+    _mgr(mgr), _extOnOff(false), _extBitsPerSample(-1), _extChannel(0) {
+  for (int i = 0; i < 32; i++) {
+    _listening[i] = false;
+    _playing[i] = false;
+    _sending[i] = false;
+  }
+}
+
+// ----------------------------------------------------------------------------
+// DoTest
+// ----------------------------------------------------------------------------
+
+// Interactive driver: repeatedly shows the menu and dispatches the chosen
+// unit test until the user selects quit (0) or a test returns -1.
+int VoEUnitTest::DoTest() {
+  int test(-1);
+  int ret(0);
+  while ((test != 0) && (ret != -1)) {
+    test = MenuSelection();
+    switch (test) {
+      case 0:
+        // Quit unit test
+        break;
+      case 1:
+        // "All" -- the mixer test is currently the only unit test,
+        // so both menu entries run the same thing.
+        ret = MixerTest();
+        break;
+      case 2:
+        ret = MixerTest();
+        break;
+      default:
+        // Should not be possible: MenuSelection() validates the range
+        printf("Invalid selection! (Test code error)\n");
+        assert(false);
+    } // switch
+  } // while
+
+  return ret;
+}
+
+// ----------------------------------------------------------------------------
+// MenuSelection
+// ----------------------------------------------------------------------------
+
+// Prints the unit-test menu and reads a selection from stdin until a
+// value in [0, maxMenuSelection] is entered. Returns the selection
+// (0 = quit; EOF on stdin is treated as quit).
+int VoEUnitTest::MenuSelection() {
+  printf("------------------------------------------------\n");
+  printf("Select unit test\n\n");
+  printf(" (0) Quit\n");
+  printf(" (1) All\n");
+  printf("- - - - - - - - - - - - - - - - - - - - - - - - \n");
+  printf(" (2) Mixer\n");
+
+  const int maxMenuSelection = 2;
+  int selection(-1);
+
+  while ((selection < 0) || (selection > maxMenuSelection)) {
+    printf("\n: ");
+    int retval = scanf("%d", &selection);
+    if (retval == EOF) {
+      // No more input (e.g. stdin closed): treat as quit instead of
+      // looping forever on a permanent EOF condition.
+      return 0;
+    }
+    if (retval != 1) {
+      // scanf does not consume non-numeric input; drain the rest of the
+      // line so the next iteration does not spin on the same bytes.
+      int ch;
+      while ((ch = getchar()) != '\n' && ch != EOF) {}
+      selection = -1;
+      printf("Invalid selection!\n");
+    } else if ((selection < 0) || (selection > maxMenuSelection)) {
+      printf("Invalid selection!\n");
+    }
+  }
+
+  return selection;
+}
+
+// ----------------------------------------------------------------------------
+// StartMedia
+// ----------------------------------------------------------------------------
+
+// Brings up media on |channel| bound to |rtpPort| in loopback to
+// 127.0.0.1. The |listen|/|playout|/|send| flags select which directions
+// are started; each started direction is mirrored into the
+// _listening/_playing/_sending arrays so StopMedia() can undo exactly
+// what was started. |fileAsMic| mixes a test file into the microphone
+// signal; |localFile| plays a 16 kHz PCM file out locally on the channel.
+// Returns 0 on success, -1 from CHECK on the first failing VoE call.
+int VoEUnitTest::StartMedia(int channel, int rtpPort, bool listen, bool playout,
+                            bool send, bool fileAsMic, bool localFile) {
+  VoEBase* base = _mgr.BasePtr();
+  VoEFile* file = _mgr.FilePtr();
+
+  // Reset bookkeeping before (re)starting this channel.
+  _listening[channel] = false;
+  _playing[channel] = false;
+  _sending[channel] = false;
+
+  CHECK(base->SetLocalReceiver(channel, rtpPort));
+  CHECK(base->SetSendDestination(channel, rtpPort, "127.0.0.1"));
+  if (listen) {
+    _listening[channel] = true;
+    CHECK(base->StartReceive(channel));
+  }
+  if (playout) {
+    _playing[channel] = true;
+    CHECK(base->StartPlayout(channel));
+  }
+  if (send) {
+    _sending[channel] = true;
+    CHECK(base->StartSend(channel));
+  }
+  if (fileAsMic) {
+    // play mic as file, mix with microphone to ensure that SWB can be
+    // tested as well
+    const bool mixWithMic(true);
+    CHECK(file->StartPlayingFileAsMicrophone(channel, _mgr.AudioFilename(),
+                                             true, mixWithMic));
+  }
+  if (localFile) {
+    std::string inputFile = webrtc::test::OutputPath() + "audio_short16.pcm";
+    CHECK(file->StartPlayingFileLocally(channel,
+                                        inputFile.c_str(),
+                                        false,
+                                        kFileFormatPcm16kHzFile));
+  }
+
+  return 0;
+}
+
+// ----------------------------------------------------------------------------
+// StopMedia
+// ----------------------------------------------------------------------------
+
+// Tears down everything StartMedia() set up on |channel|: stops any file
+// playback and then stops receive/playout/send for each direction whose
+// bookkeeping flag is set, clearing the flag. Returns 0 on success, -1
+// from CHECK on the first failing VoE call.
+int VoEUnitTest::StopMedia(int channel) {
+  VoEBase* base = _mgr.BasePtr();
+  VoEFile* file = _mgr.FilePtr();
+
+  if (file->IsPlayingFileAsMicrophone(channel)) {
+    CHECK(file->StopPlayingFileAsMicrophone(channel));
+  }
+  if (file->IsPlayingFileLocally(channel)) {
+    CHECK(file->StopPlayingFileLocally(channel));
+  }
+  if (_listening[channel]) {
+    _listening[channel] = false;
+    CHECK(base->StopReceive(channel));
+  }
+  if (_playing[channel]) {
+    _playing[channel] = false;
+    CHECK(base->StopPlayout(channel));
+  }
+  if (_sending[channel]) {
+    _sending[channel] = false;
+    CHECK(base->StopSend(channel));
+  }
+
+  return 0;
+}
+
+// Sleeps for |timeMillisec| milliseconds; when |addMarker| is set, prints
+// the pause length as "[dT=x.x]" so console output shows test pacing.
+// NOTE(review): ::Sleep is expected to be the platform sleep wrapper from
+// voice_engine_defines.h -- confirm.
+void VoEUnitTest::Sleep(unsigned int timeMillisec, bool addMarker) {
+  if (addMarker) {
+    float dtSec = (float) ((float) timeMillisec / 1000.0);
+    printf("[dT=%.1f]", dtSec);
+    fflush(NULL);
+  }
+  ::Sleep(timeMillisec);
+}
+
+// Blocks until a key is pressed on Windows; no-op on other platforms.
+void VoEUnitTest::Wait() {
+#if defined(_WIN32)
+  printf("\npress any key..."); fflush(NULL);
+  _getch();
+#endif
+}
+
+// Prints a test-progress message followed by a newline and flushes all
+// output streams so progress is visible immediately.
+void VoEUnitTest::Test(const char* msg) {
+  printf("%s\n", msg);
+  fflush(NULL);
+}
+
+int VoEUnitTest::MixerTest() {
+ // Set up test parameters first
+ //
+ const int testTime(dTBetweenEachTest);
+
+ printf("\n\n================================================\n");
+ printf(" Mixer Unit Test\n");
+ printf("================================================\n\n");
+
+ // Get sub-API pointers
+ //
+ VoEBase* base = _mgr.BasePtr();
+ VoECodec* codec = _mgr.CodecPtr();
+ VoEFile* file = _mgr.FilePtr();
+ VoEVolumeControl* volume = _mgr.VolumeControlPtr();
+ VoEEncryption* encrypt = _mgr.EncryptionPtr();
+ VoEDtmf* dtmf = _mgr.DtmfPtr();
+ VoEExternalMedia* xmedia = _mgr.ExternalMediaPtr();
+
+ // Set trace
+ //
+ std::string outputDir = webrtc::test::OutputPath();
+ std::string traceFile = outputDir + "UnitTest_Mixer_trace.txt";
+ VoiceEngine::SetTraceFile(outputDir.c_str());
+ VoiceEngine::SetTraceFilter(kTraceStateInfo | kTraceWarning | kTraceError |
+ kTraceCritical | kTraceApiCall | kTraceMemory |
+ kTraceInfo);
+
+ // Init
+ //
+ CHECK(base->Init());
+
+ // 8 kHz
+ // CodecInst l16_8 = { 123, "L16", 8000, 160, 1, 128000 };
+ CodecInst pcmu_8 = { 0, "pcmu", 8000, 160, 1, 64000 };
+ // CodecInst g729_8 = { 18, "g729", 8000, 160, 1, 8000 };
+
+ // 16 kHz
+ CodecInst ipcmwb_16 = { 97, "ipcmwb", 16000, 320, 1, 80000 };
+ CodecInst l16_16 = { 124, "L16", 16000, 320, 1, 256000 };
+
+ // 32 kHz
+ CodecInst l16_32 = { 125, "L16", 32000, 320, 1, 512000 };
+ CodecInst g722_1c_32 = { 126, "G7221", 32000, 640, 1, 32000 };// 20ms@32kHz
+
+ // ------------------------
+ // Verify mixing frequency
+ // ------------------------
+
+ base->CreateChannel();
+
+ Test(">> Verify correct mixing frequency:\n");
+
+ Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending file at 16kHz <=> mixing at 16kHz...");
+ CHECK(codec->SetSendCodec(0, ipcmwb_16));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending speech at 32kHz <=> mixing at 32Hz...");
+ CHECK(codec->SetSendCodec(0, l16_32));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+ CHECK(codec->SetSendCodec(0, pcmu_8));
+ Sleep(testTime);
+
+ Test("(ch 0) Playing 16kHz file locally <=> mixing at 16kHz...");
+ std::string inputFile = outputDir + "audio_long16.pcm";
+ CHECK(file->StartPlayingFileLocally(0, inputFile.c_str(),
+ false, kFileFormatPcm16kHzFile));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+
+ base->CreateChannel();
+
+ Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+ CHECK(codec->SetSendCodec(0, pcmu_8));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending speech at 32kHz <=> mixing at 32Hz...");
+ CHECK(codec->SetSendCodec(0, l16_32));
+ Sleep(testTime);
+
+ Test("(ch 1) Playing 16kHz file locally <=> mixing at 32kHz...");
+ CHECK(StartMedia(1, 54321, false, true, false, false, true));
+ Sleep(testTime);
+
+ CHECK(StopMedia(1));
+ CHECK(StopMedia(0));
+
+ base->DeleteChannel(1);
+ base->DeleteChannel(0);
+ ANL();
+
+ // -------------------------
+ // Verify stereo mode mixing
+ // -------------------------
+
+ base->CreateChannel();
+ base->CreateChannel();
+
+ // SetOutputVolumePan
+ //
+ // Ensure that all cases sound OK and that the mixer changes state between
+ // mono and stereo as it should. A debugger is required to trace the state
+ // transitions.
+
+ Test(">> Verify correct mixing in stereo using SetOutputVolumePan():\n");
+
+ Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+ CHECK(StartMedia(0, 12345, false, true, false, false, true));
+ Sleep(testTime);
+ Test("Panning volume to the left <=> mixing in stereo @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 0.0));
+ Sleep(testTime);
+ Test("Panning volume to the right <=> mixing in stereo @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+ Sleep(testTime);
+ Test("Back to center volume again <=> mixing in mono @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+ Sleep(testTime);
+ Test("(ch 1) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+ CHECK(StartMedia(1, 54321, false, true, false, false, true));
+ Sleep(testTime);
+ Test("Panning volume to the left <=> mixing in stereo @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 0.0));
+ Sleep(testTime);
+ Test("Back to center volume again <=> mixing in mono @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+ Sleep(testTime);
+ Test("(ch 1) Stopped playing file <=> mixing in mono @ 16kHz...");
+ CHECK(StopMedia(1));
+ Sleep(testTime);
+ CHECK(StopMedia(0));
+ Test("(ch 0) Sending file at 8kHz <=> mixing at 8kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+ Test("(ch 0) Sending speech at 32kHz <=> mixing at 32kHz...");
+ CHECK(codec->SetSendCodec(0, l16_32));
+ Sleep(testTime);
+ Test("Panning volume to the right <=> mixing in stereo @ 32kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+ Sleep(testTime);
+ Test("Back to center volume again <=> mixing in mono @ 32kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+ Sleep(testTime);
+ CHECK(StopMedia(0));
+ ANL();
+
+ base->DeleteChannel(0);
+ base->DeleteChannel(1);
+
+ // SetChannelOutputVolumePan
+ //
+ // Ensure that all cases sound OK and that the mixer changes state between
+ // mono and stereo as it should. A debugger is required to trace the state
+ // transitions.
+
+ base->CreateChannel();
+ base->CreateChannel();
+
+ Test(">> Verify correct mixing in stereo using"
+ " SetChannelOutputVolumePan():\n");
+
+ Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+ CHECK(StartMedia(0, 12345, false, true, false, false, true));
+ Sleep(testTime);
+ Test("(ch 0) Panning channel volume to the left <=> mixing in stereo @ "
+ "16kHz...");
+ CHECK(volume->SetOutputVolumePan(0, 1.0, 0.0));
+ Sleep(testTime);
+ Test("(ch 0) Panning channel volume to the right <=> mixing in stereo"
+ " @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(0, 0.0, 1.0));
+ Sleep(testTime);
+ Test("(ch 0) Back to center volume again <=> mixing in mono @"
+ " 16kHz...");
+ CHECK(volume->SetOutputVolumePan(0, 1.0, 1.0));
+ Sleep(testTime);
+ Test("(ch 1) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+ CHECK(StartMedia(1, 54321, false, true, false, false, true));
+ Sleep(testTime);
+ Test("(ch 1) Panning channel volume to the left <=> mixing in stereo "
+ "@ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(1, 1.0, 0.0));
+ Sleep(testTime);
+ Test("(ch 1) Back to center volume again <=> mixing in mono @ 16kHz...");
+ CHECK(volume->SetOutputVolumePan(1, 1.0, 1.0));
+ Sleep(testTime);
+ Test("(ch 1) Stopped playing file <=> mixing in mono @ 16kHz...");
+ CHECK(StopMedia(1));
+ Sleep(testTime);
+ CHECK(StopMedia(0));
+ ANL();
+
+ base->DeleteChannel(0);
+ base->DeleteChannel(1);
+
+ // Emulate stereo-encoding using Encryption
+ //
+ // Modify the transmitted RTP stream by using external encryption.
+ // Supports frame based and sample based "stereo-encoding schemes".
+
+ base->CreateChannel();
+
+ Test(">> Verify correct mixing in stereo using emulated stereo input:\n");
+
+ // enable external encryption
+ CHECK(encrypt->RegisterExternalEncryption(0, *this));
+ Test("(ch 0) External Encryption is now enabled:");
+
+ Test("(ch 0) Sending file at 8kHz <=> mixing in mono @ 8kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ // switch to 16kHz (L16) sending codec
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Test("(ch 0) Sending file at 16kHz (L16) <=> mixing in mono @ 16kHz...");
+ Sleep(testTime);
+
+ // register L16 as 2-channel codec on receiving side =>
+ // should sound bad since RTP module splits all received packets in half
+ // (sample based)
+ CHECK(base->StopPlayout(0));
+ CHECK(base->StopReceive(0));
+ l16_16.channels = 2;
+ CHECK(codec->SetRecPayloadType(0, l16_16));
+ CHECK(base->StartReceive(0));
+ CHECK(base->StartPlayout(0));
+ Test("(ch 0) 16kHz L16 is now registered as 2-channel codec on RX side => "
+ "should sound bad...");
+ Sleep(testTime);
+
+ // emulate sample-based stereo encoding
+ Test("(ch 0) Emulate sample-based stereo encoding on sending side => "
+ "should sound OK...");
+ SetStereoExternalEncryption(0, true, 16);
+ Sleep(testTime);
+ Test("(ch 0) Stop emulating sample-based stereo encoding on sending side =>"
+ " should sound bad...");
+ SetStereoExternalEncryption(0, false, 16);
+ Sleep(testTime);
+ Test("(ch 0) Emulate sample-based stereo encoding on sending side => "
+ "should sound OK...");
+ SetStereoExternalEncryption(0, true, 16);
+ Sleep(testTime);
+
+ // switch to 32kHz (L16) sending codec and disable stereo encoding
+ CHECK(codec->SetSendCodec(0, l16_32));
+ SetStereoExternalEncryption(0, false, 16);
+ Test("(ch 0) Sending file and spech at 32kHz (L16) <=> mixing in mono @ "
+ "32kHz...");
+ Sleep(testTime);
+
+ // register L16 32kHz as 2-channel codec on receiving side
+ CHECK(base->StopPlayout(0));
+ CHECK(base->StopReceive(0));
+ l16_32.channels = 2;
+ CHECK(codec->SetRecPayloadType(0, l16_32));
+ CHECK(base->StartReceive(0));
+ CHECK(base->StartPlayout(0));
+ Test("(ch 0) 32kHz L16 is now registered as 2-channel codec on RX side =>"
+ " should sound bad...");
+ Sleep(testTime);
+
+ // emulate sample-based stereo encoding
+ Test("(ch 0) Emulate sample-based stereo encoding on sending side =>"
+ " should sound OK...");
+ SetStereoExternalEncryption(0, true, 16);
+ Sleep(testTime);
+
+ StopMedia(0);
+ l16_32.channels = 1;
+
+ // disable external encryption
+ CHECK(encrypt->DeRegisterExternalEncryption(0));
+ ANL();
+
+ base->DeleteChannel(0);
+
+ // ------------------
+ // Verify put-on-hold
+ // ------------------
+
+ base->CreateChannel();
+ base->CreateChannel();
+
+ Test(">> Verify put-on-hold functionality:\n");
+
+ Test("(ch 0) Sending at 8kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+ Test("(ch 0) Playout is now on hold...");
+ Sleep(testTime);
+ CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+ Test("(ch 0) Playout is now enabled again...");
+ Sleep(testTime);
+
+ Test("(ch 0) Sending at 16kHz...");
+ l16_16.channels = 1;
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Sleep(testTime);
+
+ CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+ Test("(ch 0) Playout is now on hold...");
+ Sleep(testTime);
+ CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+ Test("(ch 0) Playout is now enabled again...");
+ Sleep(testTime);
+
+ Test("(ch 0) Perform minor panning to the left to force mixing in"
+ " stereo...");
+ CHECK(volume->SetOutputVolumePan(0, (float)1.0, (float)0.7));
+ Sleep(testTime);
+
+ CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+ Test("(ch 0) Playout is now on hold...");
+ Sleep(testTime);
+ CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+ Test("(ch 0) Playout is now enabled again...");
+ Sleep(testTime);
+
+ Test("(ch 0) Back to center volume again...");
+ CHECK(volume->SetOutputVolumePan(0, 1.0, 1.0));
+ Sleep(testTime);
+
+ Test("(ch 1) Add 16kHz local file to the mixer...");
+ CHECK(StartMedia(1, 54321, false, true, false, false, true));
+ Sleep(testTime);
+
+ CHECK(base->SetOnHoldStatus(0, true, kHoldPlayOnly));
+ Test("(ch 0) Playout is now on hold...");
+ Sleep(testTime);
+ CHECK(base->SetOnHoldStatus(1, true, kHoldPlayOnly));
+ Test("(ch 1) Playout is now on hold => should be silent...");
+ Sleep(testTime);
+ CHECK(base->SetOnHoldStatus(0, false, kHoldPlayOnly));
+ Test("(ch 0) Playout is now enabled again...");
+ CHECK(base->SetOnHoldStatus(1, false, kHoldPlayOnly));
+ Test("(ch 1) Playout is now enabled again...");
+ Sleep(testTime);
+ StopMedia(1);
+ Test("(ch 1) Stopped playing file...");
+ Sleep(testTime);
+ StopMedia(0);
+ ANL();
+
+ base->DeleteChannel(0);
+ base->DeleteChannel(1);
+
+ // -----------------------------------
+ // Verify recording of playout to file
+ // -----------------------------------
+
+ // StartRecordingPlayout
+ //
+ // Verify that the correct set of signals is recorded in the mixer.
+ // Record each channel and all channels (-1) to ensure that post and pre
+ // mixing recording works.
+
+ base->CreateChannel();
+ base->CreateChannel();
+
+ Test(">> Verify file-recording functionality:\n");
+
+ Test("(ch 0) Sending at 8kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ Test("(ch 0) Recording of playout to 16kHz PCM file...");
+
+ std::string recordedPlayoutFile = webrtc::test::OutputPath() +
+ "RecordedPlayout16kHz.pcm";
+ CHECK(file->StartRecordingPlayout(
+ 0, recordedPlayoutFile.c_str(), NULL));
+ Sleep(testTime);
+ CHECK(file->StopRecordingPlayout(0));
+
+ Test("(ch 0) Playing out the recorded file...");
+ CHECK(volume->SetInputMute(0, true));
+ CHECK(file->StartPlayingFileLocally(
+ 0, recordedPlayoutFile.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+ CHECK(volume->SetInputMute(0, false));
+
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Test("(ch 0) Sending at 16kHz (L16)...");
+ Sleep(testTime);
+
+ Test("(ch 0) Recording of playout to 16kHz PCM file...");
+ CHECK(file->StartRecordingPlayout(
+ 0, recordedPlayoutFile.c_str(), NULL));
+ Sleep(testTime);
+ CHECK(file->StopRecordingPlayout(0));
+
+ Test("(ch 0) Playing out the recorded file...");
+ CHECK(volume->SetInputMute(0, true));
+ CHECK(file->StartPlayingFileLocally(
+ 0, recordedPlayoutFile.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+ CHECK(volume->SetInputMute(0, false));
+
+ CHECK(codec->SetSendCodec(0, l16_32));
+ Test("(ch 0) Sending at 32kHz (L16)...");
+ Sleep(testTime);
+
+ Test("(ch 0) Recording of playout to 16kHz PCM file...");
+ CHECK(file->StartRecordingPlayout(
+ 0, recordedPlayoutFile.c_str(), NULL));
+ Sleep(testTime);
+ CHECK(file->StopRecordingPlayout(0));
+
+ Test("(ch 0) Playing out the recorded file...");
+ CHECK(volume->SetInputMute(0, true));
+ CHECK(file->StartPlayingFileLocally(
+ 0, recordedPlayoutFile.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+ CHECK(volume->SetInputMute(0, false));
+
+ Test("(ch 0) Sending at 16kHz without file as mic but file added on the"
+ " playout side instead...");
+ CHECK(StopMedia(0));
+ CHECK(StartMedia(0, 12345, false, true, false, false, true));
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Sleep(testTime);
+
+ Test("(ch 0) Recording of playout to 16kHz PCM file...");
+ CHECK(file->StartRecordingPlayout(
+ 0, recordedPlayoutFile.c_str(), NULL));
+ Sleep(testTime);
+ CHECK(file->StopRecordingPlayout(0));
+ CHECK(file->StopPlayingFileLocally(0));
+
+ Test("(ch 0) Playing out the recorded file...");
+ CHECK(file->StartPlayingFileLocally(
+ 0, recordedPlayoutFile.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+
+ CHECK(StopMedia(0));
+ CHECK(StopMedia(1));
+
+ Test("(ch 0) Sending at 16kHz...");
+ CHECK(StartMedia(0, 12345, true, true, true, false, false));
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Test("(ch 1) Adding playout file...");
+ CHECK(StartMedia(1, 33333, false, true, false, false, true));
+ Sleep(testTime);
+
+ Test("(ch -1) Speak while recording all channels to add mixer input on "
+ "channel 0...");
+ CHECK(file->StartRecordingPlayout(
+ -1, recordedPlayoutFile.c_str(), NULL));
+ Sleep(testTime);
+ CHECK(file->StopRecordingPlayout(-1));
+ CHECK(file->StopPlayingFileLocally(1));
+
+ Test("(ch 0) Playing out the recorded file...");
+ CHECK(volume->SetInputMute(0, true));
+ CHECK(file->StartPlayingFileLocally(
+ 0, recordedPlayoutFile.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+ CHECK(volume->SetInputMute(0, false));
+
+ CHECK(StopMedia(0));
+ CHECK(StopMedia(1));
+ ANL();
+
+ // StartRecordingPlayoutStereo
+
+ Test(">> Verify recording of playout in stereo:\n");
+
+ Test("(ch 0) Sending at 32kHz...");
+ CHECK(codec->SetSendCodec(0, l16_16));
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ Test("Modified master balance (L=10%%, R=100%%) to force stereo mixing...");
+ CHECK(volume->SetOutputVolumePan(-1, (float)0.1, (float)1.0));
+ Sleep(testTime);
+
+ /*
+ Test("Recording of left and right channel playout to two 16kHz PCM "
+ "files...");
+ file->StartRecordingPlayoutStereo(
+ GetFilename("RecordedPlayout_Left_16kHz.pcm"),
+ GetFilename("RecordedPlayout_Right_16kHz.pcm"), StereoBoth);
+ Sleep(testTime);
+ Test("Back to center volume again...");
+ CHECK(volume->SetOutputVolumePan(-1, (float)1.0, (float)1.0));
+ */
+
+ Test("(ch 0) Playing out the recorded file for the left channel (10%%)...");
+ CHECK(volume->SetInputMute(0, true));
+ std::string leftFilename = outputDir + "RecordedPlayout_Left_16kHz.pcm";
+ CHECK(file->StartPlayingFileLocally(0, leftFilename.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+
+ Test("(ch 0) Playing out the recorded file for the right channel (100%%) =>"
+ " should sound louder than the left channel...");
+ std::string rightFilename = outputDir + "RecordedPlayout_Right_16kHz.pcm";
+ CHECK(file->StartPlayingFileLocally(0, rightFilename.c_str()));
+ Sleep(testTime);
+ CHECK(file->StopPlayingFileLocally(0));
+ CHECK(volume->SetInputMute(0, false));
+
+ base->DeleteChannel(0);
+ base->DeleteChannel(1);
+ ANL();
+
+ // ---------------------------
+ // Verify inserted Dtmf tones
+ // ---------------------------
+
+ Test(">> Verify Dtmf feedback functionality:\n");
+
+ base->CreateChannel();
+
+ for (int i = 0; i < 2; i++) {
+ if (i == 0)
+ Test("Dtmf direct feedback is now enabled...");
+ else
+ Test("Dtmf direct feedback is now disabled...");
+
+ CHECK(dtmf->SetDtmfFeedbackStatus(true, (i==0)));
+
+ Test("(ch 0) Sending at 32kHz using G.722.1C...");
+ CHECK(codec->SetRecPayloadType(0, g722_1c_32));
+ CHECK(codec->SetSendCodec(0, g722_1c_32));
+ CHECK(StartMedia(0, 12345, true, true, true, false, false));
+ Sleep(500);
+
+ Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+ " to the mixer...");
+ // ensure that receiver will not play out outband Dtmf
+ CHECK(dtmf->SetSendTelephoneEventPayloadType(0, 118));
+ CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+ Sleep(500);
+ Sleep(testTime - 1500);
+
+ Test("(ch 0) Changing codec to 8kHz PCMU...");
+ CHECK(codec->SetSendCodec(0, pcmu_8));
+ Sleep(500);
+
+ Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+ " to the mixer...");
+ CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+ Sleep(500);
+ Sleep(testTime - 1500);
+
+ Test("(ch 0) Changing codec to 16kHz L16...");
+ CHECK(codec->SetSendCodec(0, l16_16));
+ Sleep(500);
+
+ Test("(ch 0) Sending outband Dtmf events => ensure that they are added"
+ " to the mixer...");
+ CHECK(dtmf->SendTelephoneEvent(0, 9, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 1, true, 390));
+ Sleep(500);
+ CHECK(dtmf->SendTelephoneEvent(0, 5, true, 390));
+ Sleep(500);
+ Sleep(testTime - 1500);
+
+ StopMedia(0);
+ ANL();
+ }
+
+ base->DeleteChannel(0);
+
+ // ---------------------------
+ // Verify external processing
+ // --------------------------
+
+ base->CreateChannel();
+
+ Test(">> Verify external media processing:\n");
+
+ Test("(ch 0) Playing 16kHz file locally <=> mixing in mono @ 16kHz...");
+ CHECK(StartMedia(0, 12345, false, true, false, false, true));
+ Sleep(testTime);
+ Test("Enabling playout external media processing => played audio should "
+ "now be affected");
+ CHECK(xmedia->RegisterExternalMediaProcessing(
+ 0, kPlaybackAllChannelsMixed, mpobj));
+ Sleep(testTime);
+ Test("(ch 0) Sending speech at 32kHz <=> mixing at 32kHz...");
+ CHECK(codec->SetSendCodec(0, l16_32));
+ Sleep(testTime);
+ printf("Back to normal again\n");
+ CHECK(xmedia->DeRegisterExternalMediaProcessing(0,
+ kPlaybackAllChannelsMixed));
+ Sleep(testTime);
+ printf("Enabling playout external media processing on ch 0 => "
+ "played audio should now be affected\n");
+ CHECK(xmedia->RegisterExternalMediaProcessing(0, kPlaybackPerChannel,
+ mpobj));
+ Sleep(testTime);
+ Test("Panning volume to the right <=> mixing in stereo @ 32kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 0.0, 1.0));
+ Sleep(testTime);
+ Test("Back to center volume again <=> mixing in mono @ 32kHz...");
+ CHECK(volume->SetOutputVolumePan(-1, 1.0, 1.0));
+ Sleep(testTime);
+ printf("Back to normal again\n");
+ CHECK(xmedia->DeRegisterExternalMediaProcessing(0, kPlaybackPerChannel));
+ Sleep(testTime);
+ CHECK(StopMedia(0));
+ ANL();
+
+ base->DeleteChannel(0);
+
+ // --------------------------------------------------
+ // Extended tests of emulated stereo encoding schemes
+ // --------------------------------------------------
+
+ CodecInst PCMU;
+ CodecInst G729;
+ CodecInst L16_8;
+ CodecInst L16_16;
+ CodecInst L16_32;
+
+ base->CreateChannel();
+
+ Test(">> Verify emulated stereo encoding for differenct codecs:\n");
+
+ // enable external encryption
+ CHECK(encrypt->RegisterExternalEncryption(0, *this));
+ Test("(ch 0) External Encryption is now enabled:");
+
+ // register all codecs on the receiving side
+ strcpy(PCMU.plname, "PCMU");
+ PCMU.channels = 2;
+ PCMU.pacsize = 160;
+ PCMU.plfreq = 8000;
+ PCMU.pltype = 125;
+ PCMU.rate = 64000;
+ CHECK(codec->SetRecPayloadType(0, PCMU));
+
+ strcpy(G729.plname, "G729");
+ G729.channels = 2;
+ G729.pacsize = 160;
+ G729.plfreq = 8000;
+ G729.pltype = 18;
+ G729.rate = 8000;
+ CHECK(codec->SetRecPayloadType(0, G729));
+
+ strcpy(L16_8.plname, "L16");
+ L16_8.channels = 2;
+ L16_8.pacsize = 160;
+ L16_8.plfreq = 8000;
+ L16_8.pltype = 120;
+ L16_8.rate = 128000;
+ CHECK(codec->SetRecPayloadType(0, L16_8));
+
+ strcpy(L16_16.plname, "L16");
+ L16_16.channels = 2;
+ L16_16.pacsize = 320;
+ L16_16.plfreq = 16000;
+ L16_16.pltype = 121;
+ L16_16.rate = 256000;
+ CHECK(codec->SetRecPayloadType(0, L16_16));
+
+ // NOTE - we cannot send larger than 1500 bytes per RTP packet
+ strcpy(L16_32.plname, "L16");
+ L16_32.channels = 2;
+ L16_32.pacsize = 320;
+ L16_32.plfreq = 32000;
+ L16_32.pltype = 122;
+ L16_32.rate = 512000;
+ CHECK(codec->SetRecPayloadType(0, L16_32));
+
+ // sample-based, 8-bits per sample
+
+ Test("(ch 0) Sending using G.711 (sample based, 8 bits/sample)...");
+ PCMU.channels = 1;
+ CHECK(codec->SetSendCodec(0, PCMU));
+ SetStereoExternalEncryption(0, true, 8);
+ CHECK(StartMedia(0, 12345, true, true, true, true, false));
+ Sleep(testTime);
+
+ // sample-based, 16-bits per sample
+
+ Test("(ch 0) Sending using L16 8kHz (sample based, 16 bits/sample)...");
+ L16_8.channels = 1;
+ CHECK(codec->SetSendCodec(0, L16_8));
+ SetStereoExternalEncryption(0, true, 16);
+ Sleep(testTime);
+
+ Test("(ch 0) Sending using L16 16kHz (sample based, 16 bits/sample)...");
+ L16_16.channels = 1;
+ CHECK(codec->SetSendCodec(0, L16_16));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending using L16 32kHz (sample based, 16 bits/sample)...");
+ L16_32.channels = 1;
+ CHECK(codec->SetSendCodec(0, L16_32));
+ Sleep(testTime);
+
+ Test("(ch 0) Sending using G.729 (frame based)...");
+ G729.channels = 1;
+ CHECK(codec->SetSendCodec(0, G729));
+ Sleep(testTime);
+
+ StopMedia(0);
+
+ // disable external encryption
+ CHECK(encrypt->DeRegisterExternalEncryption(0));
+
+ base->DeleteChannel(0);
+
+ // ------------------------------------------------------------------------
+ CHECK(base->Terminate());
+
+ printf("\n\n------------------------------------------------\n");
+ printf(" Test passed!\n");
+ printf("------------------------------------------------\n\n");
+
+ return 0;
+}
+
+} // namespace voetest
diff --git a/voice_engine/test/auto_test/voe_unit_test.h b/voice_engine/test/auto_test/voe_unit_test.h
new file mode 100644
index 0000000..346713a
--- /dev/null
+++ b/voice_engine/test/auto_test/voe_unit_test.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
+#define WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
+
+#include "voice_engine/test/auto_test/voe_standard_test.h"
+
+namespace voetest {
+
+class VoETestManager;
+
+// Unit tests for the VoE mixer, DTMF, file playout/recording and external
+// media features. Derives from Encryption so the test object itself can be
+// registered as an external encryption hook (used by the tests to emulate
+// stereo coding schemes on top of mono codecs).
+class VoEUnitTest : public Encryption {
+ public:
+ VoEUnitTest(VoETestManager& mgr);
+ ~VoEUnitTest() {}
+ // Runs the test menu / test sequence; returns 0 on success.
+ int DoTest();
+
+ protected:
+ // Encryption interface implementation. The test uses these callbacks to
+ // intercept outgoing/incoming packets during the external-encryption tests.
+ void encrypt(int channel_no, unsigned char * in_data,
+ unsigned char * out_data, int bytes_in, int * bytes_out);
+ void decrypt(int channel_no, unsigned char * in_data,
+ unsigned char * out_data, int bytes_in, int * bytes_out);
+ void encrypt_rtcp(int channel_no, unsigned char * in_data,
+ unsigned char * out_data, int bytes_in, int * bytes_out);
+ void decrypt_rtcp(int channel_no, unsigned char * in_data,
+ unsigned char * out_data, int bytes_in, int * bytes_out);
+
+ private:
+ int MenuSelection();
+ int MixerTest();
+ // Sleeps |timeMillisec| milliseconds; optionally prints a progress marker.
+ void Sleep(unsigned int timeMillisec, bool addMarker = false);
+ void Wait();
+ // Starts listen/playout/send on |channel|; the bool flags select which of
+ // those to enable and whether a file is used as microphone / local playout.
+ int StartMedia(int channel,
+ int rtpPort,
+ bool listen,
+ bool playout,
+ bool send,
+ bool fileAsMic,
+ bool localFile);
+ int StopMedia(int channel);
+ void Test(const char* msg);
+ void SetStereoExternalEncryption(int channel, bool onOff, int bitsPerSample);
+
+ private:
+ VoETestManager& _mgr;
+
+ private:
+ // Per-channel media state, indexed by channel number (up to 32 channels).
+ bool _listening[32];
+ bool _playing[32];
+ bool _sending[32];
+
+ private:
+ // State for the emulated stereo external-encryption mode; written by
+ // SetStereoExternalEncryption() and read by the Encryption callbacks.
+ bool _extOnOff;
+ int _extBitsPerSample;
+ int _extChannel;
+};
+
+} // namespace voetest
+#endif // WEBRTC_VOICE_ENGINE_VOE_UNIT_TEST_H
diff --git a/voice_engine/test/cmd_test/Android.mk b/voice_engine/test/cmd_test/Android.mk
new file mode 100644
index 0000000..f1a4f1a
--- /dev/null
+++ b/voice_engine/test/cmd_test/Android.mk
@@ -0,0 +1,60 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+LOCAL_PATH:= $(call my-dir)
+
+# voice engine test app
+
+include $(CLEAR_VARS)
+
+# Shared WebRTC Android build settings (common defs, flags).
+include $(LOCAL_PATH)/../../../../../android-webrtc.mk
+
+LOCAL_MODULE_TAGS := tests
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_SRC_FILES:= \
+ voe_cmd_test.cc
+
+# Flags passed to both C and C++ files.
+LOCAL_CFLAGS := \
+ '-DWEBRTC_TARGET_PC' \
+ '-DWEBRTC_ANDROID' \
+ '-DDEBUG'
+
+LOCAL_C_INCLUDES := \
+ $(LOCAL_PATH)/../../interface \
+ $(LOCAL_PATH)/../../../.. \
+ $(LOCAL_PATH)/../../../../.. \
+ external/gtest/include \
+ frameworks/base/include
+
+LOCAL_SHARED_LIBRARIES := \
+ libutils \
+ libmedia \
+ libcamera_client \
+ libgui \
+ libhardware \
+ libandroid_runtime \
+ libbinder
+
+#libwilhelm.so libDunDef-Android.so libbinder.so libsystem_server.so
+
+LOCAL_MODULE:= webrtc_voe_cmd
+
+# NDK builds link against the WebRTC JNI/preprocessing shared libraries and
+# produce a plain executable; platform (AOSP tree) builds link the monolithic
+# libwebrtc and use the native-test build rule with the platform STL.
+ifdef NDK_ROOT
+LOCAL_SHARED_LIBRARIES += \
+ libstlport_shared \
+ libwebrtc-voice-jni \
+ libwebrtc_audio_preprocessing
+include $(BUILD_EXECUTABLE)
+else
+LOCAL_SHARED_LIBRARIES += \
+ libstlport \
+ libwebrtc
+include external/stlport/libstlport.mk
+include $(BUILD_NATIVE_TEST)
+endif
diff --git a/voice_engine/test/cmd_test/voe_cmd_test.cc b/voice_engine/test/cmd_test/voe_cmd_test.cc
new file mode 100644
index 0000000..8753002
--- /dev/null
+++ b/voice_engine/test/cmd_test/voe_cmd_test.cc
@@ -0,0 +1,924 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "test/testsupport/fileutils.h"
+
+#include "voe_errors.h"
+#include "voe_base.h"
+#include "voe_codec.h"
+#include "voe_volume_control.h"
+#include "voe_dtmf.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_audio_processing.h"
+#include "voe_file.h"
+#include "voe_video_sync.h"
+#include "voe_encryption.h"
+#include "voe_hardware.h"
+#include "voe_external_media.h"
+#include "voe_network.h"
+#include "voe_neteq_stats.h"
+#include "engine_configurations.h"
+
+// Enable this flag to run the test with a hard-coded IP/port/codec and to
+// start the test automatically on key input; this can be useful for
+// repeated test runs.
+//#define DEBUG
+
+// #define EXTERNAL_TRANSPORT
+
+using namespace webrtc;
+
+#define VALIDATE \
+ if (res != 0) \
+ { \
+ printf("*** Error at position %i / line %i \n", cnt, __LINE__); \
+ printf("*** Error code = %i \n", base1->LastError()); \
+ } \
+ cnt++;
+
+// Global engine handle and one pointer per VoE sub-API. All are acquired in
+// main() via GetInterface() and must be balanced with Release() before the
+// engine is deleted.
+VoiceEngine* m_voe = NULL;
+VoEBase* base1 = NULL;
+VoECodec* codec = NULL;
+VoEVolumeControl* volume = NULL;
+VoEDtmf* dtmf = NULL;
+VoERTP_RTCP* rtp_rtcp = NULL;
+VoEAudioProcessing* apm = NULL;
+VoENetwork* netw = NULL;
+VoEFile* file = NULL;
+VoEVideoSync* vsync = NULL;
+VoEEncryption* encr = NULL;
+VoEHardware* hardware = NULL;
+VoEExternalMedia* xmedia = NULL;
+VoENetEqStats* neteqst = NULL;
+
+// Interactive call loop, defined below; |out_path| is where recordings and
+// trace files are written.
+void RunTest(std::string out_path);
+
+#ifdef EXTERNAL_TRANSPORT
+
+// Loopback transport: instead of sending RTP/RTCP to a real network, each
+// outgoing packet is handed straight back to the receive side of the same
+// engine through the VoENetwork interface.
+class my_transportation : public Transport
+{
+  // Transport implementation; both paths always report success.
+  int SendPacket(int channel, const void* data, int len)
+  {
+    netw->ReceivedRTPPacket(channel, data, len);
+    return 0;
+  }
+
+  int SendRTCPPacket(int channel, const void* data, int len)
+  {
+    netw->ReceivedRTCPPacket(channel, data, len);
+    return 0;
+  }
+};
+
+my_transportation my_transport;
+#endif
+
+// Receives asynchronous error/warning callbacks from the voice engine and
+// logs them to stdout (registered in main()).
+class MyObserver : public VoiceEngineObserver {
+ public:
+   virtual void CallbackOnError(const int channel, const int err_code);
+};
+
+// Maps known VoiceEngine error/warning codes to human-readable console
+// messages. Codes not listed below are silently ignored; |channel| is unused.
+void MyObserver::CallbackOnError(const int channel, const int err_code) {
+ // Add printf for other error codes here
+ if (err_code == VE_TYPING_NOISE_WARNING) {
+ printf("  TYPING NOISE DETECTED \n");
+ } else if (err_code == VE_RECEIVE_PACKET_TIMEOUT) {
+ printf("  RECEIVE PACKET TIMEOUT \n");
+ } else if (err_code == VE_PACKET_RECEIPT_RESTARTED) {
+ printf("  PACKET RECEIPT RESTARTED \n");
+ } else if (err_code == VE_RUNTIME_PLAY_WARNING) {
+ printf("  RUNTIME PLAY WARNING \n");
+ } else if (err_code == VE_RUNTIME_REC_WARNING) {
+ printf("  RUNTIME RECORD WARNING \n");
+ } else if (err_code == VE_SATURATION_WARNING) {
+ printf("  SATURATION WARNING \n");
+ } else if (err_code == VE_RUNTIME_PLAY_ERROR) {
+ printf("  RUNTIME PLAY ERROR \n");
+ } else if (err_code == VE_RUNTIME_REC_ERROR) {
+ printf("  RUNTIME RECORD ERROR \n");
+ } else if (err_code == VE_REC_DEVICE_REMOVED) {
+ printf("  RECORD DEVICE REMOVED \n");
+ }
+}
+
+// Entry point: brings up a VoiceEngine instance, acquires every sub-API
+// interface, enables file tracing, runs the interactive test loop, then
+// tears everything down. |res| and |cnt| are the globals consumed by the
+// VALIDATE macro after each API call.
+int main() {
+ int res = 0;
+ int cnt = 0;
+
+ printf("Test started \n");
+
+ // Create the engine and query one interface of each kind. Every
+ // GetInterface() takes a reference that is balanced by Release() below.
+ m_voe = VoiceEngine::Create();
+ base1 = VoEBase::GetInterface(m_voe);
+ codec = VoECodec::GetInterface(m_voe);
+ apm = VoEAudioProcessing::GetInterface(m_voe);
+ volume = VoEVolumeControl::GetInterface(m_voe);
+ dtmf = VoEDtmf::GetInterface(m_voe);
+ rtp_rtcp = VoERTP_RTCP::GetInterface(m_voe);
+ netw = VoENetwork::GetInterface(m_voe);
+ file = VoEFile::GetInterface(m_voe);
+ vsync = VoEVideoSync::GetInterface(m_voe);
+ encr = VoEEncryption::GetInterface(m_voe);
+ hardware = VoEHardware::GetInterface(m_voe);
+ xmedia = VoEExternalMedia::GetInterface(m_voe);
+ neteqst = VoENetEqStats::GetInterface(m_voe);
+
+ MyObserver my_observer;
+
+ // Route the full engine trace to a file in the test output directory.
+ const std::string out_path = webrtc::test::OutputPath();
+ const std::string trace_filename = out_path + "webrtc_trace.txt";
+
+ printf("Set trace filenames (enable trace)\n");
+ VoiceEngine::SetTraceFilter(kTraceAll);
+ res = VoiceEngine::SetTraceFile(trace_filename.c_str());
+ VALIDATE;
+
+ res = VoiceEngine::SetTraceCallback(NULL);
+ VALIDATE;
+
+ // Init() failure is fatal: nothing below can work without it.
+ printf("Init\n");
+ res = base1->Init();
+ if (res != 0) {
+ printf("\nError calling Init: %d\n", base1->LastError());
+ fflush(NULL);
+ exit(1);
+ }
+
+ res = base1->RegisterVoiceEngineObserver(my_observer);
+ VALIDATE;
+
+ cnt++;
+ printf("Version\n");
+ char tmp[1024];
+ res = base1->GetVersion(tmp);
+ VALIDATE;
+ cnt++;
+ printf("%s\n", tmp);
+
+ // Run the interactive call loop until the user quits.
+ RunTest(out_path);
+
+ printf("Terminate \n");
+
+ base1->DeRegisterVoiceEngineObserver();
+
+ res = base1->Terminate();
+ VALIDATE;
+
+ // Release every interface acquired above; the engine can only be deleted
+ // once all sub-API references are gone.
+ if (base1)
+ base1->Release();
+
+ if (codec)
+ codec->Release();
+
+ if (volume)
+ volume->Release();
+
+ if (dtmf)
+ dtmf->Release();
+
+ if (rtp_rtcp)
+ rtp_rtcp->Release();
+
+ if (apm)
+ apm->Release();
+
+ if (netw)
+ netw->Release();
+
+ if (file)
+ file->Release();
+
+ if (vsync)
+ vsync->Release();
+
+ if (encr)
+ encr->Release();
+
+ if (hardware)
+ hardware->Release();
+
+ if (xmedia)
+ xmedia->Release();
+
+ if (neteqst)
+ neteqst->Release();
+
+ VoiceEngine::Delete(m_voe);
+
+ return 0;
+}
+
+void RunTest(std::string out_path) {
+ int chan, cnt, res;
+ CodecInst cinst;
+ cnt = 0;
+ int i;
+ int codecinput;
+ bool AEC = false;
+ bool AGC = true;
+ bool AGC1 = false;
+ bool VAD = false;
+ bool NS = false;
+ bool NS1 = false;
+ bool typing_detection = false;
+ bool muted = false;
+ bool on_hold = false;
+
+#if defined(WEBRTC_ANDROID)
+ std::string resource_path = "/sdcard/";
+#else
+ std::string resource_path = webrtc::test::ProjectRootPath();
+ if (resource_path == webrtc::test::kCannotFindProjectRootDir) {
+ printf("*** Unable to get project root directory. "
+ "File playing may fail. ***\n");
+ // Fall back to the current directory.
+ resource_path = "./";
+ } else {
+ resource_path += "data/voice_engine/";
+ }
+#endif
+ const std::string audio_filename = resource_path + "audio_long16.pcm";
+
+ const std::string play_filename = out_path + "recorded_playout.pcm";
+ const std::string mic_filename = out_path + "recorded_mic.pcm";
+
+ chan = base1->CreateChannel();
+ if (chan < 0) {
+ printf("Error at position %i\n", cnt);
+ printf("************ Error code = %i\n", base1->LastError());
+ fflush(NULL);
+ }
+ cnt++;
+
+ int j = 0;
+#ifdef EXTERNAL_TRANSPORT
+ my_transportation ch0transport;
+ printf("Enabling external transport \n");
+ netw->RegisterExternalTransport(0, ch0transport);
+#else
+ char ip[64];
+#ifdef DEBUG
+ strcpy(ip, "127.0.0.1");
+#else
+ char localip[64];
+ netw->GetLocalIP(localip);
+ printf("local IP:%s\n", localip);
+
+ printf("1. 127.0.0.1 \n");
+ printf("2. Specify IP \n");
+ ASSERT_EQ(1, scanf("%i", &i));
+
+ if (1 == i)
+ strcpy(ip, "127.0.0.1");
+ else {
+ printf("Specify remote IP: ");
+ ASSERT_EQ(1, scanf("%s", ip));
+ }
+#endif
+
+ int colons(0);
+ while (ip[j] != '\0' && j < 64 && !(colons = (ip[j++] == ':')))
+ ;
+ if (colons) {
+ printf("Enabling IPv6\n");
+ res = netw->EnableIPv6(0);
+ VALIDATE;
+ }
+
+ int rPort;
+#ifdef DEBUG
+ rPort=8500;
+#else
+ printf("Specify remote port (1=1234): ");
+ ASSERT_EQ(1, scanf("%i", &rPort));
+ if (1 == rPort)
+ rPort = 1234;
+ printf("Set Send port \n");
+#endif
+
+ printf("Set Send IP \n");
+ res = base1->SetSendDestination(chan, rPort, ip);
+ VALIDATE;
+
+ int lPort;
+#ifdef DEBUG
+ lPort=8500;
+#else
+ printf("Specify local port (1=1234): ");
+ ASSERT_EQ(1, scanf("%i", &lPort));
+ if (1 == lPort)
+ lPort = 1234;
+ printf("Set Rec Port \n");
+#endif
+ res = base1->SetLocalReceiver(chan, lPort);
+ VALIDATE;
+#endif
+
+ printf("\n");
+ for (i = 0; i < codec->NumOfCodecs(); i++) {
+ res = codec->GetCodec(i, cinst);
+ VALIDATE;
+ if (strncmp(cinst.plname, "ISAC", 4) == 0 && cinst.plfreq == 32000) {
+ printf("%i. ISAC-swb pltype:%i plfreq:%i channels:%i\n", i, cinst.pltype,
+ cinst.plfreq, cinst.channels);
+ }
+ else {
+ printf("%i. %s pltype:%i plfreq:%i channels:%i\n", i, cinst.plname,
+ cinst.pltype, cinst.plfreq, cinst.channels);
+ }
+ }
+#ifdef DEBUG
+ codecinput=0;
+#else
+ printf("Select send codec: ");
+ ASSERT_EQ(1, scanf("%i", &codecinput));
+#endif
+ codec->GetCodec(codecinput, cinst);
+
+ printf("Set primary codec\n");
+ res = codec->SetSendCodec(chan, cinst);
+ VALIDATE;
+
+#ifndef WEBRTC_ANDROID
+ const int kMaxNumChannels = 8;
+#else
+ const int kMaxNumChannels = 1;
+#endif
+ int channel_index = 0;
+ std::vector<int> channels(kMaxNumChannels);
+ for (i = 0; i < kMaxNumChannels; ++i) {
+ channels[i] = base1->CreateChannel();
+ int port = rPort + (i + 1) * 2;
+ res = base1->SetSendDestination(channels[i], port, ip);
+ VALIDATE;
+ res = base1->SetLocalReceiver(channels[i], port);
+ VALIDATE;
+ res = codec->SetSendCodec(channels[i], cinst);
+ VALIDATE;
+ }
+
+ // Call loop
+ bool newcall = true;
+ while (newcall) {
+
+#if defined(WEBRTC_LINUX) && !defined(WEBRTC_ANDROID)
+ int rd(-1), pd(-1);
+ res = hardware->GetNumOfRecordingDevices(rd);
+ VALIDATE;
+ res = hardware->GetNumOfPlayoutDevices(pd);
+ VALIDATE;
+
+ char dn[128] = { 0 };
+ char guid[128] = { 0 };
+ printf("\nPlayout devices (%d): \n", pd);
+ for (j=0; j<pd; ++j) {
+ res = hardware->GetPlayoutDeviceName(j, dn, guid);
+ VALIDATE;
+ printf(" %d: %s \n", j, dn);
+ }
+
+ printf("Recording devices (%d): \n", rd);
+ for (j=0; j<rd; ++j) {
+ res = hardware->GetRecordingDeviceName(j, dn, guid);
+ VALIDATE;
+ printf(" %d: %s \n", j, dn);
+ }
+
+ printf("Select playout device: ");
+ ASSERT_EQ(1, scanf("%d", &pd));
+ res = hardware->SetPlayoutDevice(pd);
+ VALIDATE;
+ printf("Select recording device: ");
+ ASSERT_EQ(1, scanf("%d", &rd));
+ printf("Setting sound devices \n");
+ res = hardware->SetRecordingDevice(rd);
+ VALIDATE;
+
+#endif // WEBRTC_LINUX
+ res = codec->SetVADStatus(0, VAD);
+ VALIDATE;
+
+ res = apm->SetAgcStatus(AGC);
+ VALIDATE;
+
+ res = apm->SetEcStatus(AEC);
+ VALIDATE;
+
+ res = apm->SetNsStatus(NS);
+ VALIDATE;
+
+#ifdef DEBUG
+ i = 1;
+#else
+ printf("\n1. Send, listen and playout \n");
+ printf("2. Send only \n");
+ printf("3. Listen and playout only \n");
+ printf("Select transfer mode: ");
+ ASSERT_EQ(1, scanf("%i", &i));
+#endif
+ const bool send = !(3 == i);
+ const bool receive = !(2 == i);
+
+ if (receive) {
+#ifndef EXTERNAL_TRANSPORT
+ printf("Start Listen \n");
+ res = base1->StartReceive(chan);
+ VALIDATE;
+#endif
+
+ printf("Start Playout \n");
+ res = base1->StartPlayout(chan);
+ VALIDATE;
+ }
+
+ if (send) {
+ printf("Start Send \n");
+ res = base1->StartSend(chan);
+ VALIDATE;
+ }
+
+#ifndef WEBRTC_ANDROID
+ printf("Getting mic volume \n");
+ unsigned int vol = 999;
+ res = volume->GetMicVolume(vol);
+ VALIDATE;
+ if ((vol > 255) || (vol < 1)) {
+ printf("\n****ERROR in GetMicVolume");
+ }
+#endif
+
+ int forever = 1;
+ while (forever) {
+ printf("\nActions\n");
+
+ printf("Codec Changes\n");
+ for (i = 0; i < codec->NumOfCodecs(); i++) {
+ res = codec->GetCodec(i, cinst);
+ VALIDATE;
+ if (strncmp(cinst.plname, "ISAC", 4) == 0 && cinst.plfreq
+ == 32000) {
+ printf("\t%i. ISAC-swb pltype:%i plfreq:%i channels:%i\n", i,
+ cinst.pltype, cinst.plfreq, cinst.channels);
+ }
+ else {
+ printf("\t%i. %s pltype:%i plfreq:%i channels:%i\n", i, cinst.plname,
+ cinst.pltype, cinst.plfreq, cinst.channels);
+ }
+ }
+ printf("Other\n");
+ const int noCodecs = i - 1;
+ printf("\t%i. Toggle VAD\n", i);
+ i++;
+ printf("\t%i. Toggle AGC\n", i);
+ i++;
+ printf("\t%i. Toggle NS\n", i);
+ i++;
+ printf("\t%i. Toggle EC\n", i);
+ i++;
+ printf("\t%i. Select AEC\n", i);
+ i++;
+ printf("\t%i. Select AECM\n", i);
+ i++;
+ printf("\t%i. Get speaker volume\n", i);
+ i++;
+ printf("\t%i. Set speaker volume\n", i);
+ i++;
+ printf("\t%i. Get microphone volume\n", i);
+ i++;
+ printf("\t%i. Set microphone volume\n", i);
+ i++;
+ printf("\t%i. Play local file (audio_long16.pcm) \n", i);
+ i++;
+ printf("\t%i. Change playout device \n", i);
+ i++;
+ printf("\t%i. Change recording device \n", i);
+ i++;
+ printf("\t%i. Toggle receive-side AGC \n", i);
+ i++;
+ printf("\t%i. Toggle receive-side NS \n", i);
+ i++;
+ printf("\t%i. AGC status \n", i);
+ i++;
+ printf("\t%i. Toggle microphone mute \n", i);
+ i++;
+ printf("\t%i. Toggle on hold status \n", i);
+ i++;
+ printf("\t%i. Get last error code \n", i);
+ i++;
+ printf("\t%i. Toggle typing detection (for Mac/Windows only) \n", i);
+ i++;
+ printf("\t%i. Record a PCM file \n", i);
+ i++;
+ printf("\t%i. Play a previously recorded PCM file locally \n", i);
+ i++;
+ printf("\t%i. Play a previously recorded PCM file as microphone \n", i);
+ i++;
+ printf("\t%i. Add an additional file-playing channel \n", i);
+ i++;
+ printf("\t%i. Remove a file-playing channel \n", i);
+ i++;
+
+ printf("Select action or %i to stop the call: ", i);
+ ASSERT_EQ(1, scanf("%i", &codecinput));
+
+ if (codecinput < codec->NumOfCodecs()) {
+ res = codec->GetCodec(codecinput, cinst);
+ VALIDATE;
+
+ printf("Set primary codec\n");
+ res = codec->SetSendCodec(chan, cinst);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 1)) {
+ VAD = !VAD;
+ res = codec->SetVADStatus(0, VAD);
+ VALIDATE;
+ if (VAD)
+ printf("\n VAD is now on! \n");
+ else
+ printf("\n VAD is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 2)) {
+ AGC = !AGC;
+ res = apm->SetAgcStatus(AGC);
+ VALIDATE;
+ if (AGC)
+ printf("\n AGC is now on! \n");
+ else
+ printf("\n AGC is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 3)) {
+ NS = !NS;
+ res = apm->SetNsStatus(NS);
+ VALIDATE;
+ if (NS)
+ printf("\n NS is now on! \n");
+ else
+ printf("\n NS is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 4)) {
+ AEC = !AEC;
+ res = apm->SetEcStatus(AEC, kEcUnchanged);
+ VALIDATE;
+ if (AEC)
+ printf("\n Echo control is now on! \n");
+ else
+ printf("\n Echo control is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 5)) {
+ res = apm->SetEcStatus(AEC, kEcAec);
+ VALIDATE;
+ printf("\n AEC selected! \n");
+ if (AEC)
+ printf(" (Echo control is on)\n");
+ else
+ printf(" (Echo control is off)\n");
+ }
+ else if (codecinput == (noCodecs + 6)) {
+ res = apm->SetEcStatus(AEC, kEcAecm);
+ VALIDATE;
+ printf("\n AECM selected! \n");
+ if (AEC)
+ printf(" (Echo control is on)\n");
+ else
+ printf(" (Echo control is off)\n");
+ }
+ else if (codecinput == (noCodecs + 7)) {
+ unsigned vol(0);
+ res = volume->GetSpeakerVolume(vol);
+ VALIDATE;
+ printf("\n Speaker Volume is %d \n", vol);
+ }
+ else if (codecinput == (noCodecs + 8)) {
+ printf("Level: ");
+ ASSERT_EQ(1, scanf("%i", &i));
+ res = volume->SetSpeakerVolume(i);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 9)) {
+ unsigned vol(0);
+ res = volume->GetMicVolume(vol);
+ VALIDATE;
+ printf("\n Microphone Volume is %d \n", vol);
+ }
+ else if (codecinput == (noCodecs + 10)) {
+ printf("Level: ");
+ ASSERT_EQ(1, scanf("%i", &i));
+ res = volume->SetMicVolume(i);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 11)) {
+ res = file->StartPlayingFileLocally(0, audio_filename.c_str());
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 12)) {
+ // change the playout device with current call
+ int num_pd(-1);
+ res = hardware->GetNumOfPlayoutDevices(num_pd);
+ VALIDATE;
+
+ char dn[128] = { 0 };
+ char guid[128] = { 0 };
+
+ printf("\nPlayout devices (%d): \n", num_pd);
+ for (j = 0; j < num_pd; ++j) {
+ res = hardware->GetPlayoutDeviceName(j, dn, guid);
+ VALIDATE;
+ printf(" %d: %s \n", j, dn);
+ }
+ printf("Select playout device: ");
+ ASSERT_EQ(1, scanf("%d", &num_pd));
+ // Will use plughw for hardware devices
+ res = hardware->SetPlayoutDevice(num_pd);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 13)) {
+ // change the recording device with current call
+ int num_rd(-1);
+
+ res = hardware->GetNumOfRecordingDevices(num_rd);
+ VALIDATE;
+
+ char dn[128] = { 0 };
+ char guid[128] = { 0 };
+
+ printf("Recording devices (%d): \n", num_rd);
+ for (j = 0; j < num_rd; ++j) {
+ res = hardware->GetRecordingDeviceName(j, dn, guid);
+ VALIDATE;
+ printf(" %d: %s \n", j, dn);
+ }
+
+ printf("Select recording device: ");
+ ASSERT_EQ(1, scanf("%d", &num_rd));
+ printf("Setting sound devices \n");
+ // Will use plughw for hardware devices
+ res = hardware->SetRecordingDevice(num_rd);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 14)) {
+ // Remote AGC
+ AGC1 = !AGC1;
+ res = apm->SetRxAgcStatus(chan, AGC1);
+ VALIDATE;
+ if (AGC1)
+ printf("\n Receive-side AGC is now on! \n");
+ else
+ printf("\n Receive-side AGC is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 15)) {
+ // Remote NS
+ NS1 = !NS1;
+ res = apm->SetRxNsStatus(chan, NS);
+ VALIDATE;
+ if (NS1)
+ printf("\n Receive-side NS is now on! \n");
+ else
+ printf("\n Receive-side NS is now off! \n");
+ }
+ else if (codecinput == (noCodecs + 16)) {
+ AgcModes agcmode;
+ bool enable;
+ res = apm->GetAgcStatus(enable, agcmode);
+ VALIDATE
+ printf("\n AGC enable is %d, mode is %d \n", enable, agcmode);
+ }
+ else if (codecinput == (noCodecs + 17)) {
+ // Toggle Mute on Microphone
+ res = volume->GetInputMute(chan, muted);
+ VALIDATE;
+ muted = !muted;
+ res = volume->SetInputMute(chan, muted);
+ VALIDATE;
+ if (muted)
+ printf("\n Microphone is now on mute! \n");
+ else
+ printf("\n Microphone is no longer on mute! \n");
+
+ }
+ else if (codecinput == (noCodecs + 18)) {
+ // Toggle the call on hold
+ OnHoldModes mode;
+ res = base1->GetOnHoldStatus(chan, on_hold, mode);
+ VALIDATE;
+ on_hold = !on_hold;
+ mode = kHoldSendAndPlay;
+ res = base1->SetOnHoldStatus(chan, on_hold, mode);
+ VALIDATE;
+ if (on_hold)
+ printf("\n Call now on hold! \n");
+ else
+ printf("\n Call now not on hold! \n");
+ }
+
+ else if (codecinput == (noCodecs + 19)) {
+ // Get the last error code and print to screen
+ int err_code = 0;
+ err_code = base1->LastError();
+ if (err_code != -1)
+ printf("\n The last error code was %i.\n", err_code);
+ }
+ else if (codecinput == (noCodecs + 20)) {
+ typing_detection= !typing_detection;
+ res = apm->SetTypingDetectionStatus(typing_detection);
+ VALIDATE;
+ if (typing_detection)
+ printf("\n Typing detection is now on!\n");
+ else
+ printf("\n Typing detection is now off!\n");
+ }
+ else if (codecinput == (noCodecs + 21)) {
+ int stop_record = 1;
+ int file_source = 1;
+ printf("\n Select source of recorded file. ");
+ printf("\n 1. Record from microphone to file ");
+ printf("\n 2. Record from playout to file ");
+ printf("\n Enter your selection: \n");
+ ASSERT_EQ(1, scanf("%i", &file_source));
+ if (file_source == 1) {
+ printf("\n Start recording microphone as %s \n",
+ mic_filename.c_str());
+ res = file->StartRecordingMicrophone(mic_filename.c_str());
+ VALIDATE;
+ }
+ else {
+ printf("\n Start recording playout as %s \n", play_filename.c_str());
+ res = file->StartRecordingPlayout(chan, play_filename.c_str());
+ VALIDATE;
+ }
+ while (stop_record != 0) {
+ printf("\n Type 0 to stop recording file \n");
+ ASSERT_EQ(1, scanf("%i", &stop_record));
+ }
+ if (file_source == 1) {
+ res = file->StopRecordingMicrophone();
+ VALIDATE;
+ }
+ else {
+ res = file->StopRecordingPlayout(chan);
+ VALIDATE;
+ }
+ printf("\n File finished recording \n");
+ }
+ else if (codecinput == (noCodecs + 22)) {
+ int file_type = 1;
+ int stop_play = 1;
+ printf("\n Select a file to play locally in a loop.");
+ printf("\n 1. Play %s", mic_filename.c_str());
+ printf("\n 2. Play %s", play_filename.c_str());
+ printf("\n Enter your selection\n");
+ ASSERT_EQ(1, scanf("%i", &file_type));
+ if (file_type == 1) {
+ printf("\n Start playing %s locally in a loop\n",
+ mic_filename.c_str());
+ res = file->StartPlayingFileLocally(chan, mic_filename.c_str(), true);
+ VALIDATE;
+ }
+ else {
+ printf("\n Start playing %s locally in a loop\n",
+ play_filename.c_str());
+ res = file->StartPlayingFileLocally(chan, play_filename.c_str(),
+ true);
+ VALIDATE;
+ }
+ while (stop_play != 0) {
+ printf("\n Type 0 to stop playing file\n");
+ ASSERT_EQ(1, scanf("%i", &stop_play));
+ }
+ res = file->StopPlayingFileLocally(chan);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 23)) {
+ int file_type = 1;
+ int stop_play = 1;
+ printf("\n Select a file to play as microphone in a loop.");
+ printf("\n 1. Play %s", mic_filename.c_str());
+ printf("\n 2. Play %s", play_filename.c_str());
+ printf("\n Enter your selection\n");
+ ASSERT_EQ(1, scanf("%i", &file_type));
+ if (file_type == 1) {
+ printf("\n Start playing %s as mic in a loop\n",
+ mic_filename.c_str());
+ res = file->StartPlayingFileAsMicrophone(chan, mic_filename.c_str(),
+ true);
+ VALIDATE;
+ }
+ else {
+ printf("\n Start playing %s as mic in a loop\n",
+ play_filename.c_str());
+ res = file->StartPlayingFileAsMicrophone(chan, play_filename.c_str(),
+ true);
+ VALIDATE;
+ }
+ while (stop_play != 0) {
+ printf("\n Type 0 to stop playing file\n");
+ ASSERT_EQ(1, scanf("%i", &stop_play));
+ }
+ res = file->StopPlayingFileAsMicrophone(chan);
+ VALIDATE;
+ }
+ else if (codecinput == (noCodecs + 24)) {
+ if (channel_index < kMaxNumChannels) {
+ res = base1->StartReceive(channels[channel_index]);
+ VALIDATE;
+ res = base1->StartPlayout(channels[channel_index]);
+ VALIDATE;
+ res = base1->StartSend(channels[channel_index]);
+ VALIDATE;
+ res = file->StartPlayingFileAsMicrophone(channels[channel_index],
+ audio_filename.c_str(),
+ true,
+ false);
+ VALIDATE;
+ channel_index++;
+ printf("Using %d additional channels\n", channel_index);
+ } else {
+ printf("Max number of channels reached\n");
+ }
+ }
+ else if (codecinput == (noCodecs + 25)) {
+ if (channel_index > 0) {
+ channel_index--;
+ res = file->StopPlayingFileAsMicrophone(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopSend(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopPlayout(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopReceive(channels[channel_index]);
+ VALIDATE;
+ printf("Using %d additional channels\n", channel_index);
+ } else {
+ printf("All additional channels stopped\n");
+ }
+ }
+ else
+ break;
+ }
+
+ if (send) {
+ printf("Stop Send \n");
+ res = base1->StopSend(chan);
+ VALIDATE;
+ }
+
+ if (receive) {
+ printf("Stop Playout \n");
+ res = base1->StopPlayout(chan);
+ VALIDATE;
+
+#ifndef EXTERNAL_TRANSPORT
+ printf("Stop Listen \n");
+ res = base1->StopReceive(chan);
+ VALIDATE;
+#endif
+ }
+
+ while (channel_index > 0) {
+ --channel_index;
+ res = file->StopPlayingFileAsMicrophone(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopSend(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopPlayout(channels[channel_index]);
+ VALIDATE;
+ res = base1->StopReceive(channels[channel_index]);
+ VALIDATE;
+ }
+
+ printf("\n1. New call \n");
+ printf("2. Quit \n");
+ printf("Select action: ");
+ ASSERT_EQ(1, scanf("%i", &i));
+ newcall = (1 == i);
+ // Call loop
+ }
+
+ printf("Delete channels \n");
+ res = base1->DeleteChannel(chan);
+ VALIDATE;
+
+ for (i = 0; i < kMaxNumChannels; ++i) {
+ channels[i] = base1->DeleteChannel(channels[i]);
+ VALIDATE;
+ }
+}
diff --git a/voice_engine/test/voice_engine_tests.gypi b/voice_engine/test/voice_engine_tests.gypi
new file mode 100644
index 0000000..efa272d
--- /dev/null
+++ b/voice_engine/test/voice_engine_tests.gypi
@@ -0,0 +1,164 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'targets': [
+ # Auto test - command line test for all platforms
+ {
+ 'target_name': 'voe_auto_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'voice_engine_core',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/test/test.gyp:test_support',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(DEPTH)/testing/gmock.gyp:gmock',
+ '<(DEPTH)/third_party/google-gflags/google-gflags.gyp:google-gflags',
+ '<(webrtc_root)/test/libtest/libtest.gyp:libtest',
+ ],
+ 'include_dirs': [
+ 'auto_test',
+ 'auto_test/fixtures',
+ '<(webrtc_root)/modules/interface',
+ # TODO(phoglund): We only depend on voice_engine_defines.h here -
+ # move that file to interface and then remove this dependency.
+ '<(webrtc_root)/voice_engine',
+ '<(webrtc_root)/modules/audio_device/main/interface',
+ ],
+ 'sources': [
+ 'auto_test/automated_mode.cc',
+ 'auto_test/extended/agc_config_test.cc',
+ 'auto_test/extended/ec_metrics_test.cc',
+ 'auto_test/fakes/fake_external_transport.cc',
+ 'auto_test/fakes/fake_external_transport.h',
+ 'auto_test/fixtures/after_initialization_fixture.cc',
+ 'auto_test/fixtures/after_initialization_fixture.h',
+ 'auto_test/fixtures/after_streaming_fixture.cc',
+ 'auto_test/fixtures/after_streaming_fixture.h',
+ 'auto_test/fixtures/before_initialization_fixture.cc',
+ 'auto_test/fixtures/before_initialization_fixture.h',
+ 'auto_test/fuzz/rtp_fuzz_test.cc',
+ 'auto_test/standard/audio_processing_test.cc',
+ 'auto_test/standard/call_report_test.cc',
+ 'auto_test/standard/codec_before_streaming_test.cc',
+ 'auto_test/standard/codec_test.cc',
+ 'auto_test/standard/dtmf_test.cc',
+ 'auto_test/standard/encryption_test.cc',
+ 'auto_test/standard/external_media_test.cc',
+ 'auto_test/standard/file_before_streaming_test.cc',
+ 'auto_test/standard/file_test.cc',
+ 'auto_test/standard/hardware_before_initializing_test.cc',
+ 'auto_test/standard/hardware_before_streaming_test.cc',
+ 'auto_test/standard/hardware_test.cc',
+ 'auto_test/standard/manual_hold_test.cc',
+ 'auto_test/standard/mixing_test.cc',
+ 'auto_test/standard/neteq_stats_test.cc',
+ 'auto_test/standard/neteq_test.cc',
+ 'auto_test/standard/network_before_streaming_test.cc',
+ 'auto_test/standard/network_test.cc',
+ 'auto_test/standard/rtp_rtcp_before_streaming_test.cc',
+ 'auto_test/standard/rtp_rtcp_test.cc',
+ 'auto_test/standard/voe_base_misc_test.cc',
+ 'auto_test/standard/video_sync_test.cc',
+ 'auto_test/standard/volume_test.cc',
+ 'auto_test/resource_manager.cc',
+ 'auto_test/voe_cpu_test.cc',
+ 'auto_test/voe_cpu_test.h',
+ 'auto_test/voe_extended_test.cc',
+ 'auto_test/voe_extended_test.h',
+ 'auto_test/voe_standard_test.cc',
+ 'auto_test/voe_standard_test.h',
+ 'auto_test/voe_stress_test.cc',
+ 'auto_test/voe_stress_test.h',
+ 'auto_test/voe_test_defines.h',
+ 'auto_test/voe_test_interface.h',
+ 'auto_test/voe_unit_test.cc',
+ 'auto_test/voe_unit_test.h',
+ ],
+ 'conditions': [
+ ['OS=="android"', {
+          # Some tests are not yet supported on Android; exclude them here.
+ 'sources!': [
+ 'auto_test/standard/hardware_before_streaming_test.cc',
+ ],
+ }],
+ ],
+ },
+ {
+ # command line test that should work on linux/mac/win
+ 'target_name': 'voe_cmd_test',
+ 'type': 'executable',
+ 'dependencies': [
+ '<(webrtc_root)/test/test.gyp:test_support',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ 'voice_engine_core',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'sources': [
+ 'cmd_test/voe_cmd_test.cc',
+ ],
+ },
+ ],
+ 'conditions': [
+    # TODO(kjellander): Support UseOfMFC on VS2010.
+ # http://code.google.com/p/webrtc/issues/detail?id=709
+ ['OS=="win" and MSVS_VERSION < "2010"', {
+ 'targets': [
+ # WinTest - GUI test for Windows
+ {
+ 'target_name': 'voe_ui_win_test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'voice_engine_core',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ '<(webrtc_root)/test/test.gyp:test_support',
+ ],
+ 'include_dirs': [
+ 'win_test',
+ ],
+ 'sources': [
+ 'win_test/Resource.h',
+ 'win_test/WinTest.cc',
+ 'win_test/WinTest.h',
+ 'win_test/WinTest.rc',
+ 'win_test/WinTestDlg.cc',
+ 'win_test/WinTestDlg.h',
+ 'win_test/res/WinTest.ico',
+ 'win_test/res/WinTest.rc2',
+ 'win_test/stdafx.cc',
+ 'win_test/stdafx.h',
+ ],
+ 'configurations': {
+ 'Common_Base': {
+ 'msvs_configuration_attributes': {
+ 'conditions': [
+ ['component=="shared_library"', {
+ 'UseOfMFC': '2', # Shared DLL
+ },{
+ 'UseOfMFC': '1', # Static
+ }],
+ ],
+ },
+ },
+ },
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Windows
+ },
+ },
+ },
+ ],
+ }],
+ ],
+}
+
+# Local Variables:
+# tab-width:2
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=2 shiftwidth=2:
diff --git a/voice_engine/test/win_test/Resource.h b/voice_engine/test/win_test/Resource.h
new file mode 100644
index 0000000..5ae9c5f
--- /dev/null
+++ b/voice_engine/test/win_test/Resource.h
@@ -0,0 +1,241 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by WinTest.rc
+//
+#define IDM_ABOUTBOX 0x0010
+#define IDD_ABOUTBOX 100
+#define IDS_ABOUTBOX 101
+#define IDD_WINTEST_DIALOG 102
+#define IDR_MAINFRAME 128
+#define IDD_DTMF_DIALOG 129
+#define IDC_BUTTON_CREATE_1 1000
+#define IDC_BUTTON_DELETE_1 1001
+#define IDC_EDIT_1 1002
+#define IDC_BUTTON_CREATE_2 1003
+#define IDC_BUTTON_DELETE_2 1004
+#define IDC_EDIT_2 1005
+#define IDC_EDIT_MESSAGE 1006
+#define IDC_BUTTON_START_LISTEN_1 1007
+#define IDC_COMBO_IP_1 1008
+#define IDC_EDIT_TX_PORT_1 1009
+#define IDC_EDIT_RX_PORT_1 1010
+#define IDC_COMBO_CODEC_1 1011
+#define IDC_BUTTON_STOP_LISTEN_1 1012
+#define IDC_STATIC_LISTEN 1013
+#define IDC_BUTTON_START_PLAYOUT_1 1014
+#define IDC_BUTTON_STOP_PLAYOUT_1 1015
+#define IDC_STATIC_PLAY 1016
+#define IDC_BUTTON_START_SEND_1 1017
+#define IDC_BUTTON_STOP_SEND_1 1018
+#define IDC_STATIC_SEND 1019
+#define IDC_COMBO_IP_2 1020
+#define IDC_STATIC_IP 1021
+#define IDC_STATIC_PORTS 1022
+#define IDC_STATIC_CODEC 1023
+#define IDC_STATIC_CHANNEL 1024
+#define IDC_STATIC_ID 1025
+#define IDC_EDIT_TX_PORT_2 1026
+#define IDC_EDIT_RX_PORT_2 1027
+#define IDC_COMBO_CODEC_2 1028
+#define IDC_BUTTON_START_LISTEN_2 1029
+#define IDC_BUTTON_STOP_LISTEN_2 1030
+#define IDC_BUTTON_START_PLAYOUT_2 1031
+#define IDC_BUTTON_STOP_PLAYOUT_2 1032
+#define IDC_BUTTON_START_SEND_2 1033
+#define IDC_BUTTON_STOP_SEND_2 1034
+#define IDC_BUTTON_START_SEND_3 1035
+#define IDC_BUTTON_TEST_1_1 1035
+#define IDC_BUTTON_TEST_1 1035
+#define IDC_EDIT_RESULT 1036
+#define IDC_EDIT_N_FAILS 1037
+#define IDC_STATIC_ERROR 1038
+#define IDC_EDIT_LAST_ERROR 1039
+#define IDC_STATIC_LAST_ERROR 1040
+#define IDC_STATIC_PLAY_FILE 1041
+#define IDC_STATIC_EXTERNAL 1042
+#define IDC_CHECK_EXT_TRANS_1 1043
+#define IDC_CHECK2 1044
+#define IDC_CHECK_PLAY_FILE_IN_1 1044
+#define IDC_CHECK_PLAY_FILE_OUT_1 1045
+#define IDC_CHECK_PLAY_FILE_IN_2 1046
+#define IDC_CHECK_PLAY_FILE_OUT_2 1047
+#define IDC_CHECK_EXT_TRANS_2 1048
+#define IDC_STATIC_ALL_CHANNELS 1049
+#define IDC_CHECK_PLAY_FILE_IN 1050
+#define IDC_CHECK_PLAY_FILE_OUT 1051
+#define IDC_CHECK_EXT_MEDIA_IN_1 1051
+#define IDC_COMBO_REC_DEVICE 1052
+#define IDC_STATIC_REC_DEVICE 1053
+#define IDC_COMBO_PLAY_DEVICE2 1054
+#define IDC_COMBO_PLAY_DEVICE 1054
+#define IDC_STATIC_PLAY_DEVICE 1055
+#define IDC_CHECK_EXT_MEDIA_PLAY_1 1056
+#define IDC_CHECK_EXT_MEDIA_OUT_1 1056
+#define IDC_STATIC_PLAY_FILE2 1057
+#define IDC_SLIDER_INPUT_VOLUME 1058
+#define IDC_STATIC_MIC_VOLUME 1059
+#define IDC_SLIDER_OUTPUT_VOLUME 1060
+#define IDC_STATIC_SPK_VOLUME2 1061
+#define IDC_STATIC_SPK_VOLUME 1061
+#define IDC_CHECK_PLAY_FILE_IN2 1062
+#define IDC_CHECK_AGC 1062
+#define IDC_STATIC_MIC_VOLUME2 1063
+#define IDC_STATIC_AUDIO_LEVEL_IN 1063
+#define IDC_PROGRESS_AUDIO_LEVEL_IN 1064
+#define IDC_CHECK_AGC2 1065
+#define IDC_CHECK_NS 1065
+#define IDC_BUTTON_1 1065
+#define IDC_CHECK_VAD 1066
+#define IDC_CHECK_EXT_MEDIA_IN_2 1066
+#define IDC_BUTTON_2 1066
+#define IDC_CHECK_VAD2 1067
+#define IDC_CHECK_EC 1067
+#define IDC_BUTTON_3 1067
+#define IDC_CHECK_VAD_1 1068
+#define IDC_BUTTON_4 1068
+#define IDC_CHECK_VAD_2 1069
+#define IDC_CHECK_EXT_MEDIA_OUT_2 1069
+#define IDC_BUTTON_5 1069
+#define IDC_CHECK_VAD_3 1070
+#define IDC_BUTTON_6 1070
+#define IDC_CHECK_MUTE_IN 1071
+#define IDC_BUTTON_7 1071
+#define IDC_CHECK_MUTE_IN_1 1072
+#define IDC_BUTTON_8 1072
+#define IDC_CHECK_MUTE_IN_2 1073
+#define IDC_BUTTON_9 1073
+#define IDC_CHECK_SRTP_TX_1 1074
+#define IDC_BUTTON_10 1074
+#define IDC_CHECK_SRTP_RX_1 1075
+#define IDC_BUTTON_11 1075
+#define IDC_STATIC_PLAY_FILE3 1076
+#define IDC_STATIC_SRTP 1076
+#define IDC_BUTTON_12 1076
+#define IDC_CHECK_SRTP_TX_2 1077
+#define IDC_BUTTON_13 1077
+#define IDC_CHECK_SRTP_RX_2 1078
+#define IDC_BUTTON_14 1078
+#define IDC_CHECK_EXT_ENCRYPTION_1 1079
+#define IDC_BUTTON_15 1079
+#define IDC_STATIC_PLAY_FILE4 1080
+#define IDC_BUTTON_16 1080
+#define IDC_CHECK_EXT_ENCRYPTION_2 1081
+#define IDC_BUTTON_17 1081
+#define IDC_BUTTON_DTMF_1 1082
+#define IDC_BUTTON_18 1082
+#define IDC_EDIT_DTMF_EVENT 1083
+#define IDC_CHECK_REC_ 1083
+#define IDC_CHECK_REC_MIC 1083
+#define IDC_STATIC_DTMF_EVENT 1084
+#define IDC_BUTTON_DTMF_2 1084
+#define IDC_STATIC_GROUP_DTMF 1085
+#define IDC_CHECK_CONFERENCE_1 1085
+#define IDC_BUTTON_19 1086
+#define IDC_CHECK_CONFERENCE_2 1086
+#define IDC_BUTTON_20 1087
+#define IDC_CHECK_ON_HOLD_1 1087
+#define IDC_BUTTON_21 1088
+#define IDC_CHECK_ON_HOLD_2 1088
+#define IDC_BUTTON_22 1089
+#define IDC_CHECK_DTMF_PLAYOUT_RX 1089
+#define IDC_CHECK_EXT_MEDIA_IN 1089
+#define IDC_STATIC_PLAYOUT_RX 1090
+#define IDC_EDIT_GET_OUTPUT 1090
+#define IDC_CHECK_DTMF_PLAY_TONE 1091
+#define IDC_STATIC_LAST_ERROR2 1091
+#define IDC_STATIC_GET 1091
+#define IDC_STATIC_PLAY_TONE 1092
+#define IDC_CHECK_EXT_MEDIA_OUT 1092
+#define IDC_CHECK_START_STOP_MODE 1093
+#define IDC_BUTTON_SET_TX_TELEPHONE_PT 1093
+#define IDC_PROGRESS_AUDIO_LEVEL_IN2 1093
+#define IDC_PROGRESS_AUDIO_LEVEL_OUT 1093
+#define IDC_EDIT_EVENT_LENGTH 1094
+#define IDC_EDIT_RX_PORT_3 1094
+#define IDC_EDIT_DELAY_ESTIMATE_1 1094
+#define IDC_STATIC_EVENT_LENGTH 1095
+#define IDC_EDIT_PLAYOUT_BUFFER_SIZE 1095
+#define IDC_STATIC_START_STOP_MODE 1096
+#define IDC_EDIT_EVENT_RX_PT 1096
+#define IDC_CHECK_DELAY_ESTIMATE_1 1096
+#define IDC_EDIT_EVENT_ATTENUATION 1097
+#define IDC_CHECK_AGC_1 1097
+#define IDC_CHECK_EVENT_INBAND 1098
+#define IDC_CHECK_NS_1 1098
+#define IDC_STATIC_EVENT_ATTENUATION 1099
+#define IDC_STATIC_SRTP2 1099
+#define IDC_STATIC_RX_VQE 1099
+#define IDC_EDIT_EVENT_TX_PT 1100
+#define IDC_CHECK_REC_MIC2 1100
+#define IDC_CHECK_REC_CALL 1100
+#define IDC_CHECK_DTMF_FEEDBACK 1101
+#define IDC_CHECK_REC_CALL2 1101
+#define IDC_CHECK_TYPING_DETECTION 1101
+#define IDC_CHECK_START_STOP_MODE2 1102
+#define IDC_CHECK_DIRECT_FEEDBACK 1102
+#define IDC_CHECK_FEC 1102
+#define IDC_BUTTON_SET_RX_TELEPHONE_PT_TYPE 1103
+#define IDC_BUTTON_SET_RX_TELEPHONE_PT 1103
+#define IDC_BUTTON_CLEAR_ERROR_CALLBACK 1103
+#define IDC_EDIT_EVENT_CODE 1104
+#define IDC_STATIC_DIRECT_FEEDBACK 1105
+#define IDC_RADIO_SINGLE 1106
+#define IDC_RADIO_MULTI 1107
+#define IDC_RADIO_START_STOP 1108
+#define IDC_STATIC_MODE 1109
+#define IDC_STATIC_EVENT_RX_PT 1110
+#define IDC_STATIC_EVENT_TX_PT 1111
+#define IDC_STATIC_PT 1112
+#define IDC_BUTTON_SEND_TELEPHONE_EVENT 1113
+#define IDC_STATIC_EVENT_CODE 1114
+#define IDC_CHECK_EVENT_DETECTION 1115
+#define IDC_CHECK_DETECT_INBAND 1116
+#define IDC_CHECK_DETECT_OUT_OF_BAND 1117
+#define IDC_STATIC_INBAND_DETECTION 1118
+#define IDC_STATIC_OUT_OF_BAND_DETECTION 1119
+#define IDC_STATIC_EVENT_DETECTION 1120
+#define IDC_STATIC_TELEPHONE_EVENTS 1121
+#define IDC_EDIT_EVENT_CODE2 1122
+#define IDC_EDIT_ON_EVENT 1122
+#define IDC_EDIT_ON_EVENT_OUT_OF_BAND 1122
+#define IDC_STATIC_ON_EVENT 1123
+#define IDC_EDIT_ON_EVENT_INBAND 1123
+#define IDC_STATIC_EVEN 1124
+#define IDC_STATIC_LINE 1125
+#define IDC_LIST_CODEC_1 1128
+#define IDC_EDIT2 1129
+#define IDC_EDIT_CODEC_1 1129
+#define IDC_STATIC_PANNING 1131
+#define IDC_SLIDER_PAN_LEFT 1132
+#define IDC_SLIDER_PAN_RIGHT 1133
+#define IDC_STATIC_LEFT 1134
+#define IDC_STATIC_LEFT2 1135
+#define IDC_STATIC_RIGHT 1135
+#define IDC_BUTTON_VERSION 1136
+#define IDC_STATIC_PLAYOUT_BUFFER 1137
+#define IDC_CHECK_RXVAD 1138
+#define IDC_EDIT1 1139
+#define IDC_EDIT_RXVAD 1139
+#define IDC_STATIC_RX_PORT 1140
+#define IDC_STATIC_RX_PORT2 1141
+#define IDC_EDIT3 1142
+#define IDC_EDIT_AUDIO_LAYER 1142
+#define IDC_EDIT_AUDIO_LAYER2 1143
+#define IDC_EDIT_CPU_LOAD 1143
+#define IDC_STATIC_ERROR_CALLBACK 1144
+#define IDC_EDIT_ERROR_CALLBACK 1145
+#define IDC_EDIT_RX_CODEC_1 1146
+#define IDC_STATIC_BYTES_SENT_TEXT 1147
+#define IDC_EDIT_RTCP_STAT 1147
+#define IDC_EDIT_RTCP_STAT_1 1147
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 130
+#define _APS_NEXT_COMMAND_VALUE 32771
+#define _APS_NEXT_CONTROL_VALUE 1148
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/voice_engine/test/win_test/WinTest.aps b/voice_engine/test/win_test/WinTest.aps
new file mode 100644
index 0000000..499db5f
--- /dev/null
+++ b/voice_engine/test/win_test/WinTest.aps
Binary files differ
diff --git a/voice_engine/test/win_test/WinTest.cc b/voice_engine/test/win_test/WinTest.cc
new file mode 100644
index 0000000..e0e0248
--- /dev/null
+++ b/voice_engine/test/win_test/WinTest.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "stdafx.h"
+#include "WinTest.h"
+#include "WinTestDlg.h"
+
+#ifdef _DEBUG
+#define new DEBUG_NEW
+#endif
+
+
+// CWinTestApp
+
+BEGIN_MESSAGE_MAP(CWinTestApp, CWinApp)
+ ON_COMMAND(ID_HELP, &CWinApp::OnHelp)
+END_MESSAGE_MAP()
+
+
+// CWinTestApp construction
+
+CWinTestApp::CWinTestApp()
+{
+}
+
+
+// The one and only CWinTestApp object
+
+CWinTestApp theApp;
+
+
+// CWinTestApp initialization
+
+BOOL CWinTestApp::InitInstance()
+{
+ // InitCommonControlsEx() is required on Windows XP if an application
+ // manifest specifies use of ComCtl32.dll version 6 or later to enable
+ // visual styles. Otherwise, any window creation will fail.
+ INITCOMMONCONTROLSEX InitCtrls;
+ InitCtrls.dwSize = sizeof(InitCtrls);
+ // Set this to include all the common control classes you want to use
+ // in your application.
+ InitCtrls.dwICC = ICC_WIN95_CLASSES;
+ InitCommonControlsEx(&InitCtrls);
+
+ CWinApp::InitInstance();
+
+ // Standard initialization
+ // If you are not using these features and wish to reduce the size
+ // of your final executable, you should remove from the following
+ // the specific initialization routines you do not need
+ // Change the registry key under which our settings are stored
+ SetRegistryKey(_T("Local AppWizard-Generated Applications"));
+
+ CWinTestDlg dlg;
+ m_pMainWnd = &dlg;
+ INT_PTR nResponse = dlg.DoModal();
+ if (nResponse == IDOK)
+ {
+ }
+ else if (nResponse == IDCANCEL)
+ {
+ }
+
+ // Since the dialog has been closed, return FALSE so that we exit the
+ // application, rather than start the application's message pump.
+ return FALSE;
+}
diff --git a/voice_engine/test/win_test/WinTest.h b/voice_engine/test/win_test/WinTest.h
new file mode 100644
index 0000000..d012ce6
--- /dev/null
+++ b/voice_engine/test/win_test/WinTest.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma once
+
+#ifndef __AFXWIN_H__
+ #error "include 'stdafx.h' before including this file for PCH"
+#endif
+
+#include "resource.h" // main symbols
+
+
+// CWinTestApp:
+// See WinTest.cc for the implementation of this class
+//
+
+class CWinTestApp : public CWinApp
+{
+public:
+ CWinTestApp();
+
+// Overrides
+ public:
+ virtual BOOL InitInstance();
+
+// Implementation
+
+ DECLARE_MESSAGE_MAP()
+};
+
+extern CWinTestApp theApp;
diff --git a/voice_engine/test/win_test/WinTest.rc b/voice_engine/test/win_test/WinTest.rc
new file mode 100644
index 0000000..dfe503f
--- /dev/null
+++ b/voice_engine/test/win_test/WinTest.rc
@@ -0,0 +1,394 @@
+// Microsoft Visual C++ generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include "afxres.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+// Swedish resources
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)
+#ifdef _WIN32
+LANGUAGE LANG_SWEDISH, SUBLANG_DEFAULT
+#pragma code_page(1252)
+#endif //_WIN32
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+
+1 TEXTINCLUDE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE
+BEGIN
+ "#include ""afxres.h""\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE
+BEGIN
+ "#define _AFX_NO_SPLITTER_RESOURCES\r\n"
+ "#define _AFX_NO_OLE_RESOURCES\r\n"
+ "#define _AFX_NO_TRACKER_RESOURCES\r\n"
+ "#define _AFX_NO_PROPERTY_RESOURCES\r\n"
+ "\r\n"
+ "#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)\r\n"
+ "LANGUAGE 29, 1\r\n"
+ "#pragma code_page(1252)\r\n"
+ "#include ""res\\WinTest.rc2"" // non-Microsoft Visual C++ edited resources\r\n"
+ "#include ""afxres.rc"" // Standard components\r\n"
+ "#endif\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+IDR_MAINFRAME ICON "res\\WinTest.ico"
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Dialog
+//
+
+IDD_ABOUTBOX DIALOGEX 0, 0, 235, 55
+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU
+CAPTION "About WinTest"
+FONT 8, "MS Shell Dlg", 0, 0, 0x1
+BEGIN
+ ICON IDR_MAINFRAME,IDC_STATIC,11,17,20,20
+ LTEXT "WinTest Version 1.0",IDC_STATIC,40,10,119,8,SS_NOPREFIX
+ LTEXT "Copyright (C) 2010",IDC_STATIC,40,25,119,8
+ DEFPUSHBUTTON "OK",IDOK,178,7,50,16,WS_GROUP
+END
+
+IDD_WINTEST_DIALOG DIALOGEX 0, 0, 796, 278
+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_MINIMIZEBOX | WS_POPUP | WS_VISIBLE | WS_CAPTION | WS_SYSMENU
+EXSTYLE WS_EX_APPWINDOW
+CAPTION "WinTest"
+FONT 8, "MS Shell Dlg", 0, 0, 0x1
+BEGIN
+ PUSHBUTTON "Create",IDC_BUTTON_CREATE_1,28,24,32,14
+ PUSHBUTTON "Delete",IDC_BUTTON_DELETE_1,28,40,32,14
+ EDITTEXT IDC_EDIT_1,6,32,18,14,ES_AUTOHSCROLL | ES_READONLY
+ PUSHBUTTON "Create",IDC_BUTTON_CREATE_2,28,72,32,14
+ PUSHBUTTON "Delete",IDC_BUTTON_DELETE_2,28,88,32,14
+ EDITTEXT IDC_EDIT_2,6,82,18,14,ES_AUTOHSCROLL | ES_READONLY
+ EDITTEXT IDC_EDIT_MESSAGE,28,244,764,12,ES_AUTOHSCROLL
+ COMBOBOX IDC_COMBO_IP_1,64,24,76,30,CBS_DROPDOWN | CBS_SORT | WS_VSCROLL | WS_TABSTOP
+ EDITTEXT IDC_EDIT_TX_PORT_1,144,24,28,14,ES_AUTOHSCROLL
+ EDITTEXT IDC_EDIT_RX_PORT_1,144,40,28,14,ES_AUTOHSCROLL
+ COMBOBOX IDC_COMBO_CODEC_1,176,24,76,156,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP
+ PUSHBUTTON "Start",IDC_BUTTON_START_LISTEN_1,256,24,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_LISTEN_1,256,40,32,14
+ LTEXT "Receive",IDC_STATIC_LISTEN,262,8,26,8
+ PUSHBUTTON "Start",IDC_BUTTON_START_PLAYOUT_1,292,24,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_PLAYOUT_1,292,40,32,14
+ LTEXT "Playout",IDC_STATIC_PLAY,295,8,25,8
+ PUSHBUTTON "Start",IDC_BUTTON_START_SEND_1,328,24,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_SEND_1,328,40,32,14
+ LTEXT "Send",IDC_STATIC_SEND,335,8,17,8
+ COMBOBOX IDC_COMBO_IP_2,64,72,76,30,CBS_DROPDOWN | CBS_SORT | WS_VSCROLL | WS_TABSTOP
+ LTEXT "Destination IP address",IDC_STATIC_IP,64,8,73,8
+ LTEXT "Ports",IDC_STATIC_PORTS,145,8,18,8
+ LTEXT "Codec",IDC_STATIC_CODEC,177,8,21,8
+ LTEXT "Channel",IDC_STATIC_CHANNEL,30,8,27,8
+ LTEXT "ID",IDC_STATIC_ID,12,8,8,8
+ EDITTEXT IDC_EDIT_TX_PORT_2,144,72,28,14,ES_AUTOHSCROLL
+ EDITTEXT IDC_EDIT_RX_PORT_2,144,88,28,14,ES_AUTOHSCROLL
+ COMBOBOX IDC_COMBO_CODEC_2,176,72,76,156,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP
+ PUSHBUTTON "Start",IDC_BUTTON_START_LISTEN_2,256,72,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_LISTEN_2,256,88,32,14
+ PUSHBUTTON "Start",IDC_BUTTON_START_PLAYOUT_2,292,72,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_PLAYOUT_2,292,88,32,14
+ PUSHBUTTON "Start",IDC_BUTTON_START_SEND_2,328,72,32,14
+ PUSHBUTTON "Stop",IDC_BUTTON_STOP_SEND_2,328,88,32,14
+ PUSHBUTTON "TEST 1",IDC_BUTTON_TEST_1,756,224,36,14
+ LTEXT "API",IDC_STATIC,4,247,12,8
+ EDITTEXT IDC_EDIT_RESULT,28,260,96,12,ES_AUTOHSCROLL
+ LTEXT "Result",IDC_STATIC,3,263,21,8
+ EDITTEXT IDC_EDIT_N_FAILS,156,260,30,12,ES_AUTOHSCROLL
+ LTEXT "#Fails",IDC_STATIC_ERROR,132,263,20,8
+ EDITTEXT IDC_EDIT_LAST_ERROR,228,260,36,12,ES_AUTOHSCROLL
+ LTEXT "Last Error",IDC_STATIC_LAST_ERROR,192,262,32,8
+ LTEXT "Ext. Trans.",IDC_STATIC_EXTERNAL,361,8,37,8
+ CONTROL "",IDC_CHECK_EXT_TRANS_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,371,33,16,10
+ CONTROL "In",IDC_CHECK_PLAY_FILE_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,24,36,14,WS_EX_DLGMODALFRAME
+ LTEXT "Play File",IDC_STATIC_PLAY_FILE,401,8,27,8
+ CONTROL "Out",IDC_CHECK_PLAY_FILE_OUT_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,40,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "In",IDC_CHECK_PLAY_FILE_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,72,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Out",IDC_CHECK_PLAY_FILE_OUT_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,88,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "",IDC_CHECK_EXT_TRANS_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,371,82,16,10
+ GROUPBOX "",IDC_STATIC_ALL_CHANNELS,6,107,662,113
+ CONTROL "PlayFileAsMic",IDC_CHECK_PLAY_FILE_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,122,60,14,WS_EX_DLGMODALFRAME
+ COMBOBOX IDC_COMBO_REC_DEVICE,12,132,184,80,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP
+ LTEXT "Recording device",IDC_STATIC_REC_DEVICE,12,120,56,8
+ COMBOBOX IDC_COMBO_PLAY_DEVICE,12,180,184,80,CBS_DROPDOWN | WS_VSCROLL | WS_TABSTOP
+ LTEXT "Playout device",IDC_STATIC_PLAY_DEVICE,12,167,56,8
+ CONTROL "In",IDC_CHECK_EXT_MEDIA_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,24,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Out",IDC_CHECK_EXT_MEDIA_OUT_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,40,36,14,WS_EX_DLGMODALFRAME
+ LTEXT "Ext. Media",IDC_STATIC_PLAY_FILE2,437,8,35,8
+ CONTROL "",IDC_SLIDER_INPUT_VOLUME,"msctls_trackbar32",TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,196,130,72,15
+ LTEXT "Microphone Volume",IDC_STATIC_MIC_VOLUME,202,120,62,8
+ CONTROL "",IDC_SLIDER_OUTPUT_VOLUME,"msctls_trackbar32",TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,196,179,72,15
+ LTEXT "Speaker Volume",IDC_STATIC_SPK_VOLUME,202,167,52,8
+ CONTROL "AGC",IDC_CHECK_AGC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,316,122,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "",IDC_PROGRESS_AUDIO_LEVEL_IN,"msctls_progress32",WS_BORDER,268,135,42,6
+ LTEXT "Audio Level",IDC_STATIC_AUDIO_LEVEL_IN,271,120,38,8,NOT WS_GROUP
+ CONTROL "NS",IDC_CHECK_NS,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,316,142,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "EC",IDC_CHECK_EC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,356,122,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "VAD",IDC_CHECK_VAD_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,24,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "In",IDC_CHECK_EXT_MEDIA_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,72,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Out",IDC_CHECK_EXT_MEDIA_OUT_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,436,88,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "VAD",IDC_CHECK_VAD_3,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,72,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Mute",IDC_CHECK_MUTE_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,356,142,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Mute",IDC_CHECK_MUTE_IN_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,40,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Mute",IDC_CHECK_MUTE_IN_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,476,88,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "TX",IDC_CHECK_SRTP_TX_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,24,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "RX",IDC_CHECK_SRTP_RX_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,40,36,14,WS_EX_DLGMODALFRAME
+ LTEXT "SRTP",IDC_STATIC_SRTP,525,8,18,8
+ CONTROL "TX",IDC_CHECK_SRTP_TX_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,72,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "RX",IDC_CHECK_SRTP_RX_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,516,88,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "",IDC_CHECK_EXT_ENCRYPTION_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,564,33,16,10
+ LTEXT "Encrypt",IDC_STATIC_PLAY_FILE4,556,8,26,8
+ CONTROL "",IDC_CHECK_EXT_ENCRYPTION_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,564,82,16,10
+ PUSHBUTTON "DTMF>>",IDC_BUTTON_DTMF_1,584,24,36,14
+ CONTROL "RecMicToFile",IDC_CHECK_REC_MIC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,396,142,60,14,WS_EX_DLGMODALFRAME
+ PUSHBUTTON "DTMF>>",IDC_BUTTON_DTMF_2,584,72,36,14
+ CONTROL "Conf",IDC_CHECK_CONFERENCE_1,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,584,40,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Conf",IDC_CHECK_CONFERENCE_2,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,584,88,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Hold",IDC_CHECK_ON_HOLD_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,708,24,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "Hold",IDC_CHECK_ON_HOLD_2,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,708,72,36,14,WS_EX_DLGMODALFRAME
+ EDITTEXT IDC_EDIT_GET_OUTPUT,292,260,500,12,ES_AUTOHSCROLL
+ LTEXT "Get",IDC_STATIC_GET,276,262,12,8
+ CONTROL "Ext. Media",IDC_CHECK_EXT_MEDIA_IN,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,460,122,52,14,WS_EX_DLGMODALFRAME
+ CONTROL "Ext. Media",IDC_CHECK_EXT_MEDIA_OUT,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,460,180,52,14,WS_EX_DLGMODALFRAME
+ LISTBOX IDC_LIST_CODEC_1,208,40,44,28,LBS_NOINTEGRALHEIGHT | NOT WS_BORDER | WS_VSCROLL | WS_TABSTOP,WS_EX_CLIENTEDGE
+ EDITTEXT IDC_EDIT_CODEC_1,176,40,28,14,ES_AUTOHSCROLL
+ CONTROL "",IDC_PROGRESS_AUDIO_LEVEL_OUT,"msctls_progress32",WS_BORDER,268,184,42,6
+ LTEXT "Panning",IDC_STATIC_PANNING,328,167,26,8
+ CONTROL "",IDC_SLIDER_PAN_LEFT,"msctls_trackbar32",TBS_VERT | TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,328,175,12,28
+ CONTROL "",IDC_SLIDER_PAN_RIGHT,"msctls_trackbar32",TBS_VERT | TBS_BOTH | TBS_NOTICKS | WS_TABSTOP,344,175,12,28
+ LTEXT "L",IDC_STATIC_LEFT,332,200,8,8
+ LTEXT "R",IDC_STATIC_RIGHT,347,201,8,8
+ PUSHBUTTON "Version",IDC_BUTTON_VERSION,624,200,36,14
+ EDITTEXT IDC_EDIT_PLAYOUT_BUFFER_SIZE,363,181,28,12,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY | NOT WS_TABSTOP
+ LTEXT "Buffer Size",IDC_STATIC_PLAYOUT_BUFFER,361,167,36,8
+ CONTROL "Delay",IDC_CHECK_DELAY_ESTIMATE_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,624,24,36,14,WS_EX_DLGMODALFRAME
+ EDITTEXT IDC_EDIT_DELAY_ESTIMATE_1,631,40,24,14,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY | NOT WS_TABSTOP
+ CONTROL "RxVAD",IDC_CHECK_RXVAD,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,664,24,40,14,WS_EX_DLGMODALFRAME
+ EDITTEXT IDC_EDIT_RXVAD,671,40,24,14,ES_CENTER | ES_AUTOHSCROLL | ES_READONLY
+ CONTROL "AGC",IDC_CHECK_AGC_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,748,24,36,14,WS_EX_DLGMODALFRAME
+ CONTROL "NS",IDC_CHECK_NS_1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,748,40,36,14,WS_EX_DLGMODALFRAME
+ LTEXT "RX VQE",IDC_STATIC_RX_VQE,753,8,25,8
+ CONTROL "RecordCall",IDC_CHECK_REC_CALL,"Button",BS_AUTOCHECKBOX | NOT WS_VISIBLE | WS_TABSTOP,517,156,52,14,WS_EX_DLGMODALFRAME
+ LTEXT "RX",IDC_STATIC_RX_PORT,133,42,10,8
+ LTEXT "RX",IDC_STATIC_RX_PORT2,133,91,10,8
+ CONTROL "TypingDetect",IDC_CHECK_TYPING_DETECTION,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,572,156,60,14,WS_EX_DLGMODALFRAME
+ EDITTEXT IDC_EDIT_AUDIO_LAYER,28,224,116,14,ES_AUTOHSCROLL | ES_READONLY
+ EDITTEXT IDC_EDIT_CPU_LOAD,152,224,116,14,ES_AUTOHSCROLL | ES_READONLY
+ CONTROL "FEC",IDC_CHECK_FEC,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,176,55,28,14,WS_EX_DLGMODALFRAME
+ LTEXT "=> Callbacks",IDC_STATIC_ERROR_CALLBACK,283,226,43,8
+ EDITTEXT IDC_EDIT_ERROR_CALLBACK,328,224,312,14,ES_AUTOHSCROLL
+ PUSHBUTTON "Clear",IDC_BUTTON_CLEAR_ERROR_CALLBACK,644,224,24,14
+ EDITTEXT IDC_EDIT_RX_CODEC_1,256,56,216,12,ES_AUTOHSCROLL | ES_READONLY
+ EDITTEXT IDC_EDIT_RTCP_STAT_1,476,56,316,12,ES_AUTOHSCROLL | ES_READONLY
+END
+
+IDD_DTMF_DIALOG DIALOGEX 0, 0, 316, 212
+STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU
+CAPTION "Telehone Events"
+FONT 8, "MS Shell Dlg", 400, 0, 0x1
+BEGIN
+ DEFPUSHBUTTON "OK",IDOK,260,192,50,14
+ PUSHBUTTON "1",IDC_BUTTON_1,16,20,16,14
+ PUSHBUTTON "2",IDC_BUTTON_2,36,20,16,14
+ PUSHBUTTON "3",IDC_BUTTON_3,56,20,16,14
+ PUSHBUTTON "4",IDC_BUTTON_4,16,36,16,14
+ PUSHBUTTON "5",IDC_BUTTON_5,36,36,16,14
+ PUSHBUTTON "6",IDC_BUTTON_6,56,36,16,14
+ PUSHBUTTON "7",IDC_BUTTON_7,16,52,16,14
+ PUSHBUTTON "8",IDC_BUTTON_8,36,52,16,14
+ PUSHBUTTON "9",IDC_BUTTON_9,56,52,16,14
+ PUSHBUTTON "*",IDC_BUTTON_10,16,68,16,14
+ PUSHBUTTON "0",IDC_BUTTON_11,36,68,16,14
+ PUSHBUTTON "#",IDC_BUTTON_12,56,68,16,14
+ PUSHBUTTON "A",IDC_BUTTON_13,76,20,16,14
+ PUSHBUTTON "B",IDC_BUTTON_14,76,36,16,14
+ PUSHBUTTON "C",IDC_BUTTON_15,76,52,16,14
+ PUSHBUTTON "D",IDC_BUTTON_16,76,68,16,14
+ EDITTEXT IDC_EDIT_DTMF_EVENT,56,90,16,12,ES_AUTOHSCROLL | ES_READONLY
+ LTEXT "Event code",IDC_STATIC_DTMF_EVENT,17,91,37,8
+ PUSHBUTTON "1",IDC_BUTTON_17,16,20,16,14
+ PUSHBUTTON "2",IDC_BUTTON_18,36,20,16,14
+ PUSHBUTTON "3",IDC_BUTTON_19,56,20,16,14
+ PUSHBUTTON "4",IDC_BUTTON_20,16,36,16,14
+ PUSHBUTTON "A",IDC_BUTTON_21,76,20,16,14
+ GROUPBOX "DTMF Events",IDC_STATIC_GROUP_DTMF,4,4,188,132
+ CONTROL "",IDC_CHECK_DTMF_PLAYOUT_RX,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,160,21,12,14
+ LTEXT "Play out-band RX",IDC_STATIC_PLAYOUT_RX,101,24,56,8
+ CONTROL "",IDC_CHECK_DTMF_PLAY_TONE,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,160,39,12,14
+ LTEXT "Play tone locally",IDC_STATIC_PLAY_TONE,101,41,52,8
+ EDITTEXT IDC_EDIT_EVENT_LENGTH,44,163,28,14,ES_AUTOHSCROLL
+ LTEXT "Duration",IDC_STATIC_EVENT_LENGTH,12,165,28,8
+ EDITTEXT IDC_EDIT_EVENT_ATTENUATION,44,183,28,14,ES_AUTOHSCROLL
+ LTEXT "Volume",IDC_STATIC_EVENT_ATTENUATION,12,186,24,8
+ CONTROL "Inband",IDC_CHECK_EVENT_INBAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,84,163,40,14,WS_EX_DLGMODALFRAME
+ CONTROL "Feedback",IDC_CHECK_DTMF_FEEDBACK,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,16,112,48,14,WS_EX_DLGMODALFRAME
+ CONTROL "",IDC_CHECK_DIRECT_FEEDBACK,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,96,112,12,14
+ LTEXT "Direct",IDC_STATIC_DIRECT_FEEDBACK,72,115,20,8
+ CONTROL "Single",IDC_RADIO_SINGLE,"Button",BS_AUTORADIOBUTTON | WS_GROUP,112,68,35,10
+ CONTROL "Sequence",IDC_RADIO_MULTI,"Button",BS_AUTORADIOBUTTON,112,80,47,10
+ CONTROL "Start/Stop",IDC_RADIO_START_STOP,"Button",BS_AUTORADIOBUTTON,112,92,49,10
+ GROUPBOX "Mode",IDC_STATIC_MODE,100,56,68,52
+ EDITTEXT IDC_EDIT_EVENT_RX_PT,220,20,24,14,ES_AUTOHSCROLL
+ EDITTEXT IDC_EDIT_EVENT_TX_PT,220,41,24,14,ES_AUTOHSCROLL
+ LTEXT "RX",IDC_STATIC_EVENT_RX_PT,208,22,10,8
+ LTEXT "TX",IDC_STATIC_EVENT_TX_PT,208,42,9,8
+ PUSHBUTTON "Set",IDC_BUTTON_SET_TX_TELEPHONE_PT,248,41,24,14
+ PUSHBUTTON "Set",IDC_BUTTON_SET_RX_TELEPHONE_PT,248,20,24,14
+ GROUPBOX "Payload Type",IDC_STATIC_PT,200,4,80,56
+ EDITTEXT IDC_EDIT_EVENT_CODE,128,163,28,14,ES_AUTOHSCROLL
+ LTEXT "Event code",IDC_STATIC_EVENT_CODE,125,152,37,8
+ PUSHBUTTON "Send",IDC_BUTTON_SEND_TELEPHONE_EVENT,160,163,24,14
+ CONTROL "On/Off",IDC_CHECK_EVENT_DETECTION,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,80,40,14,WS_EX_DLGMODALFRAME
+ CONTROL "",IDC_CHECK_DETECT_INBAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,100,12,14
+ CONTROL "",IDC_CHECK_DETECT_OUT_OF_BAND,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,208,116,12,14
+ LTEXT "Inband",IDC_STATIC_INBAND_DETECTION,220,103,24,8
+ LTEXT "Outband",IDC_STATIC_OUT_OF_BAND_DETECTION,220,120,29,8
+ GROUPBOX "Event Detection",IDC_STATIC_EVENT_DETECTION,200,68,108,68
+ GROUPBOX "Telephone Events",IDC_STATIC_TELEPHONE_EVENTS,4,140,188,64
+ EDITTEXT IDC_EDIT_ON_EVENT_OUT_OF_BAND,252,117,48,14,ES_AUTOHSCROLL
+ EDITTEXT IDC_EDIT_ON_EVENT_INBAND,252,101,48,14,ES_AUTOHSCROLL
+ LTEXT "=> Detections",IDC_STATIC_EVEN,253,90,48,8
+END
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION 1,0,0,0
+ PRODUCTVERSION 1,0,0,0
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x4L
+ FILETYPE 0x1L
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904e4"
+ BEGIN
+ VALUE "FileDescription", "WebRTC VoiceEngine Test"
+ VALUE "FileVersion", "1.0.0.0"
+ VALUE "InternalName", "WinTest.exe"
+ VALUE "LegalCopyright", "Copyright (c) 2011 The WebRTC project authors. All Rights Reserved."
+ VALUE "OriginalFilename", "WinTest.exe"
+ VALUE "ProductName", "WebRTC VoiceEngine"
+ VALUE "ProductVersion", "1.0.0.0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1252
+ END
+END
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// DESIGNINFO
+//
+
+#ifdef APSTUDIO_INVOKED
+GUIDELINES DESIGNINFO
+BEGIN
+ IDD_ABOUTBOX, DIALOG
+ BEGIN
+ LEFTMARGIN, 7
+ RIGHTMARGIN, 228
+ TOPMARGIN, 7
+ BOTTOMMARGIN, 48
+ END
+
+ IDD_WINTEST_DIALOG, DIALOG
+ BEGIN
+ LEFTMARGIN, 7
+ RIGHTMARGIN, 789
+ TOPMARGIN, 7
+ BOTTOMMARGIN, 271
+ END
+
+ IDD_DTMF_DIALOG, DIALOG
+ BEGIN
+ LEFTMARGIN, 7
+ RIGHTMARGIN, 309
+ TOPMARGIN, 7
+ BOTTOMMARGIN, 205
+ END
+END
+#endif // APSTUDIO_INVOKED
+
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// String Table
+//
+
+STRINGTABLE
+BEGIN
+ IDS_ABOUTBOX "&About WinTest..."
+END
+
+#endif // Swedish resources
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+#define _AFX_NO_SPLITTER_RESOURCES
+#define _AFX_NO_OLE_RESOURCES
+#define _AFX_NO_TRACKER_RESOURCES
+#define _AFX_NO_PROPERTY_RESOURCES
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_SVE)
+LANGUAGE 29, 1
+#pragma code_page(1252)
+#include "res\WinTest.rc2" // non-Microsoft Visual C++ edited resources
+#include "afxres.rc" // Standard components
+#endif
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
+
diff --git a/voice_engine/test/win_test/WinTestDlg.cc b/voice_engine/test/win_test/WinTestDlg.cc
new file mode 100644
index 0000000..b11c09f
--- /dev/null
+++ b/voice_engine/test/win_test/WinTestDlg.cc
@@ -0,0 +1,3584 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include "stdafx.h"
+#include "WinTest.h"
+#include "WinTestDlg.h"
+#include "testsupport/fileutils.h"
+
+#ifdef _DEBUG
+#define new DEBUG_NEW
+#endif
+
+using namespace webrtc;
+
+unsigned char key[30] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+
+// Hack to convert char to TCHAR, using two buffers to be able to
+// call twice in the same statement
+TCHAR convertTemp1[256] = {0};
+TCHAR convertTemp2[256] = {0};
+bool convertBufferSwitch(false);
+TCHAR* CharToTchar(const char* str, int len)
+{
+#ifdef _UNICODE
+ TCHAR* temp = convertBufferSwitch ? convertTemp1 : convertTemp2;
+ convertBufferSwitch = !convertBufferSwitch;
+ memset(temp, 0, sizeof(convertTemp1));
+ MultiByteToWideChar(CP_UTF8, 0, str, len, temp, 256);
+ return temp;
+#else
+ return str;
+#endif
+}
+
+// Hack to convert TCHAR to char
+char convertTemp3[256] = {0};
+char* TcharToChar(TCHAR* str, int len)
+{
+#ifdef _UNICODE
+ memset(convertTemp3, 0, sizeof(convertTemp3));
+ WideCharToMultiByte(CP_UTF8, 0, str, len, convertTemp3, 256, 0, 0);
+ return convertTemp3;
+#else
+ return str;
+#endif
+}
+
+// ----------------------------------------------------------------------------
+// VoEConnectionObserver
+// ----------------------------------------------------------------------------
+
+class ConnectionObserver : public VoEConnectionObserver
+{
+public:
+ ConnectionObserver();
+ virtual void OnPeriodicDeadOrAlive(const int channel, const bool alive);
+};
+
+ConnectionObserver::ConnectionObserver()
+{
+}
+
+void ConnectionObserver::OnPeriodicDeadOrAlive(const int channel, const bool alive)
+{
+ CString str;
+ str.Format(_T("OnPeriodicDeadOrAlive(channel=%d) => alive=%d"), channel, alive);
+ OutputDebugString(str);
+}
+
+// ----------------------------------------------------------------------------
+// VoiceEngineObserver
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::CallbackOnError(const int channel, const int errCode)
+{
+ _nErrorCallbacks++;
+
+ CString str;
+ str.Format(_T("[#%d] CallbackOnError(channel=%d) => errCode = %d"), _nErrorCallbacks, channel, errCode);
+ if (errCode == VE_RECEIVE_PACKET_TIMEOUT)
+ {
+ str += _T(" <=> VE_RECEIVE_PACKET_TIMEOUT");
+ }
+ else if (errCode == VE_PACKET_RECEIPT_RESTARTED)
+ {
+ str += _T(" <=> VE_PACKET_RECEIPT_RESTARTED");
+ }
+ else if (errCode == VE_RUNTIME_PLAY_WARNING)
+ {
+ str += _T(" <=> VE_RUNTIME_PLAY_WARNING");
+ }
+ else if (errCode == VE_RUNTIME_REC_WARNING)
+ {
+ str += _T(" <=> VE_RUNTIME_REC_WARNING");
+ }
+ else if (errCode == VE_RUNTIME_PLAY_ERROR)
+ {
+ str += _T(" <=> VE_RUNTIME_PLAY_ERROR");
+ }
+ else if (errCode == VE_RUNTIME_REC_ERROR)
+ {
+ str += _T(" <=> VE_RUNTIME_REC_ERROR");
+ }
+ else if (errCode == VE_SATURATION_WARNING)
+ {
+ str += _T(" <=> VE_SATURATION_WARNING");
+ }
+ else if (errCode == VE_TYPING_NOISE_WARNING)
+ {
+ str += _T(" <=> VE_TYPING_NOISE_WARNING");
+ }
+ else if (errCode == VE_REC_DEVICE_REMOVED)
+ {
+ str += _T(" <=> VE_REC_DEVICE_REMOVED");
+ }
+ // AfxMessageBox((LPCTSTR)str, MB_OK);
+ SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+// ----------------------------------------------------------------------------
+// VoERTPObserver
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnIncomingCSRCChanged(const int channel, const unsigned int CSRC, const bool added)
+{
+ CString str;
+ str.Format(_T("OnIncomingCSRCChanged(channel=%d) => CSRC=%u, added=%d"), channel, CSRC, added);
+ SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+void CWinTestDlg::OnIncomingSSRCChanged(const int channel, const unsigned int SSRC)
+{
+ CString str;
+ str.Format(_T("OnIncomingSSRCChanged(channel=%d) => SSRC=%u"), channel, SSRC);
+ SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, (LPCTSTR)str);
+}
+
+// ----------------------------------------------------------------------------
+// Transport
+// ----------------------------------------------------------------------------
+
+class MyTransport : public Transport
+{
+public:
+ MyTransport(VoENetwork* veNetwork);
+ virtual int SendPacket(int channel, const void *data, int len);
+ virtual int SendRTCPPacket(int channel, const void *data, int len);
+private:
+ VoENetwork* _veNetworkPtr;
+};
+
+MyTransport::MyTransport(VoENetwork* veNetwork) :
+ _veNetworkPtr(veNetwork)
+{
+}
+
+int
+MyTransport::SendPacket(int channel, const void *data, int len)
+{
+ _veNetworkPtr->ReceivedRTPPacket(channel, data, len);
+ return len;
+}
+
+int
+MyTransport::SendRTCPPacket(int channel, const void *data, int len)
+{
+ _veNetworkPtr->ReceivedRTCPPacket(channel, data, len);
+ return len;
+}
+
+// ----------------------------------------------------------------------------
+// VoEMediaProcess
+// ----------------------------------------------------------------------------
+
+class MediaProcessImpl : public VoEMediaProcess
+{
+public:
+ MediaProcessImpl();
+ virtual void Process(const int channel,
+ const ProcessingTypes type,
+ WebRtc_Word16 audio_10ms[],
+ const int length,
+ const int samplingFreqHz,
+ const bool stereo);
+};
+
+MediaProcessImpl::MediaProcessImpl()
+{
+}
+
+void MediaProcessImpl::Process(const int channel,
+ const ProcessingTypes type,
+ WebRtc_Word16 audio_10ms[],
+ const int length,
+ const int samplingFreqHz,
+ const bool stereo)
+{
+ int x = rand() % 100;
+
+ for (int i = 0; i < length; i++)
+ {
+ if (channel == -1)
+ {
+ if (type == kPlaybackAllChannelsMixed)
+ {
+ // playout: scale up
+ if (!stereo)
+ {
+ audio_10ms[i] = (audio_10ms[i] << 2);
+ }
+ else
+ {
+ audio_10ms[2*i] = (audio_10ms[2*i] << 2);
+ audio_10ms[2*i+1] = (audio_10ms[2*i+1] << 2);
+ }
+ }
+ else
+ {
+ // recording: emulate packet loss by "dropping" 10% of the packets
+ if (x >= 0 && x < 10)
+ {
+ if (!stereo)
+ {
+ audio_10ms[i] = 0;
+ }
+ else
+ {
+ audio_10ms[2*i] = 0;
+ audio_10ms[2*i+1] = 0;
+ }
+ }
+ }
+ }
+ else
+ {
+ if (type == kPlaybackPerChannel)
+ {
+ // playout: mute
+ if (!stereo)
+ {
+ audio_10ms[i] = 0;
+ }
+ else
+ {
+ audio_10ms[2*i] = 0;
+ audio_10ms[2*i+1] = 0;
+ }
+ }
+ else
+ {
+ // recording: emulate packet loss by "dropping" 50% of the packets
+ if (x >= 0 && x < 50)
+ {
+ if (!stereo)
+ {
+ audio_10ms[i] = 0;
+ }
+ else
+ {
+ audio_10ms[2*i] = 0;
+ audio_10ms[2*i+1] = 0;
+ }
+ }
+ }
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Encryption
+// ----------------------------------------------------------------------------
+
+class MyEncryption : public Encryption
+{
+public:
+ void encrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+ void decrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+ void encrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+ void decrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out);
+};
+
+void MyEncryption::encrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+ // --- Stereo emulation (sample based, 2 bytes per sample)
+
+ const int nBytesPayload = bytes_in-12;
+
+ // RTP header (first 12 bytes)
+ memcpy(out_data, in_data, 12);
+
+ // skip RTP header
+ short* ptrIn = (short*) &in_data[12];
+ short* ptrOut = (short*) &out_data[12];
+
+ // network byte order
+ for (int i = 0; i < nBytesPayload/2; i++)
+ {
+ // produce two output samples for each input sample
+ *ptrOut++ = *ptrIn; // left sample
+ *ptrOut++ = *ptrIn; // right sample
+ ptrIn++;
+ }
+
+ *bytes_out = 12 + 2*nBytesPayload;
+
+ /*
+ for(int i = 0; i < bytes_in; i++)
+ out_data[i] =~ in_data[i];
+ *bytes_out = bytes_in;
+ */
+}
+
+void MyEncryption::decrypt(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+ // Do nothing (<=> memcpy)
+ for(int i = 0; i < bytes_in; i++)
+ out_data[i] = in_data[i];
+ *bytes_out = bytes_in;
+}
+
+void MyEncryption::encrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+ for(int i = 0; i < bytes_in; i++)
+ out_data[i] =~ in_data[i];
+ *bytes_out = bytes_in;
+}
+
+void MyEncryption::decrypt_rtcp(int channel_no, unsigned char * in_data, unsigned char * out_data, int bytes_in, int* bytes_out)
+{
+ for(int i = 0; i < bytes_in; i++)
+ out_data[i] =~ in_data[i];
+ *bytes_out = bytes_in;
+}
+
+// ----------------------------------------------------------------------------
+// TelephoneEventObserver
+// ----------------------------------------------------------------------------
+
+class TelephoneEventObserver: public VoETelephoneEventObserver
+{
+public:
+ TelephoneEventObserver(CWnd* editControlOut, CWnd* editControlIn);
+ virtual void OnReceivedTelephoneEventInband(int channel, int eventCode,
+ bool endOfEvent);
+ virtual void OnReceivedTelephoneEventOutOfBand(int channel, int eventCode,
+ bool endOfEvent);
+private:
+ CWnd* _editControlOutPtr;
+ CWnd* _editControlInPtr;
+};
+
+TelephoneEventObserver::TelephoneEventObserver(CWnd* editControlOut, CWnd* editControlIn) :
+ _editControlOutPtr(editControlOut),
+ _editControlInPtr(editControlIn)
+{
+}
+
+void TelephoneEventObserver::OnReceivedTelephoneEventInband(int channel,
+ int eventCode,
+ bool endOfEvent)
+{
+ CString msg;
+ if (endOfEvent)
+ {
+ msg.AppendFormat(_T("%d [END]"), eventCode);
+ _editControlInPtr->SetWindowText((LPCTSTR)msg);
+ }
+ else
+ {
+ msg.AppendFormat(_T("%d [START]"), eventCode);
+ _editControlInPtr->SetWindowText((LPCTSTR)msg);
+ }
+}
+
+void TelephoneEventObserver::OnReceivedTelephoneEventOutOfBand(int channel,
+ int eventCode,
+ bool endOfEvent)
+{
+ CString msg;
+ if (endOfEvent)
+ {
+ msg.AppendFormat(_T("%d [END]"), eventCode);
+ _editControlOutPtr->SetWindowText((LPCTSTR)msg);
+ }
+ else
+ {
+ msg.AppendFormat(_T("%d [START]"), eventCode);
+ _editControlOutPtr->SetWindowText((LPCTSTR)msg);
+ }
+}
+
+// ----------------------------------------------------------------------------
+// RxVadCallback
+// ----------------------------------------------------------------------------
+
+class RxCallback : public VoERxVadCallback
+{
+public:
+ RxCallback() : vad_decision(-1) {};
+
+ virtual void OnRxVad(int , int vadDecision)
+ {
+ vad_decision = vadDecision;
+ }
+
+ int vad_decision;
+};
+
+// ----------------------------------------------------------------------------
+// CAboutDlg dialog
+// ----------------------------------------------------------------------------
+
+class CAboutDlg : public CDialog
+{
+public:
+ CAboutDlg();
+
+// Dialog Data
+ enum { IDD = IDD_ABOUTBOX };
+
+ protected:
+ virtual void DoDataExchange(CDataExchange* pDX); // DDX/DDV support
+
+// Implementation
+protected:
+ DECLARE_MESSAGE_MAP()
+};
+
+CAboutDlg::CAboutDlg() : CDialog(CAboutDlg::IDD)
+{
+}
+
+void CAboutDlg::DoDataExchange(CDataExchange* pDX)
+{
+ CDialog::DoDataExchange(pDX);
+}
+
+BEGIN_MESSAGE_MAP(CAboutDlg, CDialog)
+END_MESSAGE_MAP()
+
+// ----------------------------------------------------------------------------
+// CTelephonyEvent dialog
+// ----------------------------------------------------------------------------
+
// Modal dialog for exercising the VoE DTMF / telephone-event API on one
// channel: a 16-key DTMF pad, local tone playout, telephone-event sending
// (in-band or out-of-band), payload-type configuration, and event detection.
class CTelephonyEvent : public CDialog
{
    DECLARE_DYNAMIC(CTelephonyEvent)

public:
    // voiceEngine/channel identify the engine instance and channel under test;
    // pParentDialog is kept only as a back-pointer (not the MFC parent window).
    CTelephonyEvent(VoiceEngine* voiceEngine, int channel, CDialog* pParentDialog, CWnd* pParent = NULL);    // standard constructor
    virtual ~CTelephonyEvent();

// Dialog Data
    enum { IDD = IDD_DTMF_DIALOG };

protected:
    virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
    virtual BOOL OnInitDialog();

    DECLARE_MESSAGE_MAP()
public:
    // Keypad buttons 1-12 map to DTMF events (10 = '*', 11 = '#'),
    // A-D map to events 12-15.
    afx_msg void OnBnClickedButton1();
    afx_msg void OnBnClickedButton2();
    afx_msg void OnBnClickedButton3();
    afx_msg void OnBnClickedButton4();
    afx_msg void OnBnClickedButton5();
    afx_msg void OnBnClickedButton6();
    afx_msg void OnBnClickedButton7();
    afx_msg void OnBnClickedButton8();
    afx_msg void OnBnClickedButton9();
    afx_msg void OnBnClickedButton10();
    afx_msg void OnBnClickedButton11();
    afx_msg void OnBnClickedButton12();
    afx_msg void OnBnClickedButtonA();
    afx_msg void OnBnClickedButtonB();
    afx_msg void OnBnClickedButtonC();
    afx_msg void OnBnClickedButtonD();
    afx_msg void OnBnClickedCheckDtmfPlayoutRx();
    afx_msg void OnBnClickedCheckDtmfPlayTone();
    // NOTE(review): declared but no ON_BN_CLICKED entry appears in the message
    // map below — confirm whether an implementation/mapping exists elsewhere.
    afx_msg void OnBnClickedCheckStartStopMode();
    afx_msg void OnBnClickedCheckEventInband();
    afx_msg void OnBnClickedCheckDtmfFeedback();
    afx_msg void OnBnClickedCheckDirectFeedback();
    afx_msg void OnBnClickedRadioSingle();
    afx_msg void OnBnClickedRadioMulti();
    afx_msg void OnBnClickedRadioStartStop();
    afx_msg void OnBnClickedButtonSetRxTelephonePt();
    afx_msg void OnBnClickedButtonSetTxTelephonePt();
    afx_msg void OnBnClickedButtonSendTelephoneEvent();
    afx_msg void OnBnClickedCheckDetectInband();
    afx_msg void OnBnClickedCheckDetectOutOfBand();
    afx_msg void OnBnClickedCheckEventDetection();

private:
    // Sends (or plays locally) the given event according to the current
    // single/start-stop/sequence mode and dialog settings.
    void SendTelephoneEvent(unsigned char eventCode);

private:
    VoiceEngine* _vePtr;                     // engine instance (not owned)
    VoEBase* _veBasePtr;                     // sub-API interfaces, released in dtor
    VoEDtmf* _veDTMFPtr;
    VoECodec* _veCodecPtr;
    int _channel;                            // channel this dialog operates on
    CString _strMsg;
    CDialog* _parentDialogPtr;               // back-pointer to the parent dialog
    TelephoneEventObserver* _telephoneEventObserverPtr;  // owned; non-NULL while detection is registered
    bool _PlayDtmfToneLocally;               // play tone locally instead of sending
    bool _modeStartStop;                     // exactly one of the three mode flags is true
    bool _modeSingle;
    bool _modeSequence;
    bool _playingDTMFTone;                   // start/stop tone currently playing
    bool _outOfBandEventDetection;           // detection method selection
    bool _inbandEventDetection;
};
+
+IMPLEMENT_DYNAMIC(CTelephonyEvent, CDialog)
+
+CTelephonyEvent::CTelephonyEvent(VoiceEngine* voiceEngine,
+ int channel,
+ CDialog* pParentDialog,
+ CWnd* pParent /*=NULL*/)
+ : _vePtr(voiceEngine),
+ _channel(channel),
+ _PlayDtmfToneLocally(false),
+ _modeStartStop(false),
+ _modeSingle(true),
+ _modeSequence(false),
+ _playingDTMFTone(false),
+ _outOfBandEventDetection(true),
+ _inbandEventDetection(false),
+ _parentDialogPtr(pParentDialog),
+ _telephoneEventObserverPtr(NULL),
+ CDialog(CTelephonyEvent::IDD, pParent)
+{
+ _veBasePtr = VoEBase::GetInterface(_vePtr);
+ _veDTMFPtr = VoEDtmf::GetInterface(_vePtr);
+ _veCodecPtr = VoECodec::GetInterface(_vePtr);
+}
+
+CTelephonyEvent::~CTelephonyEvent()
+{
+ _veDTMFPtr->Release();
+ _veCodecPtr->Release();
+ _veBasePtr->Release();
+
+ if (_telephoneEventObserverPtr)
+ {
+ _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+ delete _telephoneEventObserverPtr;
+ _telephoneEventObserverPtr = NULL;
+ }
+}
+
// Standard MFC DDX/DDV hook; this dialog has no mapped data members.
void CTelephonyEvent::DoDataExchange(CDataExchange* pDX)
{
    CDialog::DoDataExchange(pDX);
}
+
+
// Message map: routes button/check-box clicks to the handlers above.
// NOTE(review): OnBnClickedCheckStartStopMode is declared in the class but has
// no ON_BN_CLICKED entry here — confirm whether that is intentional.
BEGIN_MESSAGE_MAP(CTelephonyEvent, CDialog)
    // DTMF keypad: buttons 1-16 (10='*', 11='#', 13-16 = A-D).
    ON_BN_CLICKED(IDC_BUTTON_1, &CTelephonyEvent::OnBnClickedButton1)
    ON_BN_CLICKED(IDC_BUTTON_2, &CTelephonyEvent::OnBnClickedButton2)
    ON_BN_CLICKED(IDC_BUTTON_3, &CTelephonyEvent::OnBnClickedButton3)
    ON_BN_CLICKED(IDC_BUTTON_4, &CTelephonyEvent::OnBnClickedButton4)
    ON_BN_CLICKED(IDC_BUTTON_5, &CTelephonyEvent::OnBnClickedButton5)
    ON_BN_CLICKED(IDC_BUTTON_6, &CTelephonyEvent::OnBnClickedButton6)
    ON_BN_CLICKED(IDC_BUTTON_7, &CTelephonyEvent::OnBnClickedButton7)
    ON_BN_CLICKED(IDC_BUTTON_8, &CTelephonyEvent::OnBnClickedButton8)
    ON_BN_CLICKED(IDC_BUTTON_9, &CTelephonyEvent::OnBnClickedButton9)
    ON_BN_CLICKED(IDC_BUTTON_10, &CTelephonyEvent::OnBnClickedButton10)
    ON_BN_CLICKED(IDC_BUTTON_11, &CTelephonyEvent::OnBnClickedButton11)
    ON_BN_CLICKED(IDC_BUTTON_12, &CTelephonyEvent::OnBnClickedButton12)
    ON_BN_CLICKED(IDC_BUTTON_13, &CTelephonyEvent::OnBnClickedButtonA)
    ON_BN_CLICKED(IDC_BUTTON_14, &CTelephonyEvent::OnBnClickedButtonB)
    ON_BN_CLICKED(IDC_BUTTON_15, &CTelephonyEvent::OnBnClickedButtonC)
    ON_BN_CLICKED(IDC_BUTTON_16, &CTelephonyEvent::OnBnClickedButtonD)
    // Playout / feedback / mode settings.
    ON_BN_CLICKED(IDC_CHECK_DTMF_PLAYOUT_RX, &CTelephonyEvent::OnBnClickedCheckDtmfPlayoutRx)
    ON_BN_CLICKED(IDC_CHECK_DTMF_PLAY_TONE, &CTelephonyEvent::OnBnClickedCheckDtmfPlayTone)
    ON_BN_CLICKED(IDC_CHECK_EVENT_INBAND, &CTelephonyEvent::OnBnClickedCheckEventInband)
    ON_BN_CLICKED(IDC_CHECK_DTMF_FEEDBACK, &CTelephonyEvent::OnBnClickedCheckDtmfFeedback)
    ON_BN_CLICKED(IDC_CHECK_DIRECT_FEEDBACK, &CTelephonyEvent::OnBnClickedCheckDirectFeedback)
    ON_BN_CLICKED(IDC_RADIO_SINGLE, &CTelephonyEvent::OnBnClickedRadioSingle)
    ON_BN_CLICKED(IDC_RADIO_MULTI, &CTelephonyEvent::OnBnClickedRadioMulti)
    ON_BN_CLICKED(IDC_RADIO_START_STOP, &CTelephonyEvent::OnBnClickedRadioStartStop)
    // Payload types, explicit send, detection settings.
    ON_BN_CLICKED(IDC_BUTTON_SET_RX_TELEPHONE_PT, &CTelephonyEvent::OnBnClickedButtonSetRxTelephonePt)
    ON_BN_CLICKED(IDC_BUTTON_SET_TX_TELEPHONE_PT, &CTelephonyEvent::OnBnClickedButtonSetTxTelephonePt)
    ON_BN_CLICKED(IDC_BUTTON_SEND_TELEPHONE_EVENT, &CTelephonyEvent::OnBnClickedButtonSendTelephoneEvent)
    ON_BN_CLICKED(IDC_CHECK_DETECT_INBAND, &CTelephonyEvent::OnBnClickedCheckDetectInband)
    ON_BN_CLICKED(IDC_CHECK_DETECT_OUT_OF_BAND, &CTelephonyEvent::OnBnClickedCheckDetectOutOfBand)
    ON_BN_CLICKED(IDC_CHECK_EVENT_DETECTION, &CTelephonyEvent::OnBnClickedCheckEventDetection)
END_MESSAGE_MAP()
+
+
+// CTelephonyEvent message handlers
+
// Initializes dialog controls to mirror the engine's current DTMF state:
// playout, feedback, detection status/method, payload types and mode radios.
// Intricate and order-dependent; code left byte-identical, comments only.
BOOL CTelephonyEvent::OnInitDialog()
{
    CDialog::OnInitDialog();

    // Show the channel number in the window title.
    CString str;
    GetWindowText(str);
    str.AppendFormat(_T(" [channel = %d]"), _channel);
    SetWindowText(str);

    // Update dialog with latest playout state
    bool enabled(false);
    _veDTMFPtr->GetDtmfPlayoutStatus(_channel, enabled);
    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAYOUT_RX);
    button->SetCheck(enabled ? BST_CHECKED : BST_UNCHECKED);

    // Update dialog with latest feedback state
    // (note: 'enabled' is deliberately reused as the feedback out-parameter)
    bool directFeedback(false);
    _veDTMFPtr->GetDtmfFeedbackStatus(enabled, directFeedback);
    button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
    button->SetCheck(enabled ? BST_CHECKED : BST_UNCHECKED);
    button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
    button->SetCheck(directFeedback ? BST_CHECKED : BST_UNCHECKED);

    // Default event length is 160 ms
    SetDlgItemInt(IDC_EDIT_EVENT_LENGTH, 160);

    // Default event attenuation is 10 (<-> -10dBm0)
    SetDlgItemInt(IDC_EDIT_EVENT_ATTENUATION, 10);

    // Current event-detection status
    TelephoneEventDetectionMethods detectionMethod(kOutOfBand);
    if (_veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod) == 0)
    {
        // DTMF detection is supported
        if (enabled)
        {
            button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_DETECTION);
            button->SetCheck(BST_CHECKED);
        }
        if (detectionMethod == kOutOfBand || detectionMethod == kInAndOutOfBand)
        {
            button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND);
            button->SetCheck(BST_CHECKED);
        }
        if (detectionMethod == kInBand || detectionMethod == kInAndOutOfBand)
        {
            button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_INBAND);
            button->SetCheck(BST_CHECKED);
        }
    }
    else
    {
        // DTMF detection is not supported => gray out all detection controls
        GetDlgItem(IDC_CHECK_EVENT_DETECTION)->EnableWindow(FALSE);
        GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND)->EnableWindow(FALSE);
        GetDlgItem(IDC_CHECK_DETECT_INBAND)->EnableWindow(FALSE);
        GetDlgItem(IDC_EDIT_ON_EVENT_INBAND)->EnableWindow(FALSE);
        GetDlgItem(IDC_EDIT_ON_EVENT_OUT_OF_BAND)->EnableWindow(FALSE);
    }

    // Telephone-event PTs
    unsigned char pt(0);
    _veDTMFPtr->GetSendTelephoneEventPayloadType(_channel, pt);
    SetDlgItemInt(IDC_EDIT_EVENT_TX_PT, pt);

    // Query the receive-side payload type for "telephone-event" (8 kHz, mono).
    CodecInst codec;
    strcpy_s(codec.plname, 32, "telephone-event"); codec.channels = 1; codec.plfreq = 8000;
    _veCodecPtr->GetRecPayloadType(_channel, codec);
    SetDlgItemInt(IDC_EDIT_EVENT_RX_PT, codec.pltype);

    // Reflect the current mode in the radio buttons.
    if (_modeSingle)
    {
        ((CButton*)GetDlgItem(IDC_RADIO_SINGLE))->SetCheck(BST_CHECKED);
    }
    else if (_modeStartStop)
    {
        ((CButton*)GetDlgItem(IDC_RADIO_START_STOP))->SetCheck(BST_CHECKED);
    }
    else if (_modeSequence)
    {
        ((CButton*)GetDlgItem(IDC_RADIO_MULTI))->SetCheck(BST_CHECKED);
    }

    return TRUE;  // return TRUE unless you set the focus to a control
}
+void CTelephonyEvent::SendTelephoneEvent(unsigned char eventCode)
+{
+ BOOL ret;
+ int lengthMs(0);
+ int attenuationDb(0);
+ bool outBand(false);
+ int res(0);
+
+ // tone length
+ if (!_modeStartStop)
+ {
+ lengthMs = GetDlgItemInt(IDC_EDIT_EVENT_LENGTH, &ret);
+ if (ret == FALSE)
+ {
+ // use default length if edit field is empty
+ lengthMs = 160;
+ }
+ }
+
+ // attenuation
+ attenuationDb = GetDlgItemInt(IDC_EDIT_EVENT_ATTENUATION, &ret);
+ if (ret == FALSE)
+ {
+ // use default length if edit field is empty
+ attenuationDb = 10;
+ }
+
+ // out-band or in-band
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_INBAND);
+ int check = button->GetCheck();
+ outBand = (check == BST_UNCHECKED);
+
+ if (eventCode < 16)
+ SetDlgItemInt(IDC_EDIT_DTMF_EVENT, eventCode);
+
+ if (_PlayDtmfToneLocally)
+ {
+ // --- PlayDtmfTone
+
+ if (_modeSingle)
+ {
+ TEST2(_veDTMFPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb) == 0,
+ _T("PlayDtmfTone(eventCode=%u, lengthMs=%d, attenuationDb=%d)"), eventCode, lengthMs, attenuationDb);
+ }
+ else if (_modeStartStop)
+ {
+ if (!_playingDTMFTone)
+ {
+ TEST2((res = _veDTMFPtr->StartPlayingDtmfTone(eventCode, attenuationDb)) == 0,
+ _T("StartPlayingDtmfTone(eventCode=%u, attenuationDb=%d)"), eventCode, attenuationDb);
+ }
+ else
+ {
+ TEST2((res = _veDTMFPtr->StopPlayingDtmfTone()) == 0,
+ _T("StopPlayingDTMFTone()"));
+ }
+ if (res == 0)
+ _playingDTMFTone = !_playingDTMFTone;
+ }
+ else if (_modeSequence)
+ {
+ int nTones(1);
+ int sleepMs(0);
+ int lenMult(1);
+ if (eventCode == 1)
+ {
+ nTones = 2;
+ sleepMs = lengthMs;
+ lenMult = 1;
+ }
+ else if (eventCode == 2)
+ {
+ nTones = 2;
+ sleepMs = lengthMs/2;
+ lenMult = 2;
+ }
+ else if (eventCode == 3)
+ {
+ nTones = 3;
+ sleepMs = 0;
+ lenMult = 1;
+ }
+ for (int i = 0; i < nTones; i++)
+ {
+ TEST2(_veDTMFPtr->PlayDtmfTone(eventCode, lengthMs, attenuationDb) == 0,
+ _T("PlayDtmfTone(eventCode=%u, outBand=%d, lengthMs=%d, attenuationDb=%d)"), eventCode, lengthMs, attenuationDb);
+ Sleep(sleepMs);
+ lengthMs = lenMult*lengthMs;
+ eventCode++;
+ }
+ }
+ }
+ else
+ {
+ // --- SendTelephoneEvent
+
+ if (_modeSingle)
+ {
+ TEST2(_veDTMFPtr->SendTelephoneEvent(_channel, eventCode, outBand, lengthMs, attenuationDb) == 0,
+ _T("SendTelephoneEvent(channel=%d, eventCode=%u, outBand=%d, lengthMs=%d, attenuationDb=%d)"), _channel, eventCode, outBand, lengthMs, attenuationDb);
+ }
+ else if (_modeStartStop)
+ {
+ TEST2(false, _T("*** NOT IMPLEMENTED ***"));
+ }
+ else if (_modeSequence)
+ {
+ int nTones(1);
+ int sleepMs(0);
+ int lenMult(1);
+ if (eventCode == 1)
+ {
+ nTones = 2;
+ sleepMs = lengthMs;
+ lenMult = 1;
+ }
+ else if (eventCode == 2)
+ {
+ eventCode = 1;
+ nTones = 2;
+ sleepMs = lengthMs/2;
+ lenMult = 2;
+ }
+ else if (eventCode == 3)
+ {
+ eventCode = 1;
+ nTones = 3;
+ sleepMs = 0;
+ lenMult = 1;
+ }
+ for (int i = 0; i < nTones; i++)
+ {
+ TEST2(_veDTMFPtr->SendTelephoneEvent(_channel, eventCode, outBand, lengthMs, attenuationDb) == 0,
+ _T("SendTelephoneEvent(channel=%d, eventCode=%u, outBand=%d, lengthMs=%d, attenuationDb=%d)"), _channel, eventCode, outBand, lengthMs, attenuationDb);
+ Sleep(sleepMs);
+ lengthMs = lenMult*lengthMs;
+ eventCode++;
+ }
+ }
+ }
+}
+
+void CTelephonyEvent::OnBnClickedButtonSendTelephoneEvent()
+{
+ BOOL ret;
+ unsigned char eventCode(0);
+
+ eventCode = (unsigned char)GetDlgItemInt(IDC_EDIT_EVENT_CODE, &ret);
+ if (ret == FALSE)
+ {
+ return;
+ }
+ SendTelephoneEvent(eventCode);
+}
+
// Keypad handlers: each forwards the corresponding DTMF event code.
// Digits 1-9 map directly; button 10 is '*' (event 10), button 11 is the
// digit 0 (event 0), button 12 is '#' (event 11), and A-D are events 12-15.

void CTelephonyEvent::OnBnClickedButton1()
{
    SendTelephoneEvent(1);
}

void CTelephonyEvent::OnBnClickedButton2()
{
    SendTelephoneEvent(2);
}

void CTelephonyEvent::OnBnClickedButton3()
{
    SendTelephoneEvent(3);
}

void CTelephonyEvent::OnBnClickedButton4()
{
    SendTelephoneEvent(4);
}

void CTelephonyEvent::OnBnClickedButton5()
{
    SendTelephoneEvent(5);
}

void CTelephonyEvent::OnBnClickedButton6()
{
    SendTelephoneEvent(6);
}

void CTelephonyEvent::OnBnClickedButton7()
{
    SendTelephoneEvent(7);
}

void CTelephonyEvent::OnBnClickedButton8()
{
    SendTelephoneEvent(8);
}

void CTelephonyEvent::OnBnClickedButton9()
{
    SendTelephoneEvent(9);
}

void CTelephonyEvent::OnBnClickedButton10()
{
    // *
    SendTelephoneEvent(10);
}

void CTelephonyEvent::OnBnClickedButton11()
{
    // digit 0
    SendTelephoneEvent(0);
}

void CTelephonyEvent::OnBnClickedButton12()
{
    // #
    SendTelephoneEvent(11);
}

void CTelephonyEvent::OnBnClickedButtonA()
{
    SendTelephoneEvent(12);
}

void CTelephonyEvent::OnBnClickedButtonB()
{
    SendTelephoneEvent(13);
}

void CTelephonyEvent::OnBnClickedButtonC()
{
    SendTelephoneEvent(14);
}

void CTelephonyEvent::OnBnClickedButtonD()
{
    SendTelephoneEvent(15);
}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfPlayoutRx()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAYOUT_RX);
+ int check = button->GetCheck();
+ const bool enable = (check == BST_CHECKED);
+ TEST2(_veDTMFPtr->SetDtmfPlayoutStatus(_channel, enable) == 0, _T("SetDtmfPlayoutStatus(channel=%d, enable=%d)"), _channel, enable);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfPlayTone()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_PLAY_TONE);
+ int check = button->GetCheck();
+ _PlayDtmfToneLocally = (check == BST_CHECKED);
+}
+
// Mode radio buttons: exactly one of the three mode flags is set.

// Single mode: one event/tone per key press.
void CTelephonyEvent::OnBnClickedRadioSingle()
{
    _modeStartStop = false;
    _modeSingle = true;
    _modeSequence = false;
}

// Sequence mode: a scripted burst of events per key press.
void CTelephonyEvent::OnBnClickedRadioMulti()
{
    _modeStartStop = false;
    _modeSingle = false;
    _modeSequence = true;
}

// Start/stop mode: alternate presses start and stop a continuous tone.
void CTelephonyEvent::OnBnClickedRadioStartStop()
{
    // CButton* button = (CButton*)GetDlgItem(IDC_RADIO_START_STOP);
    // int check = button->GetCheck();
    _modeStartStop = true;
    _modeSingle = false;
    _modeSequence = false;
    // GetDlgItem(IDC_EDIT_EVENT_LENGTH)->EnableWindow();
}
+
+void CTelephonyEvent::OnBnClickedCheckEventInband()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_INBAND);
+ int check = button->GetCheck();
+ GetDlgItem(IDC_EDIT_EVENT_CODE)->EnableWindow(check?FALSE:TRUE);
+ GetDlgItem(IDC_BUTTON_SEND_TELEPHONE_EVENT)->EnableWindow(check?FALSE:TRUE);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDtmfFeedback()
+{
+ CButton* button(NULL);
+
+ // Retrieve feedback state
+ button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
+ int check = button->GetCheck();
+ const bool enable = (check == BST_CHECKED);
+
+ // Retrieve direct-feedback setting
+ button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
+ check = button->GetCheck();
+ const bool directFeedback = (check == BST_CHECKED);
+
+ // GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK)->EnableWindow(enable ? TRUE : FALSE);
+
+ TEST2(_veDTMFPtr->SetDtmfFeedbackStatus(enable, directFeedback) == 0,
+ _T("SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)"), enable, directFeedback);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDirectFeedback()
+{
+ CButton* button(NULL);
+
+ // Retrieve feedback state
+ button = (CButton*)GetDlgItem(IDC_CHECK_DTMF_FEEDBACK);
+ int check = button->GetCheck();
+ const bool enable = (check == BST_CHECKED);
+
+ // Retrieve new direct-feedback setting
+ button = (CButton*)GetDlgItem(IDC_CHECK_DIRECT_FEEDBACK);
+ check = button->GetCheck();
+ const bool directFeedback = (check == BST_CHECKED);
+
+ TEST2(_veDTMFPtr->SetDtmfFeedbackStatus(enable, directFeedback) == 0,
+ _T("SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)"), enable, directFeedback);
+}
+
+void CTelephonyEvent::OnBnClickedButtonSetRxTelephonePt()
+{
+ BOOL ret;
+ int pt = GetDlgItemInt(IDC_EDIT_EVENT_RX_PT, &ret);
+ if (ret == FALSE)
+ return;
+ CodecInst codec;
+ strcpy_s(codec.plname, 32, "telephone-event");
+ codec.pltype = pt; codec.channels = 1; codec.plfreq = 8000;
+ TEST2(_veCodecPtr->SetRecPayloadType(_channel, codec) == 0,
+ _T("SetSendTelephoneEventPayloadType(channel=%d, codec.pltype=%u)"), _channel, codec.pltype);
+}
+
+void CTelephonyEvent::OnBnClickedButtonSetTxTelephonePt()
+{
+ BOOL ret;
+ int pt = GetDlgItemInt(IDC_EDIT_EVENT_TX_PT, &ret);
+ if (ret == FALSE)
+ return;
+ TEST2(_veDTMFPtr->SetSendTelephoneEventPayloadType(_channel, pt) == 0,
+ _T("SetSendTelephoneEventPayloadType(channel=%d, type=%u)"), _channel, pt);
+}
+
+void CTelephonyEvent::OnBnClickedCheckDetectInband()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_INBAND);
+ int check = button->GetCheck();
+ _inbandEventDetection = (check == BST_CHECKED);
+
+ bool enabled(false);
+ TelephoneEventDetectionMethods detectionMethod;
+ _veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod);
+ if (enabled)
+ {
+ // deregister
+ _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+ delete _telephoneEventObserverPtr;
+ _telephoneEventObserverPtr = NULL;
+ SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+ SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+ }
+ OnBnClickedCheckEventDetection();
+}
+
+void CTelephonyEvent::OnBnClickedCheckDetectOutOfBand()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_DETECT_OUT_OF_BAND);
+ int check = button->GetCheck();
+ _outOfBandEventDetection = (check == BST_CHECKED);
+
+ bool enabled(false);
+ TelephoneEventDetectionMethods detectionMethod;
+ _veDTMFPtr->GetTelephoneEventDetectionStatus(_channel, enabled, detectionMethod);
+ if (enabled)
+ {
+ // deregister
+ _veDTMFPtr->DeRegisterTelephoneEventDetection(_channel);
+ delete _telephoneEventObserverPtr;
+ _telephoneEventObserverPtr = NULL;
+ SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+ SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+ }
+ OnBnClickedCheckEventDetection();
+}
+
+void CTelephonyEvent::OnBnClickedCheckEventDetection()
+{
+ CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EVENT_DETECTION);
+ int check = button->GetCheck();
+ const bool enable = (check == BST_CHECKED);
+
+ if (enable)
+ {
+ TelephoneEventDetectionMethods method(kInBand);
+ if (_inbandEventDetection && !_outOfBandEventDetection)
+ method = kInBand;
+ else if (!_inbandEventDetection && _outOfBandEventDetection)
+ method = kOutOfBand;
+ else if (_inbandEventDetection && _outOfBandEventDetection)
+ method = kInAndOutOfBand;
+
+ CWnd* wndOut = GetDlgItem(IDC_EDIT_ON_EVENT_OUT_OF_BAND);
+ CWnd* wndIn = GetDlgItem(IDC_EDIT_ON_EVENT_INBAND);
+ _telephoneEventObserverPtr = new TelephoneEventObserver(wndOut, wndIn);
+
+ TEST2(_veDTMFPtr->RegisterTelephoneEventDetection(_channel, method, *_telephoneEventObserverPtr) == 0,
+ _T("RegisterTelephoneEventDetection(channel=%d, detectionMethod=%d)"), _channel, method);
+ }
+ else
+ {
+ TEST2(_veDTMFPtr->DeRegisterTelephoneEventDetection(_channel) == 0,
+ _T("DeRegisterTelephoneEventDetection(channel=%d)"), _channel);
+ delete _telephoneEventObserverPtr;
+ _telephoneEventObserverPtr = NULL;
+ SetDlgItemText(IDC_EDIT_ON_EVENT_INBAND,_T(""));
+ SetDlgItemText(IDC_EDIT_ON_EVENT_OUT_OF_BAND,_T(""));
+ }
+}
+
+// ============================================================================
+// CWinTestDlg dialog
+// ============================================================================
+
+CWinTestDlg::CWinTestDlg(CWnd* pParent /*=NULL*/)
+ : CDialog(CWinTestDlg::IDD, pParent),
+ _failCount(0),
+ _vePtr(NULL),
+ _veBasePtr(NULL),
+ _veCodecPtr(NULL),
+ _veNetworkPtr(NULL),
+ _veFilePtr(NULL),
+ _veHardwarePtr(NULL),
+ _veExternalMediaPtr(NULL),
+ _veApmPtr(NULL),
+ _veEncryptionPtr(NULL),
+ _veRtpRtcpPtr(NULL),
+ _transportPtr(NULL),
+ _encryptionPtr(NULL),
+ _externalMediaPtr(NULL),
+ _externalTransport(false),
+ _externalTransportBuild(false),
+ _checkPlayFileIn(0),
+ _checkPlayFileIn1(0),
+ _checkPlayFileIn2(0),
+ _checkPlayFileOut1(0),
+ _checkPlayFileOut2(0),
+ _checkAGC(0),
+ _checkAGC1(0),
+ _checkNS(0),
+ _checkNS1(0),
+ _checkEC(0),
+ _checkVAD1(0),
+ _checkVAD2(0),
+ _checkSrtpTx1(0),
+ _checkSrtpTx2(0),
+ _checkSrtpRx1(0),
+ _checkSrtpRx2(0),
+ _checkConference1(0),
+ _checkConference2(0),
+ _checkOnHold1(0),
+ _checkOnHold2(0),
+ _strComboIp1(_T("")),
+ _strComboIp2(_T("")),
+ _delayEstimate1(false),
+ _delayEstimate2(false),
+ _rxVad(false),
+ _nErrorCallbacks(0),
+ _timerTicks(0)
+{
+ m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
+
+ _vePtr = VoiceEngine::Create();
+
+ VoiceEngine::SetTraceFilter(kTraceNone);
+ // VoiceEngine::SetTraceFilter(kTraceAll);
+ // VoiceEngine::SetTraceFilter(kTraceStream | kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | kTraceApiCall | kTraceModuleCall | kTraceMemory | kTraceDebug | kTraceInfo);
+ // VoiceEngine::SetTraceFilter(kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | kTraceApiCall | kTraceModuleCall | kTraceMemory | kTraceInfo);
+
+ VoiceEngine::SetTraceFile("ve_win_test.txt");
+ VoiceEngine::SetTraceCallback(NULL);
+
+ if (_vePtr)
+ {
+ _veExternalMediaPtr = VoEExternalMedia::GetInterface(_vePtr);
+ _veVolumeControlPtr = VoEVolumeControl::GetInterface(_vePtr);
+ _veEncryptionPtr = VoEEncryption::GetInterface(_vePtr);
+ _veVideoSyncPtr = VoEVideoSync::GetInterface(_vePtr);
+ _veNetworkPtr = VoENetwork::GetInterface(_vePtr);
+ _veFilePtr = VoEFile::GetInterface(_vePtr);
+ _veApmPtr = VoEAudioProcessing::GetInterface(_vePtr);
+
+ _veBasePtr = VoEBase::GetInterface(_vePtr);
+ _veCodecPtr = VoECodec::GetInterface(_vePtr);
+ _veHardwarePtr = VoEHardware::GetInterface(_vePtr);
+ _veRtpRtcpPtr = VoERTP_RTCP::GetInterface(_vePtr);
+ _transportPtr = new MyTransport(_veNetworkPtr);
+ _encryptionPtr = new MyEncryption();
+ _externalMediaPtr = new MediaProcessImpl();
+ _connectionObserverPtr = new ConnectionObserver();
+ _rxVadObserverPtr = new RxCallback();
+ }
+
+ _veBasePtr->RegisterVoiceEngineObserver(*this);
+
+ std::string resource_path = webrtc::test::ProjectRootPath();
+ if (resource_path == webrtc::test::kCannotFindProjectRootDir) {
+ _long_audio_file_path = "./";
+ } else {
+ _long_audio_file_path = resource_path + "data\\voice_engine\\";
+ }
+}
+
+CWinTestDlg::~CWinTestDlg()
+{
+ if (_connectionObserverPtr) delete _connectionObserverPtr;
+ if (_externalMediaPtr) delete _externalMediaPtr;
+ if (_transportPtr) delete _transportPtr;
+ if (_encryptionPtr) delete _encryptionPtr;
+ if (_rxVadObserverPtr) delete _rxVadObserverPtr;
+
+ if (_veExternalMediaPtr) _veExternalMediaPtr->Release();
+ if (_veEncryptionPtr) _veEncryptionPtr->Release();
+ if (_veVideoSyncPtr) _veVideoSyncPtr->Release();
+ if (_veVolumeControlPtr) _veVolumeControlPtr->Release();
+
+ if (_veBasePtr) _veBasePtr->Terminate();
+ if (_veBasePtr) _veBasePtr->Release();
+
+ if (_veCodecPtr) _veCodecPtr->Release();
+ if (_veNetworkPtr) _veNetworkPtr->Release();
+ if (_veFilePtr) _veFilePtr->Release();
+ if (_veHardwarePtr) _veHardwarePtr->Release();
+ if (_veApmPtr) _veApmPtr->Release();
+ if (_veRtpRtcpPtr) _veRtpRtcpPtr->Release();
+ if (_vePtr)
+ {
+ VoiceEngine::Delete(_vePtr);
+ }
+ VoiceEngine::SetTraceFilter(kTraceNone);
+}
+
// DDX hook: binds the two destination-IP combo boxes to their CString members.
void CWinTestDlg::DoDataExchange(CDataExchange* pDX)
{
    CDialog::DoDataExchange(pDX);
    DDX_CBString(pDX, IDC_COMBO_IP_1, _strComboIp1);
    DDX_CBString(pDX, IDC_COMBO_IP_2, _strComboIp2);
}
+
// Message map for the main test dialog: window messages, per-channel
// create/listen/playout/send controls, device/volume/APM settings, and
// miscellaneous test buttons.
BEGIN_MESSAGE_MAP(CWinTestDlg, CDialog)
    ON_WM_SYSCOMMAND()
    ON_WM_PAINT()
    ON_WM_QUERYDRAGICON()
    ON_WM_TIMER()
    //}}AFX_MSG_MAP
    // --- Channel 1 and 2 lifecycle / transport controls
    ON_BN_CLICKED(IDC_BUTTON_CREATE_1, &CWinTestDlg::OnBnClickedButtonCreate1)
    ON_BN_CLICKED(IDC_BUTTON_DELETE_1, &CWinTestDlg::OnBnClickedButtonDelete1)
    ON_BN_CLICKED(IDC_BUTTON_CREATE_2, &CWinTestDlg::OnBnClickedButtonCreate2)
    ON_BN_CLICKED(IDC_BUTTON_DELETE_2, &CWinTestDlg::OnBnClickedButtonDelete2)
    ON_CBN_SELCHANGE(IDC_COMBO_CODEC_1, &CWinTestDlg::OnCbnSelchangeComboCodec1)
    ON_BN_CLICKED(IDC_BUTTON_START_LISTEN_1, &CWinTestDlg::OnBnClickedButtonStartListen1)
    ON_BN_CLICKED(IDC_BUTTON_STOP_LISTEN_1, &CWinTestDlg::OnBnClickedButtonStopListen1)
    ON_BN_CLICKED(IDC_BUTTON_START_PLAYOUT_1, &CWinTestDlg::OnBnClickedButtonStartPlayout1)
    ON_BN_CLICKED(IDC_BUTTON_STOP_PLAYOUT_1, &CWinTestDlg::OnBnClickedButtonStopPlayout1)
    ON_BN_CLICKED(IDC_BUTTON_START_SEND_1, &CWinTestDlg::OnBnClickedButtonStartSend1)
    ON_BN_CLICKED(IDC_BUTTON_STOP_SEND_1, &CWinTestDlg::OnBnClickedButtonStopSend1)
    ON_CBN_SELCHANGE(IDC_COMBO_IP_2, &CWinTestDlg::OnCbnSelchangeComboIp2)
    ON_CBN_SELCHANGE(IDC_COMBO_IP_1, &CWinTestDlg::OnCbnSelchangeComboIp1)
    ON_CBN_SELCHANGE(IDC_COMBO_CODEC_2, &CWinTestDlg::OnCbnSelchangeComboCodec2)
    ON_BN_CLICKED(IDC_BUTTON_START_LISTEN_2, &CWinTestDlg::OnBnClickedButtonStartListen2)
    ON_BN_CLICKED(IDC_BUTTON_STOP_LISTEN_2, &CWinTestDlg::OnBnClickedButtonStopListen2)
    ON_BN_CLICKED(IDC_BUTTON_START_PLAYOUT_2, &CWinTestDlg::OnBnClickedButtonStartPlayout2)
    ON_BN_CLICKED(IDC_BUTTON_STOP_PLAYOUT_2, &CWinTestDlg::OnBnClickedButtonStopPlayout2)
    ON_BN_CLICKED(IDC_BUTTON_START_SEND_2, &CWinTestDlg::OnBnClickedButtonStartSend2)
    ON_BN_CLICKED(IDC_BUTTON_STOP_SEND_2, &CWinTestDlg::OnBnClickedButtonStopSend2)
    // --- External transport / file playout / external media per channel
    ON_BN_CLICKED(IDC_CHECK_EXT_TRANS_1, &CWinTestDlg::OnBnClickedCheckExtTrans1)
    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN_1, &CWinTestDlg::OnBnClickedCheckPlayFileIn1)
    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_OUT_1, &CWinTestDlg::OnBnClickedCheckPlayFileOut1)
    ON_BN_CLICKED(IDC_CHECK_EXT_TRANS_2, &CWinTestDlg::OnBnClickedCheckExtTrans2)
    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN_2, &CWinTestDlg::OnBnClickedCheckPlayFileIn2)
    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_OUT_2, &CWinTestDlg::OnBnClickedCheckPlayFileOut2)
    ON_BN_CLICKED(IDC_CHECK_PLAY_FILE_IN, &CWinTestDlg::OnBnClickedCheckPlayFileIn)
    // --- Devices, volume, APM
    ON_CBN_SELCHANGE(IDC_COMBO_REC_DEVICE, &CWinTestDlg::OnCbnSelchangeComboRecDevice)
    ON_CBN_SELCHANGE(IDC_COMBO_PLAY_DEVICE, &CWinTestDlg::OnCbnSelchangeComboPlayDevice)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN_1, &CWinTestDlg::OnBnClickedCheckExtMediaIn1)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT_1, &CWinTestDlg::OnBnClickedCheckExtMediaOut1)
    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_INPUT_VOLUME, &CWinTestDlg::OnNMReleasedcaptureSliderInputVolume)
    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_OUTPUT_VOLUME, &CWinTestDlg::OnNMReleasedcaptureSliderOutputVolume)
    ON_BN_CLICKED(IDC_CHECK_AGC, &CWinTestDlg::OnBnClickedCheckAgc)
    ON_BN_CLICKED(IDC_CHECK_NS, &CWinTestDlg::OnBnClickedCheckNs)
    ON_BN_CLICKED(IDC_CHECK_EC, &CWinTestDlg::OnBnClickedCheckEc)
    ON_BN_CLICKED(IDC_CHECK_VAD_1, &CWinTestDlg::OnBnClickedCheckVad1)
    ON_BN_CLICKED(IDC_CHECK_VAD_3, &CWinTestDlg::OnBnClickedCheckVad2)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN_2, &CWinTestDlg::OnBnClickedCheckExtMediaIn2)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT_2, &CWinTestDlg::OnBnClickedCheckExtMediaOut2)
    ON_BN_CLICKED(IDC_CHECK_MUTE_IN, &CWinTestDlg::OnBnClickedCheckMuteIn)
    ON_BN_CLICKED(IDC_CHECK_MUTE_IN_1, &CWinTestDlg::OnBnClickedCheckMuteIn1)
    ON_BN_CLICKED(IDC_CHECK_MUTE_IN_2, &CWinTestDlg::OnBnClickedCheckMuteIn2)
    // --- SRTP / external encryption
    ON_BN_CLICKED(IDC_CHECK_SRTP_TX_1, &CWinTestDlg::OnBnClickedCheckSrtpTx1)
    ON_BN_CLICKED(IDC_CHECK_SRTP_RX_1, &CWinTestDlg::OnBnClickedCheckSrtpRx1)
    ON_BN_CLICKED(IDC_CHECK_SRTP_TX_2, &CWinTestDlg::OnBnClickedCheckSrtpTx2)
    ON_BN_CLICKED(IDC_CHECK_SRTP_RX_2, &CWinTestDlg::OnBnClickedCheckSrtpRx2)
    ON_BN_CLICKED(IDC_CHECK_EXT_ENCRYPTION_1, &CWinTestDlg::OnBnClickedCheckExtEncryption1)
    ON_BN_CLICKED(IDC_CHECK_EXT_ENCRYPTION_2, &CWinTestDlg::OnBnClickedCheckExtEncryption2)
    // --- DTMF dialogs, recording and misc test controls
    ON_BN_CLICKED(IDC_BUTTON_DTMF_1, &CWinTestDlg::OnBnClickedButtonDtmf1)
    ON_BN_CLICKED(IDC_CHECK_REC_MIC, &CWinTestDlg::OnBnClickedCheckRecMic)
    ON_BN_CLICKED(IDC_BUTTON_DTMF_2, &CWinTestDlg::OnBnClickedButtonDtmf2)
    ON_BN_CLICKED(IDC_BUTTON_TEST_1, &CWinTestDlg::OnBnClickedButtonTest1)
    ON_BN_CLICKED(IDC_CHECK_CONFERENCE_1, &CWinTestDlg::OnBnClickedCheckConference1)
    ON_BN_CLICKED(IDC_CHECK_CONFERENCE_2, &CWinTestDlg::OnBnClickedCheckConference2)
    ON_BN_CLICKED(IDC_CHECK_ON_HOLD_1, &CWinTestDlg::OnBnClickedCheckOnHold1)
    ON_BN_CLICKED(IDC_CHECK_ON_HOLD_2, &CWinTestDlg::OnBnClickedCheckOnHold2)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_IN, &CWinTestDlg::OnBnClickedCheckExtMediaIn)
    ON_BN_CLICKED(IDC_CHECK_EXT_MEDIA_OUT, &CWinTestDlg::OnBnClickedCheckExtMediaOut)
    ON_LBN_SELCHANGE(IDC_LIST_CODEC_1, &CWinTestDlg::OnLbnSelchangeListCodec1)
    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_PAN_LEFT, &CWinTestDlg::OnNMReleasedcaptureSliderPanLeft)
    ON_NOTIFY(NM_RELEASEDCAPTURE, IDC_SLIDER_PAN_RIGHT, &CWinTestDlg::OnNMReleasedcaptureSliderPanRight)
    ON_BN_CLICKED(IDC_BUTTON_VERSION, &CWinTestDlg::OnBnClickedButtonVersion)
    ON_BN_CLICKED(IDC_CHECK_DELAY_ESTIMATE_1, &CWinTestDlg::OnBnClickedCheckDelayEstimate1)
    ON_BN_CLICKED(IDC_CHECK_RXVAD, &CWinTestDlg::OnBnClickedCheckRxvad)
    ON_BN_CLICKED(IDC_CHECK_AGC_1, &CWinTestDlg::OnBnClickedCheckAgc1)
    ON_BN_CLICKED(IDC_CHECK_NS_1, &CWinTestDlg::OnBnClickedCheckNs1)
    ON_BN_CLICKED(IDC_CHECK_REC_CALL, &CWinTestDlg::OnBnClickedCheckRecCall)
    ON_BN_CLICKED(IDC_CHECK_TYPING_DETECTION, &CWinTestDlg::OnBnClickedCheckTypingDetection)
    ON_BN_CLICKED(IDC_CHECK_FEC, &CWinTestDlg::OnBnClickedCheckFEC)
    ON_BN_CLICKED(IDC_BUTTON_CLEAR_ERROR_CALLBACK, &CWinTestDlg::OnBnClickedButtonClearErrorCallback)
END_MESSAGE_MAP()
+
+BOOL CWinTestDlg::UpdateTest(bool failed, const CString& strMsg)
+{
+ if (failed)
+ {
+ SetDlgItemText(IDC_EDIT_MESSAGE, strMsg);
+ _strErr.Format(_T("FAILED (error=%d)"), _veBasePtr->LastError());
+ SetDlgItemText(IDC_EDIT_RESULT, _strErr);
+ _failCount++;
+ SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount);
+ SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError());
+ }
+ else
+ {
+ SetDlgItemText(IDC_EDIT_MESSAGE, strMsg);
+ SetDlgItemText(IDC_EDIT_RESULT, _T("OK"));
+ }
+ return TRUE;
+}
+
+
+// CWinTestDlg message handlers
+
+BOOL CWinTestDlg::OnInitDialog()
+{
+ CDialog::OnInitDialog();
+
+ // Add "About..." menu item to system menu.
+
+ // IDM_ABOUTBOX must be in the system command range.
+ ASSERT((IDM_ABOUTBOX & 0xFFF0) == IDM_ABOUTBOX);
+ ASSERT(IDM_ABOUTBOX < 0xF000);
+
+ CMenu* pSysMenu = GetSystemMenu(FALSE);
+ if (pSysMenu != NULL)
+ {
+ CString strAboutMenu;
+ strAboutMenu.LoadString(IDS_ABOUTBOX);
+ if (!strAboutMenu.IsEmpty())
+ {
+ pSysMenu->AppendMenu(MF_SEPARATOR);
+ pSysMenu->AppendMenu(MF_STRING, IDM_ABOUTBOX, strAboutMenu);
+ }
+ }
+
+ // Set the icon for this dialog. The framework does this automatically
+ // when the application's main window is not a dialog
+ SetIcon(m_hIcon, TRUE); // Set big icon
+ SetIcon(m_hIcon, FALSE); // Set small icon
+
+ // char version[1024];
+ // _veBasePtr->GetVersion(version);
+ // AfxMessageBox(version, MB_OK);
+
+ if (_veBasePtr->Init() != 0)
+ {
+ AfxMessageBox(_T("Init() failed "), MB_OKCANCEL);
+ }
+
+ int ch = _veBasePtr->CreateChannel();
+ if (_veBasePtr->SetSendDestination(ch, 1234, "127.0.0.1") == -1)
+ {
+ if (_veBasePtr->LastError() == VE_EXTERNAL_TRANSPORT_ENABLED)
+ {
+ _strMsg.Format(_T("*** External transport build ***"));
+ SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg);
+ _externalTransportBuild = true;
+ }
+ }
+ _veBasePtr->DeleteChannel(ch);
+
+ // --- Add (preferred) local IPv4 address in title
+
+ if (_veNetworkPtr)
+ {
+ char localIP[64];
+ _veNetworkPtr->GetLocalIP(localIP);
+ CString str;
+ GetWindowText(str);
+ str.AppendFormat(_T(" [Local IPv4 address: %s]"), CharToTchar(localIP, 64));
+ SetWindowText(str);
+ }
+
+ // --- Volume sliders
+
+ if (_veVolumeControlPtr)
+ {
+ unsigned int volume(0);
+ CSliderCtrl* slider(NULL);
+
+ slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+ slider->SetRangeMin(0);
+ slider->SetRangeMax(255);
+ _veVolumeControlPtr->GetMicVolume(volume);
+ slider->SetPos(volume);
+
+ slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+ slider->SetRangeMin(0);
+ slider->SetRangeMax(255);
+ _veVolumeControlPtr->GetSpeakerVolume(volume);
+ slider->SetPos(volume);
+ }
+
+ // --- Panning sliders
+
+ if (_veVolumeControlPtr)
+ {
+ float lVol(0.0);
+ float rVol(0.0);
+ int leftVol, rightVol;
+ CSliderCtrl* slider(NULL);
+
+ _veVolumeControlPtr->GetOutputVolumePan(-1, lVol, rVol);
+
+ leftVol = (int)(lVol*10.0f); // [0,10]
+ rightVol = (int)(rVol*10.0f); // [0,10]
+
+ slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_LEFT);
+ slider->SetRange(0,10);
+ slider->SetPos(10-leftVol); // pos 0 <=> max pan 1.0 (top of slider)
+
+ slider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_RIGHT);
+ slider->SetRange(0,10);
+ slider->SetPos(10-rightVol);
+ }
+
+ // --- APM settings
+
+ bool enable(false);
+ CButton* button(NULL);
+
+ AgcModes agcMode(kAgcDefault);
+ if (_veApmPtr->GetAgcStatus(enable, agcMode) == 0)
+ {
+ button = (CButton*)GetDlgItem(IDC_CHECK_AGC);
+ enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+ }
+ else
+ {
+ // AGC is not supported
+ GetDlgItem(IDC_CHECK_AGC)->EnableWindow(FALSE);
+ }
+
+ NsModes nsMode(kNsDefault);
+ if (_veApmPtr->GetNsStatus(enable, nsMode) == 0)
+ {
+ button = (CButton*)GetDlgItem(IDC_CHECK_NS);
+ enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+ }
+ else
+ {
+ // NS is not supported
+ GetDlgItem(IDC_CHECK_NS)->EnableWindow(FALSE);
+ }
+
+ EcModes ecMode(kEcDefault);
+ if (_veApmPtr->GetEcStatus(enable, ecMode) == 0)
+ {
+ button = (CButton*)GetDlgItem(IDC_CHECK_EC);
+ enable ? button->SetCheck(BST_CHECKED) : button->SetCheck(BST_UNCHECKED);
+ }
+ else
+ {
+ // EC is not supported
+ GetDlgItem(IDC_CHECK_EC)->EnableWindow(FALSE);
+ }
+
+ // --- First channel section
+
+ GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_CONFERENCE_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_FEC)->EnableWindow(FALSE);
+
+ CComboBox* comboIP(NULL);
+ comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_1);
+ comboIP->AddString(_T("127.0.0.1"));
+ comboIP->SetCurSel(0);
+
+ SetDlgItemInt(IDC_EDIT_TX_PORT_1, 1111);
+ SetDlgItemInt(IDC_EDIT_RX_PORT_1, 1111);
+
+ // --- Add supported codecs to the codec combo box
+
+ CComboBox* comboCodec(NULL);
+ comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_1);
+ comboCodec->ResetContent();
+
+ int numCodecs = _veCodecPtr->NumOfCodecs();
+ for (int idx = 0; idx < numCodecs; idx++)
+ {
+ CodecInst codec;
+ _veCodecPtr->GetCodec(idx, codec);
+ if ((_stricmp(codec.plname, "CNNB") != 0) &&
+ (_stricmp(codec.plname, "CNWB") != 0))
+ {
+ CString strCodec;
+ if (_stricmp(codec.plname, "G7221") == 0)
+ strCodec.Format(_T("%s (%d/%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000, codec.rate/1000);
+ else
+ strCodec.Format(_T("%s (%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000);
+ comboCodec->AddString(strCodec);
+ }
+ if (idx == 0)
+ {
+ SetDlgItemInt(IDC_EDIT_CODEC_1, codec.pltype);
+ }
+ }
+ comboCodec->SetCurSel(0);
+
+ CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+ list->AddString(_T("pltype"));
+ list->AddString(_T("plfreq"));
+ list->AddString(_T("pacsize"));
+ list->AddString(_T("channels"));
+ list->AddString(_T("rate"));
+ list->SetCurSel(0);
+
+ // --- Add available audio devices to the combo boxes
+
+ CComboBox* comboRecDevice(NULL);
+ CComboBox* comboPlayDevice(NULL);
+ comboRecDevice = (CComboBox*)GetDlgItem(IDC_COMBO_REC_DEVICE);
+ comboPlayDevice = (CComboBox*)GetDlgItem(IDC_COMBO_PLAY_DEVICE);
+ comboRecDevice->ResetContent();
+ comboPlayDevice->ResetContent();
+
+ if (_veHardwarePtr)
+ {
+ int numPlayout(0);
+ int numRecording(0);
+ char nameStr[128];
+ char guidStr[128];
+ CString strDevice;
+ AudioLayers audioLayer;
+
+ _veHardwarePtr->GetAudioDeviceLayer(audioLayer);
+ if (kAudioWindowsWave == audioLayer)
+ {
+ strDevice.FormatMessage(_T("Audio Layer: Windows Wave API"));
+ }
+ else if (kAudioWindowsCore == audioLayer)
+ {
+ strDevice.FormatMessage(_T("Audio Layer: Windows Core API"));
+ }
+ else
+ {
+ strDevice.FormatMessage(_T("Audio Layer: ** UNKNOWN **"));
+ }
+ SetDlgItemText(IDC_EDIT_AUDIO_LAYER, (LPCTSTR)strDevice);
+
+ _veHardwarePtr->GetNumOfRecordingDevices(numRecording);
+
+ for (int idx = 0; idx < numRecording; idx++)
+ {
+ _veHardwarePtr->GetRecordingDeviceName(idx, nameStr, guidStr);
+ strDevice.Format(_T("%s"), CharToTchar(nameStr, 128));
+ comboRecDevice->AddString(strDevice);
+ }
+ // Select default (communication) device in the combo box
+ _veHardwarePtr->GetRecordingDeviceName(-1, nameStr, guidStr);
+ CString tmp = CString(nameStr);
+ int nIndex = comboRecDevice->SelectString(-1, tmp);
+ ASSERT(nIndex != CB_ERR);
+
+ _veHardwarePtr->GetNumOfPlayoutDevices(numPlayout);
+
+ for (int idx = 0; idx < numPlayout; idx++)
+ {
+ _veHardwarePtr->GetPlayoutDeviceName(idx, nameStr, guidStr);
+ strDevice.Format(_T("%s"), CharToTchar(nameStr, 128));
+ comboPlayDevice->AddString(strDevice);
+ }
+ // Select default (communication) device in the combo box
+ _veHardwarePtr->GetPlayoutDeviceName(-1, nameStr, guidStr);
+ nIndex = comboPlayDevice->SelectString(-1, CString(nameStr));
+ ASSERT(nIndex != CB_ERR);
+ }
+
+ // --- Second channel section
+
+ GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(FALSE);
+ GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(FALSE);
+
+ comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_2);
+ comboIP->AddString(_T("127.0.0.1"));
+ comboIP->SetCurSel(0);
+
+ SetDlgItemInt(IDC_EDIT_TX_PORT_2, 2222);
+ SetDlgItemInt(IDC_EDIT_RX_PORT_2, 2222);
+
+ comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_2);
+ comboCodec->ResetContent();
+
+ if (_veCodecPtr)
+ {
+ numCodecs = _veCodecPtr->NumOfCodecs();
+ for (int idx = 0; idx < numCodecs; idx++)
+ {
+ CodecInst codec;
+ _veCodecPtr->GetCodec(idx, codec);
+ CString strCodec;
+ strCodec.Format(_T("%s (%d/%d)"), CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq/1000);
+ comboCodec->AddString(strCodec);
+ }
+ comboCodec->SetCurSel(0);
+ }
+
+ // --- Start windows timer
+
+ SetTimer(0, 1000, NULL);
+
+ return TRUE; // return TRUE unless you set the focus to a control
+}
+
+// Handles WM_SYSCOMMAND. Shows the About box for IDM_ABOUTBOX; on SC_CLOSE,
+// deletes any VoiceEngine channels whose ids are still shown in the two
+// channel edit fields before forwarding to the default handler.
+void CWinTestDlg::OnSysCommand(UINT nID, LPARAM lParam)
+{
+    if ((nID & 0xFFF0) == IDM_ABOUTBOX)  // low four bits are reserved by Windows
+    {
+        CAboutDlg dlgAbout;
+        dlgAbout.DoModal();
+    }
+    else if (nID == SC_CLOSE)
+    {
+        BOOL ret;
+        int channel(0);
+        // GetDlgItemInt sets ret to FALSE when the field is empty/non-numeric,
+        // i.e. the channel was never created or was already deleted.
+        channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+        if (ret == TRUE)
+        {
+            _veBasePtr->DeleteChannel(channel);
+        }
+        channel = GetDlgItemInt(IDC_EDIT_2, &ret);
+        if (ret == TRUE)
+        {
+            _veBasePtr->DeleteChannel(channel);
+        }
+
+        CDialog::OnSysCommand(nID, lParam);
+    }
+    else
+    {
+        CDialog::OnSysCommand(nID, lParam);
+    }
+
+}
+
+// If you add a minimize button to your dialog, you will need the code below
+// to draw the icon. For MFC applications using the document/view model,
+// this is automatically done for you by the framework.
+
+// Paints the dialog. When minimized (iconic), draws the application icon
+// centered in the client area; otherwise defers to the default painting.
+void CWinTestDlg::OnPaint()
+{
+    if (IsIconic())
+    {
+        CPaintDC dc(this); // device context for painting
+
+        // Ask the shell to erase the icon background before drawing.
+        SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);
+
+        // Center icon in client rectangle
+        int cxIcon = GetSystemMetrics(SM_CXICON);
+        int cyIcon = GetSystemMetrics(SM_CYICON);
+        CRect rect;
+        GetClientRect(&rect);
+        int x = (rect.Width() - cxIcon + 1) / 2;
+        int y = (rect.Height() - cyIcon + 1) / 2;
+
+        // Draw the icon
+        dc.DrawIcon(x, y, m_hIcon);
+    }
+    else
+    {
+        CDialog::OnPaint();
+    }
+}
+
+// The system calls this function to obtain the cursor to display while the user drags
+// the minimized window.
+HCURSOR CWinTestDlg::OnQueryDragIcon()
+{
+    return static_cast<HCURSOR>(m_hIcon);  // reuse the dialog icon as drag cursor
+}
+
+
+// Creates the first VoiceEngine channel, registers this dialog as its RTP
+// observer, enables the channel-1 control group, and selects the default
+// send codec (codec index 0).
+void CWinTestDlg::OnBnClickedButtonCreate1()
+{
+    int channel(0);
+    TEST((channel = _veBasePtr->CreateChannel()) >= 0, _T("CreateChannel(channel=%d)"), channel);
+    if (channel >= 0)
+    {
+        _veRtpRtcpPtr->RegisterRTPObserver(channel, *this);
+
+        // Show the new channel id and flip the UI into "channel exists" state.
+        SetDlgItemInt(IDC_EDIT_1, channel);
+        GetDlgItem(IDC_BUTTON_CREATE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(TRUE);
+        // NOTE(review): unlike OnBnClickedButtonCreate2, the conference
+        // checkbox (IDC_CHECK_CONFERENCE_1) is not re-enabled here — confirm
+        // whether that is intentional.
+        GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_FEC)->EnableWindow(TRUE);
+
+        // Always set send codec to default codec <=> index 0.
+        CodecInst codec;
+        _veCodecPtr->GetCodec(0, codec);
+        _veCodecPtr->SetSendCodec(channel, codec);
+    }
+}
+
+// Creates the second VoiceEngine channel, registers this dialog as its RTP
+// observer, enables the channel-2 control group, and selects the default
+// send codec (codec index 0).
+void CWinTestDlg::OnBnClickedButtonCreate2()
+{
+    int channel(0);
+    TEST((channel = _veBasePtr->CreateChannel()) >=0 , _T("CreateChannel(%d)"), channel);
+    if (channel >= 0)
+    {
+        _veRtpRtcpPtr->RegisterRTPObserver(channel, *this);
+
+        // Show the new channel id and flip the UI into "channel exists" state.
+        SetDlgItemInt(IDC_EDIT_2, channel);
+        GetDlgItem(IDC_BUTTON_CREATE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(TRUE);
+        // NOTE(review): channel 2's VAD checkbox uses resource id
+        // IDC_CHECK_VAD_3 (not *_2) — appears to match the dialog template.
+        GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(TRUE);
+
+        // Always set send codec to default codec <=> index 0.
+        CodecInst codec;
+        _veCodecPtr->GetCodec(0, codec);
+        _veCodecPtr->SetSendCodec(channel, codec);
+    }
+}
+
+// Deletes the first channel (if its id field holds a number): clears local
+// state flags, deregisters the RTP observer, deletes the channel, then
+// disables and unchecks every channel-1 control so the UI returns to the
+// "no channel" state.
+void CWinTestDlg::OnBnClickedButtonDelete1()
+{
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+    if (ret == TRUE)  // field held a valid channel id
+    {
+        _delayEstimate1 = false;
+        _rxVad = false;
+        _veRtpRtcpPtr->DeRegisterRTPObserver(channel);
+        TEST(_veBasePtr->DeleteChannel(channel) == 0, _T("DeleteChannel(channel=%d)"), channel);
+        SetDlgItemText(IDC_EDIT_1, _T(""));
+        // Disable all channel-1 controls...
+        GetDlgItem(IDC_BUTTON_CREATE_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DELETE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_IP_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_TX_PORT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_RX_PORT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_LIST_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_CODEC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DTMF_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_VAD_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_AGC_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_NS_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_RXVAD)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_FEC)->EnableWindow(FALSE);
+        SetDlgItemText(IDC_EDIT_RXVAD, _T(""));
+        GetDlgItem(IDC_EDIT_RXVAD)->EnableWindow(FALSE);
+        // ...and clear all channel-1 checkboxes.
+        CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_VAD_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_CONFERENCE_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_AGC_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_NS_1);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_RXVAD);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_FEC);
+        button->SetCheck(BST_UNCHECKED);
+    }
+}
+
+// Deletes the second channel (if its id field holds a number): clears the
+// delay-estimate flag, deregisters the RTP observer, deletes the channel,
+// then disables and unchecks every channel-2 control.
+void CWinTestDlg::OnBnClickedButtonDelete2()
+{
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_2, &ret);
+    if (ret == TRUE)  // field held a valid channel id
+    {
+        _delayEstimate2 = false;
+        _veRtpRtcpPtr->DeRegisterRTPObserver(channel);
+        TEST(_veBasePtr->DeleteChannel(channel) == 0, _T("DeleteChannel(%d)"), channel);
+        SetDlgItemText(IDC_EDIT_2, _T(""));
+        // Disable all channel-2 controls...
+        GetDlgItem(IDC_BUTTON_CREATE_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_DELETE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_IP_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_TX_PORT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_EDIT_RX_PORT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_COMBO_CODEC_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_TRANS_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_MUTE_IN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_VAD_3)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_TX_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_SRTP_RX_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_CONFERENCE_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_DTMF_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_CHECK_ON_HOLD_2)->EnableWindow(FALSE);
+        // ...and clear all channel-2 checkboxes.
+        CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_VAD_3);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_CONFERENCE_2);
+        button->SetCheck(BST_UNCHECKED);
+        button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_2);
+        button->SetCheck(BST_UNCHECKED);
+    }
+}
+
+// Applies the channel-1 destination (IP + TX port) when the IP combo
+// selection changes.
+void CWinTestDlg::OnCbnSelchangeComboIp1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CString str;
+    int port = GetDlgItemInt(IDC_EDIT_TX_PORT_1);
+    CComboBox* comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_1);
+    // NOTE(review): item 0 is read rather than GetCurSel(); harmless while
+    // the combo only ever contains "127.0.0.1" — confirm if more IPs are added.
+    int n = comboIP->GetLBTextLen(0);
+    comboIP->GetLBText(0, str.GetBuffer(n));
+    TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(str.GetBuffer(n), -1)) == 0,
+        _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, str.GetBuffer(n));
+    str.ReleaseBuffer();
+}
+
+// Applies the channel-2 destination (IP + TX port) when the IP combo
+// selection changes.
+void CWinTestDlg::OnCbnSelchangeComboIp2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CString str;
+    int port = GetDlgItemInt(IDC_EDIT_TX_PORT_2);
+    CComboBox* comboIP = (CComboBox*)GetDlgItem(IDC_COMBO_IP_2);
+    // NOTE(review): item 0 is read rather than GetCurSel(); harmless while
+    // the combo only ever contains "127.0.0.1" — confirm if more IPs are added.
+    int n = comboIP->GetLBTextLen(0);
+    comboIP->GetLBText(0, str.GetBuffer(n));
+    TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(str.GetBuffer(n), -1)) == 0,
+        _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, str.GetBuffer(n));
+    str.ReleaseBuffer();
+}
+
+// Sets the channel-1 send codec to the one selected in the codec combo box,
+// then resets the parameter list selection and shows the payload type.
+void CWinTestDlg::OnCbnSelchangeComboCodec1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+
+    CodecInst codec;
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_1);
+    int index = comboCodec->GetCurSel();
+    _veCodecPtr->GetCodec(index, codec);
+    if (strncmp(codec.plname, "ISAC", 4) == 0)
+    {
+        // Set iSAC to adaptive mode by default.
+        codec.rate = -1;
+    }
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+
+    // Reset the editable-parameter list to "pltype" and display its value.
+    CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+    list->SetCurSel(0);
+    SetDlgItemInt(IDC_EDIT_CODEC_1, codec.pltype);
+}
+
+// Overrides one field of the channel-1 send codec. The list box names which
+// field (pltype/plfreq/pacsize/channels/rate) and the edit box supplies the
+// new value; the modified codec is then re-applied with SetSendCodec.
+void CWinTestDlg::OnLbnSelchangeListCodec1()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+
+    CListBox* list = (CListBox*)GetDlgItem(IDC_LIST_CODEC_1);
+    int listIdx = list->GetCurSel();
+    if (listIdx < 0)  // no selection
+        return;
+    CString str;
+    list->GetText(listIdx, str);
+
+    // Start from the currently active send codec and patch one field.
+    CodecInst codec;
+    _veCodecPtr->GetSendCodec(channel, codec);
+
+    int value = GetDlgItemInt(IDC_EDIT_CODEC_1);
+    if (str == _T("pltype"))
+    {
+        codec.pltype = value;
+    }
+    else if (str == _T("plfreq"))
+    {
+        codec.plfreq = value;
+    }
+    else if (str == _T("pacsize"))
+    {
+        codec.pacsize = value;
+    }
+    else if (str == _T("channels"))
+    {
+        codec.channels = value;
+    }
+    else if (str == _T("rate"))
+    {
+        codec.rate = value;
+    }
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+}
+
+// Sets the channel-2 send codec to the one selected in the codec combo box.
+// Unlike channel 1, no iSAC adaptive-rate special case is applied here.
+void CWinTestDlg::OnCbnSelchangeComboCodec2()
+{
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+
+    CodecInst codec;
+    CComboBox* comboCodec(NULL);
+    comboCodec = (CComboBox*)GetDlgItem(IDC_COMBO_CODEC_2);
+    int index = comboCodec->GetCurSel();
+    _veCodecPtr->GetCodec(index, codec);
+    TEST(_veCodecPtr->SetSendCodec(channel, codec) == 0,
+        _T("SetSendCodec(channel=%d, plname=%s, pltype=%d, plfreq=%d, rate=%d, pacsize=%d, channels=%d)"),
+        channel, CharToTchar(codec.plname, 32), codec.pltype, codec.plfreq, codec.rate, codec.pacsize, codec.channels);
+}
+
+// Binds the channel-1 local receiver to the RX port and starts receiving.
+// Buttons are only toggled when both API calls succeed.
+void CWinTestDlg::OnBnClickedButtonStartListen1()
+{
+    int ret1(0);
+    int ret2(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    int port = GetDlgItemInt(IDC_EDIT_RX_PORT_1);
+    TEST((ret1 = _veBasePtr->SetLocalReceiver(channel, port)) == 0, _T("SetLocalReceiver(channel=%d, port=%d)"), channel, port);
+    TEST((ret2 = _veBasePtr->StartReceive(channel)) == 0, _T("StartReceive(channel=%d)"), channel);
+    if (ret1 == 0 && ret2 == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(TRUE);
+    }
+}
+
+// Binds the channel-2 local receiver to the RX port and starts receiving.
+// Buttons are only toggled when both API calls succeed.
+void CWinTestDlg::OnBnClickedButtonStartListen2()
+{
+    int ret1(0);
+    int ret2(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    int port = GetDlgItemInt(IDC_EDIT_RX_PORT_2);
+    TEST((ret1 = _veBasePtr->SetLocalReceiver(channel, port)) == 0, _T("SetLocalReceiver(channel=%d, port=%d)"), channel, port);
+    TEST((ret2 = _veBasePtr->StartReceive(channel)) == 0, _T("StartReceive(channel=%d)"), channel);
+    if (ret1 == 0 && ret2 == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(TRUE);
+    }
+}
+
+// Stops receiving on channel 1 and swaps the Start/Stop listen buttons.
+void CWinTestDlg::OnBnClickedButtonStopListen1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopReceive(channel)) == 0, _T("StopListen(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_1)->EnableWindow(FALSE);
+    }
+}
+
+// Stops receiving on channel 2 and swaps the Start/Stop listen buttons.
+void CWinTestDlg::OnBnClickedButtonStopListen2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StopReceive(channel)) == 0, _T("StopListen(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_LISTEN_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_LISTEN_2)->EnableWindow(FALSE);
+    }
+}
+
+// Starts playout on channel 1 and swaps the Start/Stop playout buttons.
+void CWinTestDlg::OnBnClickedButtonStartPlayout1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StartPlayout(channel)) == 0, _T("StartPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(TRUE);
+    }
+}
+
+// Starts playout on channel 2 and swaps the Start/Stop playout buttons.
+void CWinTestDlg::OnBnClickedButtonStartPlayout2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StartPlayout(channel)) == 0, _T("StartPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(TRUE);
+    }
+}
+
+// Stops playout on channel 1 and swaps the Start/Stop playout buttons.
+void CWinTestDlg::OnBnClickedButtonStopPlayout1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopPlayout(channel)) == 0, _T("StopPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_1)->EnableWindow(FALSE);
+    }
+}
+
+// Stops playout on channel 2 and swaps the Start/Stop playout buttons.
+// Mirrors OnBnClickedButtonStopPlayout1.
+void CWinTestDlg::OnBnClickedButtonStopPlayout2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    // Bug fix: the format string contains %d but the 'channel' argument was
+    // missing from the varargs list, so the TEST trace printed garbage.
+    TEST((ret = _veBasePtr->StopPlayout(channel)) == 0, _T("StopPlayout(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_PLAYOUT_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_PLAYOUT_2)->EnableWindow(FALSE);
+    }
+}
+
+// Starts sending on channel 1. Unless an external transport is registered,
+// the destination is first set from the bound IP string and TX port field.
+void CWinTestDlg::OnBnClickedButtonStartSend1()
+{
+    UpdateData(TRUE); // update IP address
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    if (!_externalTransport)
+    {
+        int port = GetDlgItemInt(IDC_EDIT_TX_PORT_1);
+        TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(_strComboIp1.GetBuffer(7), -1)) == 0,
+            _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, _strComboIp1.GetBuffer(7));
+        // Bug fix: ReleaseBuffer() was called on an unrelated, unused local
+        // CString while the buffer taken from _strComboIp1 was never released,
+        // leaving that CString in an inconsistent (buffer-locked) state.
+        _strComboIp1.ReleaseBuffer();
+    }
+
+    //_veVideoSyncPtr->SetInitTimestamp(0,0);
+    // OnCbnSelchangeComboCodec1();
+
+    TEST((ret = _veBasePtr->StartSend(channel)) == 0, _T("StartSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(TRUE);
+    }
+}
+
+// Starts sending on channel 2. Unless an external transport is registered,
+// the destination is first set from the bound IP string and TX port field.
+void CWinTestDlg::OnBnClickedButtonStartSend2()
+{
+    UpdateData(TRUE); // update IP address
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    if (!_externalTransport)
+    {
+        int port = GetDlgItemInt(IDC_EDIT_TX_PORT_2);
+        TEST(_veBasePtr->SetSendDestination(channel, port, TcharToChar(_strComboIp2.GetBuffer(7), -1)) == 0,
+            _T("SetSendDestination(channel=%d, port=%d, ip=%s)"), channel, port, _strComboIp2.GetBuffer(7));
+        // Bug fix: ReleaseBuffer() was called on an unrelated, unused local
+        // CString while the buffer taken from _strComboIp2 was never released,
+        // leaving that CString in an inconsistent (buffer-locked) state.
+        _strComboIp2.ReleaseBuffer();
+    }
+
+    // OnCbnSelchangeComboCodec2();
+
+    TEST((ret = _veBasePtr->StartSend(channel)) == 0, _T("StartSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(FALSE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(TRUE);
+    }
+}
+
+// Stops sending on channel 1 and swaps the Start/Stop send buttons.
+void CWinTestDlg::OnBnClickedButtonStopSend1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    TEST((ret = _veBasePtr->StopSend(channel)) == 0, _T("StopSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_1)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_1)->EnableWindow(FALSE);
+    }
+}
+
+// Stops sending on channel 2 and swaps the Start/Stop send buttons.
+void CWinTestDlg::OnBnClickedButtonStopSend2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    TEST((ret = _veBasePtr->StopSend(channel)) == 0, _T("StopSend(channel=%d)"), channel);
+    if (ret == 0)
+    {
+        GetDlgItem(IDC_BUTTON_START_SEND_2)->EnableWindow(TRUE);
+        GetDlgItem(IDC_BUTTON_STOP_SEND_2)->EnableWindow(FALSE);
+    }
+}
+
+// Toggles external transport on channel 1 according to the checkbox state.
+// On failure the checkbox is restored to its previous state.
+void CWinTestDlg::OnBnClickedCheckExtTrans1()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST((ret = _veNetworkPtr->RegisterExternalTransport(channel, *_transportPtr)) == 0,
+            _T("RegisterExternalTransport(channel=%d, transport=0x%x)"), channel, _transportPtr);
+    }
+    else
+    {
+        TEST((ret = _veNetworkPtr->DeRegisterExternalTransport(channel)) == 0,
+            _T("DeRegisterExternalTransport(channel=%d)"), channel);
+    }
+    if (ret == 0)
+    {
+        // NOTE(review): _externalTransport is a single flag shared by both
+        // channels' handlers — confirm that toggling one channel is meant to
+        // affect the StartSend path of the other as well.
+        _externalTransport = enable;
+    }
+    else
+    {
+        // restore inital state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+// Toggles external transport on channel 2 according to the checkbox state.
+// On failure the checkbox is restored to its previous state.
+void CWinTestDlg::OnBnClickedCheckExtTrans2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_EXT_TRANS_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        TEST((ret = _veNetworkPtr->RegisterExternalTransport(channel, *_transportPtr)) == 0,
+            _T("RegisterExternalTransport(channel=%d, transport=0x%x)"), channel, _transportPtr);
+    }
+    else
+    {
+        TEST((ret = _veNetworkPtr->DeRegisterExternalTransport(channel)) == 0,
+            _T("DeRegisterExternalTransport(channel=%d)"), channel);
+    }
+    if (ret == 0)
+    {
+        // NOTE(review): _externalTransport is a single flag shared by both
+        // channels' handlers — confirm that toggling one channel is meant to
+        // affect the StartSend path of the other as well.
+        _externalTransport = enable;
+    }
+    else
+    {
+        // restore inital state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+// Toggles playing a short 16 kHz PCM file into channel 1 as simulated
+// microphone input. The mix flag alternates on each activation. On API
+// failure the checkbox is restored to its previous state.
+void CWinTestDlg::OnBnClickedCheckPlayFileIn1()
+{
+    std::string micFile = _long_audio_file_path + "audio_short16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        // Alternate between mixing with and replacing real microphone input.
+        (_checkPlayFileIn1 %2 == 0) ? mix = true : mix = false;
+        // Bug fix: 'ret' previously received the result of the '== 0'
+        // comparison (0 or 1), so the 'ret == -1' failure check below could
+        // never fire. Assign the API return value itself, as the other
+        // handlers in this file do.
+        TEST((ret = _veFilePtr->StartPlayingFileAsMicrophone(channel,
+            micFile.c_str(), loop, mix, format, scale)) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, loop=%d, ")
+            _T("mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile.c_str(), -1),
+            loop, mix, format, scale);
+        _checkPlayFileIn1++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileAsMicrophone(channel)) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore inital state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+// Toggles playing a long 16 kHz PCM file into channel 2 as simulated
+// microphone input. The mix flag alternates on each activation. On API
+// failure the checkbox is restored to its previous state.
+void CWinTestDlg::OnBnClickedCheckPlayFileIn2()
+{
+    std::string micFile = _long_audio_file_path + "audio_long16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        // Alternate between mixing with and replacing real microphone input.
+        (_checkPlayFileIn2 %2 == 0) ? mix = true : mix = false;
+        // Bug fix: 'ret' previously received the result of the '== 0'
+        // comparison (0 or 1), so the 'ret == -1' failure check below could
+        // never fire. Assign the API return value itself, as the other
+        // handlers in this file do.
+        TEST((ret = _veFilePtr->StartPlayingFileAsMicrophone(channel,
+            micFile.c_str(), loop, mix, format, scale)) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, loop=%d, ")
+            _T("mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile.c_str(), -1),
+            loop, mix, format, scale);
+        _checkPlayFileIn2++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileAsMicrophone(channel)) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore inital state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+// Toggles local file playout on channel 1, cycling through eight test files
+// (one PCM, seven WAV at various sample rates) on successive activations.
+// On API failure the checkbox is restored to its previous state.
+void CWinTestDlg::OnBnClickedCheckPlayFileOut1()
+{
+    const FileFormats formats[8] = {{kFileFormatPcm16kHzFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile},
+    {kFileFormatWavFile}};
+    // File path is relative to the location of 'voice_engine.gyp'.
+    const char spkrFiles[8][64] = {{"audio_short16.pcm"},
+    {"audio_tiny8.wav"},
+    {"audio_tiny11.wav"},
+    {"audio_tiny16.wav"},
+    {"audio_tiny22.wav"},
+    {"audio_tiny32.wav"},
+    {"audio_tiny44.wav"},
+    {"audio_tiny48.wav"}};
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        const bool loop(true);
+        const float volumeScaling(1.0);
+        const int startPointMs(0);
+        const int stopPointMs(0);
+        // Pick the next file/format in the eight-entry rotation.
+        const FileFormats format = formats[_checkPlayFileOut1 % 8];
+        std::string spkrFile = _long_audio_file_path +
+        spkrFiles[_checkPlayFileOut1 % 8];
+
+        CString str;
+        if (_checkPlayFileOut1 % 8 == 0)
+        {
+            str = _T("kFileFormatPcm16kHzFile");
+        }
+        else
+        {
+            str = _T("kFileFormatWavFile");
+        }
+        // Bug fix: 'ret' previously received the result of the '== 0'
+        // comparison (0 or 1), so the 'ret == -1' failure check below could
+        // never fire. Assign the API return value itself, as the other
+        // handlers in this file do.
+        TEST((ret = _veFilePtr->StartPlayingFileLocally(channel,
+            spkrFile.c_str(), loop, format, volumeScaling,
+            startPointMs,stopPointMs)) == 0,
+            _T("StartPlayingFileLocally(channel=%d, file=%s, loop=%d, ")
+            _T("format=%s, scale=%2.1f, start=%d, stop=%d)"),
+            channel, CharToTchar(spkrFile.c_str(), -1),
+            loop, str, volumeScaling, startPointMs, stopPointMs);
+        _checkPlayFileOut1++;
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileLocally(channel)) == 0,
+            _T("StopPlayingFileLocally(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore inital state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckPlayFileOut2()
+{
+    // Toggles local playout of a long 16 kHz PCM file on the channel given
+    // by IDC_EDIT_2. If the API call fails (-1), the check box is restored
+    // to its previous state.
+    std::string spkrFile = _long_audio_file_path + "audio_long16.pcm";
+
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_OUT_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float volumeScaling(1.0);
+        const int startPointMs(0);
+        const int stopPointMs(0);
+
+        // Parentheses fixed so 'ret' holds the API return value (-1 on
+        // failure) rather than the '== 0' comparison; otherwise the
+        // restore-on-failure branch below never triggers.
+        TEST((ret = _veFilePtr->StartPlayingFileLocally(channel,
+            spkrFile.c_str(), loop, format, volumeScaling,
+            startPointMs,stopPointMs)) == 0,
+            _T("StartPlayingFileLocally(channel=%d, file=%s, loop=%d, ")
+            _T("format=%d, scale=%2.1f, start=%d, stop=%d)"),
+            channel, CharToTchar(spkrFile.c_str(), -1),
+            loop, format, volumeScaling, startPointMs, stopPointMs);
+    }
+    else
+    {
+        TEST((ret = _veFilePtr->StopPlayingFileLocally(channel)) == 0,
+            _T("StopPlayingFileLocally(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn1()
+{
+    // Register or deregister recording-side external media processing on
+    // the channel entered in IDC_EDIT_1, following the check box state.
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_1);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(ch, kRecordingPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel)"), ch);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(ch, kRecordingPerChannel, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel, processObject=0x%x)"), ch, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn2()
+{
+    // Register or deregister recording-side external media processing on
+    // the channel entered in IDC_EDIT_2, following the check box state.
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN_2);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(ch, kRecordingPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel)"), ch);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(ch, kRecordingPerChannel, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kRecordingPerChannel, processObject=0x%x)"), ch, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut1()
+{
+    // Register or deregister playback-side external media processing on
+    // the channel entered in IDC_EDIT_1, following the check box state.
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_1);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(ch, kPlaybackPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel)"), ch);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(ch, kPlaybackPerChannel, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel, processObject=0x%x)"), ch, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut2()
+{
+    // Register or deregister playback-side external media processing on
+    // the channel entered in IDC_EDIT_2, following the check box state.
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT_2);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(ch, kPlaybackPerChannel) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel)"), ch);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(ch, kPlaybackPerChannel, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackPerChannel, processObject=0x%x)"), ch, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnBnClickedCheckVad1()
+{
+    // Enables VAD on the channel from IDC_EDIT_1, cycling through the four
+    // aggressiveness modes on successive activations; unchecking disables
+    // VAD. If the API call fails (-1), the check box state is restored.
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_VAD_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        VadModes mode(kVadConventional);
+        if (_checkVAD1 % 4 == 0)
+        {
+            mode = kVadConventional;
+            str = _T("kVadConventional");
+        }
+        else if (_checkVAD1 % 4 == 1)
+        {
+            mode = kVadAggressiveLow;
+            str = _T("kVadAggressiveLow");
+        }
+        else if (_checkVAD1 % 4 == 2)
+        {
+            mode = kVadAggressiveMid;
+            str = _T("kVadAggressiveMid");
+        }
+        else if (_checkVAD1 % 4 == 3)
+        {
+            mode = kVadAggressiveHigh;
+            str = _T("kVadAggressiveHigh");
+        }
+        const bool disableDTX(false);
+        // Parentheses fixed so 'ret' receives the API return value (-1 on
+        // failure), not the '== 0' comparison; otherwise the restore
+        // branch below could never trigger (matches the disable branch).
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, true, mode, disableDTX)) == 0,
+            _T("SetVADStatus(channel=%d, enable=%d, mode=%s, disableDTX=%d)"), channel, enable, str, disableDTX);
+        _checkVAD1++;
+    }
+    else
+    {
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, false)) == 0, _T("SetVADStatus(channel=%d, enable=%d)"), channel, false);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckVad2()
+{
+    // Enables VAD on the channel from IDC_EDIT_2, cycling through the four
+    // aggressiveness modes on successive activations; unchecking disables
+    // VAD. If the API call fails (-1), the check box state is restored.
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_VAD_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        VadModes mode(kVadConventional);
+        if (_checkVAD2 % 4 == 0)
+        {
+            mode = kVadConventional;
+            str = _T("kVadConventional");
+        }
+        else if (_checkVAD2 % 4 == 1)
+        {
+            mode = kVadAggressiveLow;
+            str = _T("kVadAggressiveLow");
+        }
+        else if (_checkVAD2 % 4 == 2)
+        {
+            mode = kVadAggressiveMid;
+            str = _T("kVadAggressiveMid");
+        }
+        else if (_checkVAD2 % 4 == 3)
+        {
+            mode = kVadAggressiveHigh;
+            str = _T("kVadAggressiveHigh");
+        }
+        const bool disableDTX(false);
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, true, mode, disableDTX)) == 0,
+            _T("SetVADStatus(channel=%d, enable=%d, mode=%s, disableDTX=%d)"), channel, enable, str, disableDTX);
+        _checkVAD2++;
+    }
+    else
+    {
+        // Parentheses fixed so 'ret' receives the API return value (-1 on
+        // failure), not the '== 0' comparison; otherwise the restore
+        // branch below could never trigger (matches the enable branch).
+        TEST((ret = _veCodecPtr->SetVADStatus(channel, false)) == 0, _T("SetVADStatus(channel=%d, enable=%d)"), channel, false);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn1()
+{
+    // Mute or unmute the capture (input) signal for the channel given in
+    // IDC_EDIT_1, following the check box state.
+    CButton* muteBox = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_1);
+    const bool muted = (muteBox->GetCheck() == BST_CHECKED);
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    TEST(_veVolumeControlPtr->SetInputMute(ch, muted) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), ch, muted);
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn2()
+{
+    // Mute or unmute the capture (input) signal for the channel given in
+    // IDC_EDIT_2, following the check box state.
+    CButton* muteBox = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN_2);
+    const bool muted = (muteBox->GetCheck() == BST_CHECKED);
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    TEST(_veVolumeControlPtr->SetInputMute(ch, muted) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), ch, muted);
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpTx1()
+{
+    // Enables/disables SRTP on the send side for the channel in IDC_EDIT_1.
+    // Alternates whether RTCP is also protected. If the API call fails
+    // (-1), the check box is restored to its previous state.
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP = false;
+    if (enable)
+    {
+        (_checkSrtpTx1++ %2 == 0) ? useForRTCP = false : useForRTCP = true;
+        TEST((ret = _veEncryptionPtr->EnableSRTPSend(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPSend(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        // Parentheses fixed so 'ret' receives the API return value (-1 on
+        // failure), not the '== 0' comparison; otherwise the restore
+        // branch below could never trigger (matches the enable branch).
+        TEST((ret = _veEncryptionPtr->DisableSRTPSend(channel)) == 0, _T("DisableSRTPSend(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpTx2()
+{
+    // Enables/disables SRTP on the send side for the channel in IDC_EDIT_2.
+    // Alternates whether RTCP is also protected. If the API call fails
+    // (-1), the check box is restored to its previous state.
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_TX_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP = false;
+    if (enable)
+    {
+        (_checkSrtpTx2++ %2 == 0) ? useForRTCP = false : useForRTCP = true;
+        TEST((ret = _veEncryptionPtr->EnableSRTPSend(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPSend(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        // Parentheses fixed so 'ret' receives the API return value (-1 on
+        // failure), not the '== 0' comparison; otherwise the restore
+        // branch below could never trigger (matches the enable branch).
+        TEST((ret = _veEncryptionPtr->DisableSRTPSend(channel)) == 0, _T("DisableSRTPSend(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpRx1()
+{
+    // Enables/disables SRTP on the receive side for the channel in
+    // IDC_EDIT_1. Alternates whether RTCP is also protected. If the API
+    // call fails (-1), the check box is restored to its previous state.
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    bool useForRTCP(false);
+    if (enable)
+    {
+        (_checkSrtpRx1++ %2 == 0) ? useForRTCP = false : useForRTCP = true;
+        TEST((ret = _veEncryptionPtr->EnableSRTPReceive(channel,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPReceive(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            channel, useForRTCP);
+    }
+    else
+    {
+        // Parentheses fixed so 'ret' receives the API return value (-1 on
+        // failure), not the '== 0' comparison; otherwise the restore
+        // branch below could never trigger (matches the enable branch).
+        TEST((ret = _veEncryptionPtr->DisableSRTPReceive(channel)) == 0, _T("DisableSRTPReceive(channel=%d)"), channel);
+    }
+    if (ret == -1)
+    {
+        // restore initial state since API call failed
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckSrtpRx2()
+{
+    // Enable or disable SRTP on the receive side for the channel given in
+    // IDC_EDIT_2; alternates whether RTCP is also protected. The check box
+    // is reverted if the API call reports failure.
+    int ret(0);
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_SRTP_RX_2);
+    const int state = box->GetCheck();
+    bool useForRTCP(false);
+    if (state == BST_CHECKED)
+    {
+        // Every other activation also protects RTCP.
+        useForRTCP = (_checkSrtpRx2++ % 2 != 0);
+        TEST((ret = _veEncryptionPtr->EnableSRTPReceive(ch,
+            kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP)) == 0,
+            _T("EnableSRTPReceive(channel=%d, kCipherAes128CounterMode, 30, kAuthHmacSha1, 20, 4, kEncryptionAndAuthentication, key, useForRTCP=%d)"),
+            ch, useForRTCP);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DisableSRTPReceive(ch)) == 0, _T("DisableSRTPReceive(channel=%d)"), ch);
+    }
+    if (ret == -1)
+    {
+        // undo the check box toggle since the API call failed
+        box->SetCheck((state == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtEncryption1()
+{
+    // Register or deregister the external encryption object on the channel
+    // from IDC_EDIT_1; the check box is reverted if the call fails.
+    int ret(0);
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_1);
+    const int state = box->GetCheck();
+    if (state == BST_CHECKED)
+    {
+        TEST((ret = _veEncryptionPtr->RegisterExternalEncryption(ch, *_encryptionPtr)) == 0,
+            _T("RegisterExternalEncryption(channel=%d, encryption=0x%x)"), ch, _encryptionPtr);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DeRegisterExternalEncryption(ch)) == 0,
+            _T("DeRegisterExternalEncryption(channel=%d)"), ch);
+    }
+    if (ret == -1)
+    {
+        // undo the check box toggle since the API call failed
+        box->SetCheck((state == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckExtEncryption2()
+{
+    // Register or deregister the external encryption object on the channel
+    // from IDC_EDIT_2; the check box is reverted if the call fails.
+    int ret(0);
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_EXT_ENCRYPTION_2);
+    const int state = box->GetCheck();
+    if (state == BST_CHECKED)
+    {
+        TEST((ret = _veEncryptionPtr->RegisterExternalEncryption(ch, *_encryptionPtr)) == 0,
+            _T("RegisterExternalEncryption(channel=%d, encryption=0x%x)"), ch, _encryptionPtr);
+    }
+    else
+    {
+        TEST((ret = _veEncryptionPtr->DeRegisterExternalEncryption(ch)) == 0,
+            _T("DeRegisterExternalEncryption(channel=%d)"), ch);
+    }
+    if (ret == -1)
+    {
+        // undo the check box toggle since the API call failed
+        box->SetCheck((state == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+    }
+}
+
+void CWinTestDlg::OnBnClickedButtonDtmf1()
+{
+    // Open the modal telephony-event (DTMF) dialog for the channel
+    // entered in IDC_EDIT_1.
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    CTelephonyEvent dlg(_vePtr, ch, this);
+    dlg.DoModal();
+}
+
+void CWinTestDlg::OnBnClickedButtonDtmf2()
+{
+    // Open the modal telephony-event (DTMF) dialog for the channel
+    // entered in IDC_EDIT_2.
+    const int ch = GetDlgItemInt(IDC_EDIT_2);
+    CTelephonyEvent dlg(_vePtr, ch, this);
+    dlg.DoModal();
+}
+
+void CWinTestDlg::OnBnClickedCheckConference1()
+{
+    // Not supported yet - intentionally left as a no-op placeholder for
+    // the conference check box.
+}
+
+void CWinTestDlg::OnBnClickedCheckConference2()
+{
+    // Not supported yet - intentionally left as a no-op placeholder for
+    // the conference check box.
+}
+
+// Toggles on-hold state for the channel in IDC_EDIT_1, cycling through
+// the three hold modes on successive activations. Shift-clicking instead
+// performs a read-only query of the current on-hold status (the check box
+// toggle is undone so the click has no side effect).
+void CWinTestDlg::OnBnClickedCheckOnHold1()
+{
+    SHORT shiftKeyIsPressed = ::GetAsyncKeyState(VK_SHIFT);
+
+    CString str;
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_1);
+    int check = button->GetCheck();
+
+    if (shiftKeyIsPressed)
+    {
+        bool enabled(false);
+        OnHoldModes mode(kHoldSendAndPlay);
+        TEST(_veBasePtr->GetOnHoldStatus(channel, enabled, mode) == 0,
+            _T("GetOnHoldStatus(channel=%d, enabled=?, mode=?)"), channel);
+        // Undo the toggle caused by the click; this path only reads state.
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+
+        switch (mode)
+        {
+        case kHoldSendAndPlay:
+            str = _T("kHoldSendAndPlay");
+            break;
+        case kHoldSendOnly:
+            str = _T("kHoldSendOnly");
+            break;
+        case kHoldPlayOnly:
+            str = _T("kHoldPlayOnly");
+            break;
+        default:
+            break;
+        }
+        PRINT_GET_RESULT(_T("enabled=%d, mode=%s"), enabled, str);
+        return;
+    }
+
+    // NOTE(review): 'ret' is assigned below but never checked, so (unlike
+    // the sibling handlers) a failing call does not restore the check box
+    // state - confirm whether that is intentional.
+    int ret(0);
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        OnHoldModes mode(kHoldSendAndPlay);
+        if (_checkOnHold1 % 3 == 0)
+        {
+            mode = kHoldSendAndPlay;
+            str = _T("kHoldSendAndPlay");
+        }
+        else if (_checkOnHold1 % 3 == 1)
+        {
+            mode = kHoldSendOnly;
+            str = _T("kHoldSendOnly");
+        }
+        else if (_checkOnHold1 % 3 == 2)
+        {
+            mode = kHoldPlayOnly;
+            str = _T("kHoldPlayOnly");
+        }
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable, mode)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d, mode=%s)"), channel, enable, str);
+        _checkOnHold1++;
+    }
+    else
+    {
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+// Toggles on-hold state for the channel in IDC_EDIT_2, cycling through
+// the three hold modes on successive activations.
+void CWinTestDlg::OnBnClickedCheckOnHold2()
+{
+    int ret(0);
+    int channel = GetDlgItemInt(IDC_EDIT_2);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_ON_HOLD_2);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        OnHoldModes mode(kHoldSendAndPlay);
+        // NOTE(review): this handler reuses the channel-1 counter
+        // '_checkOnHold1' for mode cycling and increments it below -
+        // looks like a copy-paste of OnBnClickedCheckOnHold1; presumably
+        // '_checkOnHold2' was intended. Confirm against the header.
+        if (_checkOnHold1 % 3 == 0)
+        {
+            mode = kHoldSendAndPlay;
+            str = _T("kHoldSendAndPlay");
+        }
+        else if (_checkOnHold1 % 3 == 1)
+        {
+            mode = kHoldSendOnly;
+            str = _T("kHoldSendOnly");
+        }
+        else if (_checkOnHold1 % 3 == 2)
+        {
+            mode = kHoldPlayOnly;
+            str = _T("kHoldPlayOnly");
+        }
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable, mode)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d, mode=%s)"), channel, enable, str);
+        _checkOnHold1++;
+    }
+    else
+    {
+        TEST((ret = _veBasePtr->SetOnHoldStatus(channel, enable)) == 0,
+            _T("SetOnHoldStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckDelayEstimate1()
+{
+    // Turn the periodic delay-estimate readout on or off; the timer
+    // handler fills IDC_EDIT_DELAY_ESTIMATE_1 while _delayEstimate1 is set.
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_DELAY_ESTIMATE_1);
+    _delayEstimate1 = (box->GetCheck() == BST_CHECKED);
+    if (_delayEstimate1)
+    {
+        SetDlgItemInt(IDC_EDIT_DELAY_ESTIMATE_1, 0);
+    }
+    else
+    {
+        SetDlgItemText(IDC_EDIT_DELAY_ESTIMATE_1, _T(""));
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRxvad()
+{
+    // Toggle the RX VAD observer on the channel given in IDC_EDIT_1; the
+    // timer handler displays the latest VAD decision while _rxVad is set.
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_RXVAD);
+    const int ch = GetDlgItemInt(IDC_EDIT_1);
+    if (box->GetCheck() == BST_CHECKED)
+    {
+        _rxVad = true;
+        _veApmPtr->RegisterRxVadObserver(ch, *_rxVadObserverPtr);
+        SetDlgItemInt(IDC_EDIT_RXVAD, 0);
+    }
+    else
+    {
+        _rxVad = false;
+        _veApmPtr->DeRegisterRxVadObserver(ch);
+        SetDlgItemText(IDC_EDIT_RXVAD, _T(""));
+    }
+}
+
+// Toggles receive-side AGC on the channel in IDC_EDIT_1, cycling through
+// three modes on successive activations. Shift-clicking instead performs
+// a read-only query of the current RX AGC status (the check box toggle is
+// undone so the click has no side effect).
+void CWinTestDlg::OnBnClickedCheckAgc1()
+{
+    SHORT shiftKeyIsPressed = ::GetAsyncKeyState(VK_SHIFT);
+
+    CString str;
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_AGC_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+
+    if (shiftKeyIsPressed)
+    {
+        bool enabled(false);
+        AgcModes mode(kAgcAdaptiveDigital);
+        TEST(_veApmPtr->GetRxAgcStatus(channel, enabled, mode) == 0,
+            _T("GetRxAgcStatus(channel=%d, enabled=?, mode=?)"), channel);
+        // Undo the toggle caused by the click; this path only reads state.
+        button->SetCheck((check == BST_CHECKED) ? BST_UNCHECKED : BST_CHECKED);
+
+        switch (mode)
+        {
+        case kAgcAdaptiveAnalog:
+            str = _T("kAgcAdaptiveAnalog");
+            break;
+        case kAgcAdaptiveDigital:
+            str = _T("kAgcAdaptiveDigital");
+            break;
+        case kAgcFixedDigital:
+            str = _T("kAgcFixedDigital");
+            break;
+        default:
+            break;
+        }
+        PRINT_GET_RESULT(_T("enabled=%d, mode=%s"), enabled, str);
+        return;
+    }
+
+    if (enable)
+    {
+        // Cycle: default -> adaptive digital -> fixed digital.
+        CString str;
+        AgcModes mode(kAgcDefault);
+        if (_checkAGC1 % 3 == 0)
+        {
+            mode = kAgcDefault;
+            str = _T("kAgcDefault");
+        }
+        else if (_checkAGC1 % 3 == 1)
+        {
+            mode = kAgcAdaptiveDigital;
+            str = _T("kAgcAdaptiveDigital");
+        }
+        else if (_checkAGC1 % 3 == 2)
+        {
+            mode = kAgcFixedDigital;
+            str = _T("kAgcFixedDigital");
+        }
+        TEST(_veApmPtr->SetRxAgcStatus(channel, true, mode) == 0, _T("SetRxAgcStatus(channel=%d, enable=%d, %s)"), channel, enable, str);
+        _checkAGC1++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetRxAgcStatus(channel, false, kAgcUnchanged) == 0, _T("SetRxAgcStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckNs1()
+{
+    // Toggles receive-side noise suppression on the channel in IDC_EDIT_1,
+    // cycling through the six NS modes on successive activations.
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    CButton* buttonNS = (CButton*)GetDlgItem(IDC_CHECK_NS_1);
+    int check = buttonNS->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        NsModes mode(kNsDefault);
+        if (_checkNS1 % 6 == 0)
+        {
+            mode = kNsDefault;
+            str = _T("kNsDefault");
+        }
+        else if (_checkNS1 % 6 == 1)
+        {
+            mode = kNsConference;
+            str = _T("kNsConference");
+        }
+        else if (_checkNS1 % 6 == 2)
+        {
+            mode = kNsLowSuppression;
+            str = _T("kNsLowSuppression");
+        }
+        else if (_checkNS1 % 6 == 3)
+        {
+            mode = kNsModerateSuppression;
+            str = _T("kNsModerateSuppression");
+        }
+        else if (_checkNS1 % 6 == 4)
+        {
+            mode = kNsHighSuppression;
+            str = _T("kNsHighSuppression");
+        }
+        else if (_checkNS1 % 6 == 5)
+        {
+            mode = kNsVeryHighSuppression;
+            str = _T("kNsVeryHighSuppression");
+        }
+        TEST(_veApmPtr->SetRxNsStatus(channel, true, mode) == 0, _T("SetRxNsStatus(channel=%d, enable=%d, %s)"), channel, enable, str);
+        _checkNS1++;
+    }
+    else
+    {
+        // Fixed: arguments were passed as (enable, channel), swapped
+        // relative to the "channel=%d, enable=%d" format string.
+        TEST(_veApmPtr->SetRxNsStatus(channel, false, kNsUnchanged) == 0, _T("SetRxNsStatus(channel=%d, enable=%d)"), channel, enable);
+    }
+}
+
+// ----------------------------------------------------------------------------
+// Channel-independent Operations
+// ----------------------------------------------------------------------------
+
+// Toggles playing a short PCM file as the microphone signal using
+// channel -1 (presumably meaning "all/any channel" - confirm against the
+// VoEFile interface). Alternates the 'mix' flag on each activation.
+void CWinTestDlg::OnBnClickedCheckPlayFileIn()
+{
+    std::string micFile = _long_audio_file_path + "audio_short16.pcm";
+    // std::string micFile = _long_audio_file_path + "audio_long16noise.pcm";
+
+    int channel(-1);
+    CButton* buttonExtTrans = (CButton*)GetDlgItem(IDC_CHECK_PLAY_FILE_IN);
+    int check = buttonExtTrans->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        bool mix;
+        const bool loop(true);
+        const FileFormats format = kFileFormatPcm16kHzFile;
+        const float scale(1.0);
+
+        // Alternate between mixing with and replacing the mic signal.
+        (_checkPlayFileIn %2 == 0) ? mix = true : mix = false;
+        TEST(_veFilePtr->StartPlayingFileAsMicrophone(channel,
+            micFile.c_str(), loop, mix, format, scale) == 0,
+            _T("StartPlayingFileAsMicrophone(channel=%d, file=%s, ")
+            _T("loop=%d, mix=%d, format=%d, scale=%2.1f)"),
+            channel, CharToTchar(micFile.c_str(), -1),
+            loop, mix, format, scale);
+        _checkPlayFileIn++;
+    }
+    else
+    {
+        TEST(_veFilePtr->StopPlayingFileAsMicrophone(channel) == 0,
+            _T("StopPlayingFileAsMicrophone(channel=%d)"), channel);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRecMic()
+{
+    // Start or stop recording the microphone signal to a mono 16 kHz PCM
+    // file in the test output directory.
+    const std::string micFile =
+        webrtc::test::OutputPath() + "rec_mic_mono_16kHz.pcm";
+
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_REC_MIC);
+    if (box->GetCheck() == BST_CHECKED)
+    {
+        TEST(_veFilePtr->StartRecordingMicrophone(micFile.c_str(), NULL) == 0,
+            _T("StartRecordingMicrophone(file=%s)"),
+            CharToTchar(micFile.c_str(), -1));
+    }
+    else
+    {
+        TEST(_veFilePtr->StopRecordingMicrophone() == 0,
+            _T("StopRecordingMicrophone()"));
+    }
+}
+
+// Toggles global (send-side) AGC, cycling through four modes on
+// successive activations; unchecking disables AGC without changing the
+// stored mode (kAgcUnchanged).
+void CWinTestDlg::OnBnClickedCheckAgc()
+{
+    CButton* buttonAGC = (CButton*)GetDlgItem(IDC_CHECK_AGC);
+    int check = buttonAGC->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        AgcModes mode(kAgcDefault);
+        if (_checkAGC % 4 == 0)
+        {
+            mode = kAgcDefault;
+            str = _T("kAgcDefault");
+        }
+        else if (_checkAGC % 4 == 1)
+        {
+            mode = kAgcAdaptiveAnalog;
+            str = _T("kAgcAdaptiveAnalog");
+        }
+        else if (_checkAGC % 4 == 2)
+        {
+            mode = kAgcAdaptiveDigital;
+            str = _T("kAgcAdaptiveDigital");
+        }
+        else if (_checkAGC % 4 == 3)
+        {
+            mode = kAgcFixedDigital;
+            str = _T("kAgcFixedDigital");
+        }
+        TEST(_veApmPtr->SetAgcStatus(true, mode) == 0, _T("SetAgcStatus(enable=%d, %s)"), enable, str);
+        _checkAGC++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetAgcStatus(false, kAgcUnchanged) == 0, _T("SetAgcStatus(enable=%d)"), enable);
+    }
+}
+
+// Toggles global noise suppression, cycling through the six NS modes on
+// successive activations; unchecking disables NS without changing the
+// stored mode (kNsUnchanged).
+void CWinTestDlg::OnBnClickedCheckNs()
+{
+    CButton* buttonNS = (CButton*)GetDlgItem(IDC_CHECK_NS);
+    int check = buttonNS->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        NsModes mode(kNsDefault);
+        if (_checkNS % 6 == 0)
+        {
+            mode = kNsDefault;
+            str = _T("kNsDefault");
+        }
+        else if (_checkNS % 6 == 1)
+        {
+            mode = kNsConference;
+            str = _T("kNsConference");
+        }
+        else if (_checkNS % 6 == 2)
+        {
+            mode = kNsLowSuppression;
+            str = _T("kNsLowSuppression");
+        }
+        else if (_checkNS % 6 == 3)
+        {
+            mode = kNsModerateSuppression;
+            str = _T("kNsModerateSuppression");
+        }
+        else if (_checkNS % 6 == 4)
+        {
+            mode = kNsHighSuppression;
+            str = _T("kNsHighSuppression");
+        }
+        else if (_checkNS % 6 == 5)
+        {
+            mode = kNsVeryHighSuppression;
+            str = _T("kNsVeryHighSuppression");
+        }
+        TEST(_veApmPtr->SetNsStatus(true, mode) == 0, _T("SetNsStatus(enable=%d, %s)"), enable, str);
+        _checkNS++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetNsStatus(false, kNsUnchanged) == 0, _T("SetNsStatus(enable=%d)"), enable);
+    }
+}
+
+// Toggles global echo control, cycling through four EC modes on
+// successive activations; unchecking disables EC without changing the
+// stored mode (kEcUnchanged).
+void CWinTestDlg::OnBnClickedCheckEc()
+{
+    CButton* buttonEC = (CButton*)GetDlgItem(IDC_CHECK_EC);
+    int check = buttonEC->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    if (enable)
+    {
+        CString str;
+        EcModes mode(kEcDefault);
+        if (_checkEC % 4 == 0)
+        {
+            mode = kEcDefault;
+            str = _T("kEcDefault");
+        }
+        else if (_checkEC % 4 == 1)
+        {
+            mode = kEcConference;
+            str = _T("kEcConference");
+        }
+        else if (_checkEC % 4 == 2)
+        {
+            mode = kEcAec;
+            str = _T("kEcAec");
+        }
+        else if (_checkEC % 4 == 3)
+        {
+            mode = kEcAecm;
+            str = _T("kEcAecm");
+        }
+        TEST(_veApmPtr->SetEcStatus(true, mode) == 0, _T("SetEcStatus(enable=%d, %s)"), enable, str);
+        _checkEC++;
+    }
+    else
+    {
+        TEST(_veApmPtr->SetEcStatus(false, kEcUnchanged) == 0, _T("SetEcStatus(enable=%d)"), enable);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckMuteIn()
+{
+    // Mute or unmute the capture signal with channel -1, following the
+    // check box state.
+    CButton* muteBox = (CButton*)GetDlgItem(IDC_CHECK_MUTE_IN);
+    const bool muted = (muteBox->GetCheck() == BST_CHECKED);
+    const int channel(-1);
+    TEST(_veVolumeControlPtr->SetInputMute(channel, muted) == 0,
+        _T("SetInputMute(channel=%d, enable=%d)"), channel, muted);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaIn()
+{
+    // Register or deregister external media processing for the mixed
+    // recording signal (channel -1), following the check box state.
+    const int channel(-1);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_IN);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kRecordingAllChannelsMixed) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kRecordingAllChannelsMixed)"), channel);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kRecordingAllChannelsMixed, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kRecordingAllChannelsMixed, processObject=0x%x)"), channel, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnBnClickedCheckExtMediaOut()
+{
+    // Register or deregister external media processing for the mixed
+    // playback signal (channel -1), following the check box state.
+    const int channel(-1);
+    CButton* checkBox = (CButton*)GetDlgItem(IDC_CHECK_EXT_MEDIA_OUT);
+    const bool shouldRegister = (checkBox->GetCheck() == BST_CHECKED);
+    if (!shouldRegister)
+    {
+        TEST(_veExternalMediaPtr->DeRegisterExternalMediaProcessing(channel, kPlaybackAllChannelsMixed) == 0,
+            _T("DeRegisterExternalMediaProcessing(channel=%d, kPlaybackAllChannelsMixed)"), channel);
+        return;
+    }
+    TEST(_veExternalMediaPtr->RegisterExternalMediaProcessing(channel, kPlaybackAllChannelsMixed, *_externalMediaPtr) == 0,
+        _T("RegisterExternalMediaProcessing(channel=%d, kPlaybackAllChannelsMixed, processObject=0x%x)"), channel, _externalMediaPtr);
+}
+
+void CWinTestDlg::OnCbnSelchangeComboRecDevice()
+{
+    // Switch the capture device to the entry picked in the combo box.
+    CComboBox* deviceList = (CComboBox*)GetDlgItem(IDC_COMBO_REC_DEVICE);
+    const int index = deviceList->GetCurSel();
+    TEST(_veHardwarePtr->SetRecordingDevice(index) == 0,
+        _T("SetRecordingDevice(index=%d)"), index);
+}
+
+void CWinTestDlg::OnCbnSelchangeComboPlayDevice()
+{
+    // Switch the playout device to the entry picked in the combo box.
+    CComboBox* deviceList = (CComboBox*)GetDlgItem(IDC_COMBO_PLAY_DEVICE);
+    const int index = deviceList->GetCurSel();
+    TEST(_veHardwarePtr->SetPlayoutDevice(index) == 0,
+        _T("SetPlayoutDevice(index=%d)"), index);
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderInputVolume(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    // Apply the slider position (range forced to 0..255) as the
+    // microphone volume.
+    CSliderCtrl* volSlider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+    volSlider->SetRangeMin(0);
+    volSlider->SetRangeMax(255);
+    const int level = volSlider->GetPos();
+    TEST(_veVolumeControlPtr->SetMicVolume(level) == 0, _T("SetMicVolume(volume=%d)"), level);
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderOutputVolume(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    // Apply the slider position (range forced to 0..255) as the speaker
+    // volume.
+    CSliderCtrl* volSlider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+    volSlider->SetRangeMin(0);
+    volSlider->SetRangeMax(255);
+    const int level = volSlider->GetPos();
+    TEST(_veVolumeControlPtr->SetSpeakerVolume(level) == 0, _T("SetSpeakerVolume(volume=%d)"), level);
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderPanLeft(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    // Map the (inverted) slider position to a 0.0..1.0 factor and update
+    // only the left channel of the output volume pan.
+    CSliderCtrl* panSlider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_LEFT);
+    panSlider->SetRange(0,10);
+    const int pos = 10 - panSlider->GetPos(); // 0 <=> lower end, 10 <=> upper end
+
+    const int channel(-1);
+    float left(0.0);
+    float right(0.0);
+
+    // Read the current pan so the right channel is preserved.
+    _veVolumeControlPtr->GetOutputVolumePan(channel, left, right);
+    left = (float)((float)pos/10.0f);
+
+    TEST(_veVolumeControlPtr->SetOutputVolumePan(channel, left, right) == 0,
+        _T("SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)"), channel, left, right);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnNMReleasedcaptureSliderPanRight(NMHDR *pNMHDR, LRESULT *pResult)
+{
+    // Map the (inverted) slider position to a 0.0..1.0 factor and update
+    // only the right channel of the output volume pan.
+    CSliderCtrl* panSlider = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_PAN_RIGHT);
+    panSlider->SetRange(0,10);
+    const int pos = 10 - panSlider->GetPos(); // 0 <=> lower end, 10 <=> upper end
+
+    const int channel(-1);
+    float left(0.0);
+    float right(0.0);
+
+    // Read the current pan so the left channel is preserved.
+    _veVolumeControlPtr->GetOutputVolumePan(channel, left, right);
+    right = (float)((float)pos/10.0f);
+
+    TEST(_veVolumeControlPtr->SetOutputVolumePan(channel, left, right) == 0,
+        _T("SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)"), channel, left, right);
+
+    *pResult = 0;
+}
+
+void CWinTestDlg::OnBnClickedButtonVersion()
+{
+    // Pop up the VoiceEngine version string, or an error box if the
+    // query fails; does nothing when the base interface is absent.
+    if (!_veBasePtr)
+    {
+        return;
+    }
+    char version[1024];
+    if (_veBasePtr->GetVersion(version) == 0)
+    {
+        AfxMessageBox(CString(version), MB_OK);
+    }
+    else
+    {
+        AfxMessageBox(_T("FAILED!"), MB_OK);
+    }
+}
+
+void CWinTestDlg::OnBnClickedCheckRecCall()
+{
+    // Not supported - intentionally a no-op placeholder for the call
+    // recording check box.
+}
+
+void CWinTestDlg::OnBnClickedCheckTypingDetection()
+{
+    // Enable or disable typing detection, following the check box state.
+    CButton* box = (CButton*)GetDlgItem(IDC_CHECK_TYPING_DETECTION);
+    const bool enabled = (box->GetCheck() == BST_CHECKED);
+    TEST(_veApmPtr->SetTypingDetectionStatus(enabled) == 0, _T("SetTypingDetectionStatus(enable=%d)"), enabled);
+}
+
+void CWinTestDlg::OnBnClickedCheckFEC()
+{
+    // Enable or disable FEC on the channel given by IDC_EDIT_1.
+    CButton* button = (CButton*)GetDlgItem(IDC_CHECK_FEC);
+    int channel = GetDlgItemInt(IDC_EDIT_1);
+    int check = button->GetCheck();
+    const bool enable = (check == BST_CHECKED);
+    // Log message fixed to include the channel actually passed to the
+    // API, matching the style of the other per-channel handlers.
+    TEST(_veRtpRtcpPtr->SetFECStatus(channel, enable) == 0, _T("SetFECStatus(channel=%d, enable=%d)"), channel, enable);
+}
+
+// ----------------------------------------------------------------------------
+// Message Handlers
+// ----------------------------------------------------------------------------
+
+// Periodic (~1 s) UI refresh: polls volumes, audio levels, playout
+// delay, CPU load, the receive codec and RTCP statistics from the
+// VoiceEngine sub-interfaces and pushes them into the dialog controls,
+// then re-arms the timer.
+void CWinTestDlg::OnTimer(UINT_PTR nIDEvent)
+{
+    CString str;
+
+    unsigned int svol(0);
+    unsigned int mvol(0);
+
+    _timerTicks++;
+
+    // Get speaker and microphone volumes
+    _veVolumeControlPtr->GetSpeakerVolume(svol);
+    _veVolumeControlPtr->GetMicVolume(mvol);
+
+    // Update speaker volume slider
+    CSliderCtrl* sliderSpkr = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_OUTPUT_VOLUME);
+    sliderSpkr->SetRangeMin(0);
+    sliderSpkr->SetRangeMax(255);
+    sliderSpkr->SetPos(svol);
+
+    // Update microphone volume slider
+    CSliderCtrl* sliderMic = (CSliderCtrl*)GetDlgItem(IDC_SLIDER_INPUT_VOLUME);
+    sliderMic->SetRangeMin(0);
+    sliderMic->SetRangeMax(255);
+    sliderMic->SetPos(mvol);
+
+    unsigned int micLevel;
+    unsigned int combinedOutputLevel;
+
+    // Get audio levels (channel -1 for the combined output level)
+    _veVolumeControlPtr->GetSpeechInputLevel(micLevel);
+    _veVolumeControlPtr->GetSpeechOutputLevel(-1, combinedOutputLevel);
+
+    // Update audio level controls
+    CProgressCtrl* progressMic = (CProgressCtrl*)GetDlgItem(IDC_PROGRESS_AUDIO_LEVEL_IN);
+    progressMic->SetRange(0,9);
+    progressMic->SetStep(1);
+    progressMic->SetPos(micLevel);
+    CProgressCtrl* progressOut = (CProgressCtrl*)GetDlgItem(IDC_PROGRESS_AUDIO_LEVEL_OUT);
+    progressOut->SetRange(0,9);
+    progressOut->SetStep(1);
+    progressOut->SetPos(combinedOutputLevel);
+
+    // Update playout delay (buffer size)
+    if (_veVideoSyncPtr)
+    {
+        int bufferMs(0);
+        _veVideoSyncPtr->GetPlayoutBufferSize(bufferMs);
+        SetDlgItemInt(IDC_EDIT_PLAYOUT_BUFFER_SIZE, bufferMs);
+    }
+
+    // Delay estimate readout, only while enabled via its check box.
+    if (_delayEstimate1 && _veVideoSyncPtr)
+    {
+        const int channel = GetDlgItemInt(IDC_EDIT_1);
+        int delayMs(0);
+        _veVideoSyncPtr->GetDelayEstimate(channel, delayMs);
+        SetDlgItemInt(IDC_EDIT_DELAY_ESTIMATE_1, delayMs);
+    }
+
+    // Latest RX VAD decision, only while the observer is registered.
+    if (_rxVad && _veApmPtr && _rxVadObserverPtr)
+    {
+        SetDlgItemInt(IDC_EDIT_RXVAD, _rxVadObserverPtr->vad_decision);
+    }
+
+    if (_veHardwarePtr)
+    {
+        int load1, load2;
+        _veHardwarePtr->GetSystemCPULoad(load1);
+        _veHardwarePtr->GetCPULoad(load2);
+        str.Format(_T("CPU load (system/VoE): %d/%d [%%]"), load1, load2);
+        SetDlgItemText(IDC_EDIT_CPU_LOAD, (LPCTSTR)str);
+    }
+
+    // ret is TRUE only when IDC_EDIT_1 held a valid integer.
+    BOOL ret;
+    int channel = GetDlgItemInt(IDC_EDIT_1, &ret);
+
+    if (_veCodecPtr)
+    {
+        if (ret == TRUE)
+        {
+            CodecInst codec;
+            if (_veCodecPtr->GetRecCodec(channel, codec) == 0)
+            {
+                str.Format(_T("RX codec: %s, freq=%d, pt=%d, rate=%d, size=%d"), CharToTchar(codec.plname, 32), codec.plfreq, codec.pltype, codec.rate, codec.pacsize);
+                SetDlgItemText(IDC_EDIT_RX_CODEC_1, (LPCTSTR)str);
+            }
+        }
+    }
+
+    if (_veRtpRtcpPtr)
+    {
+        if (ret == TRUE)
+        {
+            CallStatistics stats;
+            if (_veRtpRtcpPtr->GetRTCPStatistics(channel, stats) == 0)
+            {
+                str.Format(_T("RTCP | RTP: cum=%u, ext=%d, frac=%u, jitter=%u | TX=%d, RX=%d, RTT=%d"),
+                    stats.cumulativeLost, stats.extendedMax, stats.fractionLost, stats.jitterSamples, stats.packetsSent, stats.packetsReceived, stats.rttMs);
+                SetDlgItemText(IDC_EDIT_RTCP_STAT_1, (LPCTSTR)str);
+            }
+        }
+    }
+
+    // Re-arm the timer so this handler runs again in ~1 second.
+    SetTimer(0, 1000, NULL);
+    CDialog::OnTimer(nIDEvent);
+}
+
+void CWinTestDlg::OnBnClickedButtonClearErrorCallback()
+{
+    // Reset the error-callback counter and blank its display field.
+    SetDlgItemText(IDC_EDIT_ERROR_CALLBACK, _T(""));
+    _nErrorCallbacks = 0;
+}
+
+// ----------------------------------------------------------------------------
+// TEST
+// ----------------------------------------------------------------------------
+
+void CWinTestDlg::OnBnClickedButtonTest1()
+{
+    // Scratch handler for ad-hoc experiments; intentionally empty.
+    // add tests here...
+}
+
diff --git a/voice_engine/test/win_test/WinTestDlg.h b/voice_engine/test/win_test/WinTestDlg.h
new file mode 100644
index 0000000..412c220
--- /dev/null
+++ b/voice_engine/test/win_test/WinTestDlg.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#pragma once
+
+// Test helper macros. The VC8+ (_MSC_VER >= 1400) variants use variadic
+// macros; the fallback variants below take a single pre-formatted argument.
+// TEST  reports pass/fail plus VoE LastError() into the main dialog fields.
+// TEST2 forwards the result to the parent dialog via UpdateTest().
+// NOTE(review): these macros expand to bare if/else blocks (no do { } while (0)
+// wrapper), so they are not safe inside an unbraced if/else at the call site —
+// confirm all call sites use them as full statements.
+#if (_MSC_VER >= 1400)
+#define PRINT_GET_RESULT(...) \
+ { \
+ _strMsg.Format(__VA_ARGS__); \
+ SetDlgItemText(IDC_EDIT_GET_OUTPUT, _strMsg); \
+ } \
+
+#define TEST(x, ...) \
+ if (!(x)) \
+ { \
+ _strMsg.Format(__VA_ARGS__); \
+ SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+ _strErr.Format(_T("FAILED (error=%d)"), _veBasePtr->LastError()); \
+ SetDlgItemText(IDC_EDIT_RESULT, _strErr); \
+ _failCount++; \
+ SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount); \
+ SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError()); \
+ } \
+ else \
+ { \
+ _strMsg.Format(__VA_ARGS__); \
+ SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+ SetDlgItemText(IDC_EDIT_RESULT, _T("OK")); \
+ } \
+
+#define TEST2(x, ...) \
+ if (!(x)) \
+ { \
+ _strMsg.Format(__VA_ARGS__); \
+ ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(true, _strMsg); \
+ } \
+ else \
+ { \
+ _strMsg.Format(__VA_ARGS__); \
+ ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(false, _strMsg); \
+ }
+#else
+// Pre-VC8 fallback: no variadic macro support, so the message must arrive
+// already formatted as a single expression.
+#define TEST(x, exp) \
+ if (!(x)) \
+ { \
+ _strMsg.Format(exp); \
+ SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+ _strErr.Format("FAILED (error=%d)", _veBasePtr->LastError()); \
+ SetDlgItemText(IDC_EDIT_RESULT, _strErr); \
+ _failCount++; \
+ SetDlgItemInt(IDC_EDIT_N_FAILS, _failCount); \
+ SetDlgItemInt(IDC_EDIT_LAST_ERROR, _veBasePtr->LastError()); \
+ } \
+ else \
+ { \
+ _strMsg.Format(exp); \
+ SetDlgItemText(IDC_EDIT_MESSAGE, _strMsg); \
+ SetDlgItemText(IDC_EDIT_RESULT, _T("OK")); \
+ } \
+
+#define TEST2(x, exp) \
+ if (!(x)) \
+ { \
+ _strMsg.Format(exp); \
+ ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(true, _strMsg); \
+ } \
+ else \
+ { \
+ _strMsg.Format(exp); \
+ ((CWinTestDlg*)_parentDialogPtr)->UpdateTest(false, _strMsg); \
+ }
+#endif
+
+#include <string>
+
+#include "voe_base.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_codec.h"
+#include "voe_dtmf.h"
+#include "voe_encryption.h"
+#include "voe_external_media.h"
+#include "voe_file.h"
+#include "voe_hardware.h"
+#include "voe_network.h"
+#include "voe_video_sync.h"
+#include "voe_volume_control.h"
+
+#include "voe_audio_processing.h"
+#include "voe_rtp_rtcp.h"
+#include "voe_errors.h"
+
+class MediaProcessImpl;
+class ConnectionObserver;
+class MyEncryption;
+class RxCallback;
+class MyTransport;
+
+using namespace webrtc;
+
+#define MAX_NUM_OF_CHANNELS 10
+
+// CWinTestDlg dialog
+// Main MFC dialog for the VoiceEngine Windows manual test application.
+// Owns the VoiceEngine instance and one pointer per VoE sub-API, and
+// implements the VoE observer interfaces so engine errors and RTP SSRC/CSRC
+// changes are reported back into the UI.
+class CWinTestDlg : public CDialog,
+                    public VoiceEngineObserver,
+                    public VoERTPObserver
+{
+// Construction
+public:
+    CWinTestDlg(CWnd* pParent = NULL);	// standard constructor
+    virtual ~CWinTestDlg();
+
+// Dialog Data
+    enum { IDD = IDD_WINTEST_DIALOG };
+
+    // Called by the TEST2 macro from child dialogs to report a result.
+    BOOL UpdateTest(bool failed, const CString& strMsg);
+
+protected:
+    virtual void DoDataExchange(CDataExchange* pDX);	// DDX/DDV support
+
+protected:  // VoiceEngineObserver
+    // Engine error callback; feeds the error-counter UI.
+    virtual void CallbackOnError(const int channel, const int errCode);
+
+protected:	// VoERTPObserver
+    virtual void OnIncomingCSRCChanged(
+        const int channel, const unsigned int CSRC, const bool added);
+    virtual void OnIncomingSSRCChanged(
+        const int channel, const unsigned int SSRC);
+
+// Implementation
+protected:
+    HICON m_hIcon;
+
+    // Generated message map functions
+    virtual BOOL OnInitDialog();
+    afx_msg void OnSysCommand(UINT nID, LPARAM lParam);
+    afx_msg void OnPaint();
+    afx_msg HCURSOR OnQueryDragIcon();
+    DECLARE_MESSAGE_MAP()
+public:
+    afx_msg void OnBnClickedButtonCreate1();
+    afx_msg void OnBnClickedButtonDelete1();
+
+private:
+    // Engine instance plus one interface pointer per VoE sub-API.
+    VoiceEngine*    _vePtr;
+
+    VoECodec*               _veCodecPtr;
+    VoEExternalMedia*       _veExternalMediaPtr;
+    VoEVolumeControl*       _veVolumeControlPtr;
+    VoEEncryption*          _veEncryptionPtr;
+    VoEHardware*            _veHardwarePtr;
+    VoEVideoSync*           _veVideoSyncPtr;
+    VoENetwork*             _veNetworkPtr;
+    VoEFile*                _veFilePtr;
+    VoEAudioProcessing*     _veApmPtr;
+    VoEBase*                _veBasePtr;
+    VoERTP_RTCP*            _veRtpRtcpPtr;
+
+    // Test doubles/observers registered with the engine.
+    MyTransport*            _transportPtr;
+    MediaProcessImpl*       _externalMediaPtr;
+    ConnectionObserver*     _connectionObserverPtr;
+    MyEncryption*           _encryptionPtr;
+    RxCallback*             _rxVadObserverPtr;
+
+private:
+    // UI/test state mirrored from the checkbox and edit controls.
+    int                     _failCount;
+    CString                 _strMsg;
+    CString                 _strErr;
+    bool                    _externalTransport;
+    bool                    _externalTransportBuild;
+    int                     _checkPlayFileIn;
+    int                     _checkPlayFileIn1;
+    int                     _checkPlayFileIn2;
+    int                     _checkPlayFileOut1;
+    int                     _checkPlayFileOut2;
+    int                     _checkAGC;
+    int                     _checkAGC1;
+    int                     _checkNS;
+    int                     _checkNS1;
+    int                     _checkEC;
+    int                     _checkVAD1;
+    int                     _checkVAD2;
+    int                     _checkSrtpTx1;
+    int                     _checkSrtpTx2;
+    int                     _checkSrtpRx1;
+    int                     _checkSrtpRx2;
+    int                     _checkConference1;
+    int                     _checkConference2;
+    int                     _checkOnHold1;
+    int                     _checkOnHold2;
+    bool                    _delayEstimate1;
+    bool                    _delayEstimate2;
+    bool                    _rxVad;
+    int                     _nErrorCallbacks;
+    int                     _timerTicks;
+    std::string             _long_audio_file_path;
+
+public:
+    // One handler per dialog control; generated by the MFC class wizard.
+    afx_msg void OnBnClickedButtonCreate2();
+    afx_msg void OnBnClickedButtonDelete2();
+    afx_msg void OnCbnSelchangeComboCodec1();
+    afx_msg void OnBnClickedButtonStartListen1();
+    afx_msg void OnBnClickedButtonStopListen1();
+    afx_msg void OnBnClickedButtonStartPlayout1();
+    afx_msg void OnBnClickedButtonStopPlayout1();
+    afx_msg void OnBnClickedButtonStartSend1();
+    afx_msg void OnBnClickedButtonStopSend1();
+    afx_msg void OnCbnSelchangeComboIp2();
+    afx_msg void OnCbnSelchangeComboIp1();
+    afx_msg void OnCbnSelchangeComboCodec2();
+    afx_msg void OnBnClickedButtonStartListen2();
+    afx_msg void OnBnClickedButtonStopListen2();
+    afx_msg void OnBnClickedButtonStartPlayout2();
+    afx_msg void OnBnClickedButtonStopPlayout2();
+    afx_msg void OnBnClickedButtonStartSend2();
+    afx_msg void OnBnClickedButtonStopSend2();
+    afx_msg void OnBnClickedButtonTest11();
+    afx_msg void OnBnClickedCheckExtTrans1();
+    afx_msg void OnBnClickedCheckPlayFileIn1();
+    afx_msg void OnBnClickedCheckPlayFileOut1();
+    afx_msg void OnBnClickedCheckExtTrans2();
+    afx_msg void OnBnClickedCheckPlayFileIn2();
+    afx_msg void OnBnClickedCheckPlayFileOut2();
+    afx_msg void OnBnClickedCheckPlayFileIn();
+    afx_msg void OnBnClickedCheckPlayFileOut();
+    afx_msg void OnCbnSelchangeComboRecDevice();
+    afx_msg void OnCbnSelchangeComboPlayDevice();
+    afx_msg void OnBnClickedCheckExtMediaIn1();
+    afx_msg void OnBnClickedCheckExtMediaOut1();
+    afx_msg void OnNMReleasedcaptureSliderInputVolume(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnNMReleasedcaptureSliderOutputVolume(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnTimer(UINT_PTR nIDEvent);
+    afx_msg void OnBnClickedCheckAgc();
+    CString _strComboIp1;
+    CString _strComboIp2;
+    afx_msg void OnBnClickedCheckNs();
+    afx_msg void OnBnClickedCheckEc();
+    afx_msg void OnBnClickedCheckVad1();
+    afx_msg void OnBnClickedCheckVad2();
+    afx_msg void OnBnClickedCheckExtMediaIn2();
+    afx_msg void OnBnClickedCheckExtMediaOut2();
+    afx_msg void OnBnClickedCheckMuteIn();
+    afx_msg void OnBnClickedCheckMuteIn1();
+    afx_msg void OnBnClickedCheckMuteIn2();
+    afx_msg void OnBnClickedCheckSrtpTx1();
+    afx_msg void OnBnClickedCheckSrtpRx1();
+    afx_msg void OnBnClickedCheckSrtpTx2();
+    afx_msg void OnBnClickedCheckSrtpRx2();
+    afx_msg void OnBnClickedCheckExtEncryption1();
+    afx_msg void OnBnClickedCheckExtEncryption2();
+    afx_msg void OnBnClickedButtonDtmf1();
+    afx_msg void OnBnClickedCheckRecMic();
+    afx_msg void OnBnClickedButtonDtmf2();
+    afx_msg void OnBnClickedButtonTest1();
+    afx_msg void OnBnClickedCheckConference1();
+    afx_msg void OnBnClickedCheckConference2();
+    afx_msg void OnBnClickedCheckOnHold1();
+    afx_msg void OnBnClickedCheckOnHold2();
+    afx_msg void OnBnClickedCheckExtMediaIn();
+    afx_msg void OnBnClickedCheckExtMediaOut();
+    afx_msg void OnLbnSelchangeListCodec1();
+    afx_msg void OnNMReleasedcaptureSliderPanLeft(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnNMReleasedcaptureSliderPanRight(NMHDR *pNMHDR, LRESULT *pResult);
+    afx_msg void OnBnClickedButtonVersion();
+    afx_msg void OnBnClickedCheckDelayEstimate1();
+    afx_msg void OnBnClickedCheckRxvad();
+    afx_msg void OnBnClickedCheckAgc1();
+    afx_msg void OnBnClickedCheckNs1();
+    afx_msg void OnBnClickedCheckRecCall();
+    afx_msg void OnBnClickedCheckTypingDetection();
+    afx_msg void OnBnClickedCheckFEC();
+    afx_msg void OnBnClickedButtonClearErrorCallback();
+    afx_msg void OnBnClickedCheckBwe1();
+};
+#pragma once
diff --git a/voice_engine/test/win_test/res/WinTest.ico b/voice_engine/test/win_test/res/WinTest.ico
new file mode 100644
index 0000000..8a84ca3
--- /dev/null
+++ b/voice_engine/test/win_test/res/WinTest.ico
Binary files differ
diff --git a/voice_engine/test/win_test/res/WinTest.rc2 b/voice_engine/test/win_test/res/WinTest.rc2
new file mode 100644
index 0000000..044bf7e
--- /dev/null
+++ b/voice_engine/test/win_test/res/WinTest.rc2
@@ -0,0 +1,13 @@
+//
+// WinTest.RC2 - resources Microsoft Visual C++ does not edit directly
+//
+
+#ifdef APSTUDIO_INVOKED
+#error this file is not editable by Microsoft Visual C++
+#endif //APSTUDIO_INVOKED
+
+
+/////////////////////////////////////////////////////////////////////////////
+// Add manually edited resources here...
+
+/////////////////////////////////////////////////////////////////////////////
diff --git a/voice_engine/test/win_test/stdafx.cc b/voice_engine/test/win_test/stdafx.cc
new file mode 100644
index 0000000..6cdb906
--- /dev/null
+++ b/voice_engine/test/win_test/stdafx.cc
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// stdafx.cpp : source file that includes just the standard includes
+// WinTest.pch will be the pre-compiled header
+// stdafx.obj will contain the pre-compiled type information
+
+#include "stdafx.h"
+
+
diff --git a/voice_engine/test/win_test/stdafx.h b/voice_engine/test/win_test/stdafx.h
new file mode 100644
index 0000000..b4d875c
--- /dev/null
+++ b/voice_engine/test/win_test/stdafx.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+// stdafx.h : include file for standard system include files,
+// or project specific include files that are used frequently,
+// but are changed infrequently
+
+#pragma once
+
+#ifndef _SECURE_ATL
+#define _SECURE_ATL 1
+#endif
+
+#ifndef VC_EXTRALEAN
+#define VC_EXTRALEAN // Exclude rarely-used stuff from Windows headers
+#endif
+
+// Modify the following defines if you have to target a platform prior to the ones specified below.
+// Refer to MSDN for the latest info on corresponding values for different platforms.
+#ifndef WINVER // Allow use of features specific to Windows XP or later.
+#define WINVER 0x0501 // Change this to the appropriate value to target other versions of Windows.
+#endif
+
+#ifndef _WIN32_WINNT // Allow use of features specific to Windows XP or later.
+#define _WIN32_WINNT 0x0501 // Change this to the appropriate value to target other versions of Windows.
+#endif
+
+#ifndef _WIN32_WINDOWS // Allow use of features specific to Windows 98 or later.
+#define _WIN32_WINDOWS 0x0410 // Change this to the appropriate value to target Windows Me or later.
+#endif
+
+#ifndef _WIN32_IE // Allow use of features specific to IE 6.0 or later.
+#define _WIN32_IE 0x0600 // Change this to the appropriate value to target other versions of IE.
+#endif
+
+#define _ATL_CSTRING_EXPLICIT_CONSTRUCTORS // some CString constructors will be explicit
+
+// turns off MFC's hiding of some common and often safely ignored warning messages
+#define _AFX_ALL_WARNINGS
+
+#include <afxwin.h> // MFC core and standard components
+#include <afxext.h> // MFC extensions
+
+
+
+
+
+#ifndef _AFX_NO_OLE_SUPPORT
+#include <afxdtctl.h> // MFC support for Internet Explorer 4 Common Controls
+#endif
+#ifndef _AFX_NO_AFXCMN_SUPPORT
+#include <afxcmn.h> // MFC support for Windows Common Controls
+#endif // _AFX_NO_AFXCMN_SUPPORT
+
+
+
+
+
+
+
+
+
+#ifdef _UNICODE
+#if defined _M_IX86
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='x86' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#elif defined _M_IA64
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='ia64' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#elif defined _M_X64
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='amd64' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#else
+#pragma comment(linker,"/manifestdependency:\"type='win32' name='Microsoft.Windows.Common-Controls' version='6.0.0.0' processorArchitecture='*' publicKeyToken='6595b64144ccf1df' language='*'\"")
+#endif
+#endif
+
+
diff --git a/voice_engine/transmit_mixer.cc b/voice_engine/transmit_mixer.cc
new file mode 100644
index 0000000..452af1c
--- /dev/null
+++ b/voice_engine/transmit_mixer.cc
@@ -0,0 +1,1507 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "transmit_mixer.h"
+
+#include "audio_frame_operations.h"
+#include "channel.h"
+#include "channel_manager.h"
+#include "critical_section_wrapper.h"
+#include "event_wrapper.h"
+#include "statistics.h"
+#include "trace.h"
+#include "utility.h"
+#include "voe_base_impl.h"
+#include "voe_external_media.h"
+
+#define WEBRTC_ABS(a) (((a) < 0) ? -(a) : (a))
+
+namespace webrtc {
+
+namespace voe {
+
+// Used for downmixing before resampling.
+// TODO(andrew): audio_device should advertise the maximum sample rate it can
+// provide.
+static const int kMaxMonoDeviceDataSizeSamples = 960; // 10 ms, 96 kHz, mono.
+
+// Periodic callback from the monitor module: delivers pending typing-noise,
+// saturation and noise warnings to the registered VoiceEngineObserver.
+// Each warning flag is set elsewhere and cleared here after delivery, so a
+// warning is reported at most once per occurrence.
+void
+TransmitMixer::OnPeriodicProcess()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::OnPeriodicProcess()");
+
+#if defined(WEBRTC_VOICE_ENGINE_TYPING_DETECTION)
+    if (_typingNoiseWarning > 0)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() => "
+                         "CallbackOnError(VE_TYPING_NOISE_WARNING)");
+            // channel == -1 signals an engine-wide (non-channel) warning.
+            _voiceEngineObserverPtr->CallbackOnError(-1,
+                                                     VE_TYPING_NOISE_WARNING);
+        }
+        _typingNoiseWarning = 0;
+    }
+#endif
+
+    if (_saturationWarning > 0)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() =>"
+                         " CallbackOnError(VE_SATURATION_WARNING)");
+            _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
+        }
+        _saturationWarning = 0;
+    }
+
+    if (_noiseWarning > 0)
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (_voiceEngineObserverPtr)
+        {
+            WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                         "TransmitMixer::OnPeriodicProcess() =>"
+                         "CallbackOnError(VE_NOISE_WARNING)");
+            _voiceEngineObserverPtr->CallbackOnError(-1, VE_NOISE_WARNING);
+        }
+        _noiseWarning = 0;
+    }
+}
+
+
+// FileCallback: playout progress notification. Trace-only stub.
+void TransmitMixer::PlayNotification(const WebRtc_Word32 id,
+                                     const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+// FileCallback: recording progress notification. Trace-only stub.
+void TransmitMixer::RecordNotification(const WebRtc_Word32 id,
+                                       const WebRtc_UWord32 durationMs)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
+                 id, durationMs);
+
+    // Not implemented yet.
+}
+
+// FileCallback: the microphone file player reached end-of-file.
+// Clears _filePlaying under _critSect; asserts the id matches our player.
+void TransmitMixer::PlayFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayFileEnded(id=%d)", id);
+
+    assert(id == _filePlayerId);
+
+    CriticalSectionScoped cs(&_critSect);
+
+    _filePlaying = false;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PlayFileEnded() =>"
+                 "file player module is shutdown");
+}
+
+// FileCallback: a recorder finished. The id selects which of the two
+// recorders (microphone vs. whole call) has ended; the matching flag is
+// cleared under _critSect. Unknown ids are silently ignored.
+void
+TransmitMixer::RecordFileEnded(const WebRtc_Word32 id)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::RecordFileEnded(id=%d)", id);
+
+    if (id == _fileRecorderId)
+    {
+        CriticalSectionScoped cs(&_critSect);
+        _fileRecording = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordFileEnded() => fileRecorder module"
+                     "is shutdown");
+    } else if (id == _fileCallRecorderId)
+    {
+        CriticalSectionScoped cs(&_critSect);
+        _fileCallRecording = false;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::RecordFileEnded() => fileCallRecorder"
+                     "module is shutdown");
+    }
+}
+
+// Static factory: allocates a TransmitMixer and returns it via 'mixer'.
+// Returns 0 on success, -1 on allocation failure.
+// NOTE(review): plain operator new throws rather than returning NULL, so the
+// NULL branch is likely dead — confirm whether the build disables exceptions.
+WebRtc_Word32
+TransmitMixer::Create(TransmitMixer*& mixer, const WebRtc_UWord32 instanceId)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
+                 "TransmitMixer::Create(instanceId=%d)", instanceId);
+    mixer = new TransmitMixer(instanceId);
+    if (mixer == NULL)
+    {
+        WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
+                     "TransmitMixer::Create() unable to allocate memory"
+                     "for mixer");
+        return -1;
+    }
+    return 0;
+}
+
+// Static destroyer: deletes the mixer and nulls the caller's pointer.
+void
+TransmitMixer::Destroy(TransmitMixer*& mixer)
+{
+    if (mixer)
+    {
+        delete mixer;
+        mixer = NULL;
+    }
+}
+
+// Constructor: initializes all members; the two CriticalSectionWrapper
+// instances are heap-allocated here and released in the destructor.
+TransmitMixer::TransmitMixer(const WebRtc_UWord32 instanceId) :
+    _engineStatisticsPtr(NULL),
+    _channelManagerPtr(NULL),
+    _audioProcessingModulePtr(NULL),
+    _voiceEngineObserverPtr(NULL),
+    _processThreadPtr(NULL),
+    _filePlayerPtr(NULL),
+    _fileRecorderPtr(NULL),
+    _fileCallRecorderPtr(NULL),
+    // Avoid conflict with other channels by adding 1024 - 1026,
+    // won't use as much as 1024 channels.
+    _filePlayerId(instanceId + 1024),
+    _fileRecorderId(instanceId + 1025),
+    _fileCallRecorderId(instanceId + 1026),
+    _filePlaying(false),
+    _fileRecording(false),
+    _fileCallRecording(false),
+    _audioLevel(),
+    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    _timeActive(0),
+    _timeSinceLastTyping(0),
+    _penaltyCounter(0),
+    _typingNoiseWarning(0),
+    _timeWindow(10), // 10ms slots accepted to count as a hit
+    _costPerTyping(100), // Penalty added for a typing + activity coincide
+    _reportingThreshold(300), // Threshold for _penaltyCounter
+    _penaltyDecay(1), // how much we reduce _penaltyCounter every 10 ms.
+    _typeEventDelay(2), // how "old" event we check for
+#endif
+    _saturationWarning(0),
+    _noiseWarning(0),
+    _instanceId(instanceId),
+    _mixFileWithMicrophone(false),
+    _captureLevel(0),
+    external_postproc_ptr_(NULL),
+    external_preproc_ptr_(NULL),
+    _mute(false),
+    _remainingMuteMicTimeMs(0),
+    _mixingFrequency(0),
+    stereo_codec_(false),
+    swap_stereo_channels_(false)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::TransmitMixer() - ctor");
+}
+
+// Destructor: deregisters from the monitor/process modules, detaches external
+// media callbacks, shuts down any active file player/recorders under
+// _critSect, then frees the critical sections created in the constructor.
+TransmitMixer::~TransmitMixer()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::~TransmitMixer() - dtor");
+    _monitorModule.DeRegisterObserver();
+    if (_processThreadPtr)
+    {
+        _processThreadPtr->DeRegisterModule(&_monitorModule);
+    }
+    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
+    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
+    {
+        CriticalSectionScoped cs(&_critSect);
+        if (_fileRecorderPtr)
+        {
+            _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+            _fileRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+            _fileRecorderPtr = NULL;
+        }
+        if (_fileCallRecorderPtr)
+        {
+            _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+            _fileCallRecorderPtr->StopRecording();
+            FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+            _fileCallRecorderPtr = NULL;
+        }
+        if (_filePlayerPtr)
+        {
+            _filePlayerPtr->RegisterModuleFileCallback(NULL);
+            _filePlayerPtr->StopPlayingFile();
+            FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+            _filePlayerPtr = NULL;
+        }
+    }
+    // References were bound to heap objects created in the ctor.
+    delete &_critSect;
+    delete &_callbackCritSect;
+}
+
+// Wires the mixer to engine-owned services and registers the monitor module
+// with the process thread (registration failure is only traced, not fatal).
+// Always returns 0.
+WebRtc_Word32
+TransmitMixer::SetEngineInformation(ProcessThread& processThread,
+                                    Statistics& engineStatistics,
+                                    ChannelManager& channelManager)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::SetEngineInformation()");
+
+    _processThreadPtr = &processThread;
+    _engineStatisticsPtr = &engineStatistics;
+    _channelManagerPtr = &channelManager;
+
+    if (_processThreadPtr->RegisterModule(&_monitorModule) == -1)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+                     "TransmitMixer::SetEngineInformation() failed to"
+                     "register the monitor module");
+    } else
+    {
+        _monitorModule.RegisterObserver(*this);
+    }
+
+    return 0;
+}
+
+// Registers the (single) engine observer used for warning callbacks.
+// Fails with VE_INVALID_OPERATION if an observer is already registered.
+WebRtc_Word32
+TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::RegisterVoiceEngineObserver()");
+    CriticalSectionScoped cs(&_callbackCritSect);
+
+    if (_voiceEngineObserverPtr)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceError,
+            "RegisterVoiceEngineObserver() observer already enabled");
+        return -1;
+    }
+    _voiceEngineObserverPtr = &observer;
+    return 0;
+}
+
+// Stores the engine's APM pointer (may be NULL). Non-owning; always returns 0.
+WebRtc_Word32
+TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::SetAudioProcessingModule("
+                 "audioProcessingModule=0x%x)",
+                 audioProcessingModule);
+    _audioProcessingModulePtr = audioProcessingModule;
+    return 0;
+}
+
+// Scans all sending channels and derives the mixer-wide state:
+// stereo_codec_ is set if any send codec is 2-channel, and _mixingFrequency
+// becomes the highest send-codec sample rate, starting from 8 kHz and capped
+// at 32 kHz (see TODO below).
+void TransmitMixer::CheckForSendCodecChanges() {
+  ScopedChannel sc(*_channelManagerPtr);
+  void* iterator = NULL;
+  Channel* channel = sc.GetFirstChannel(iterator);
+  _mixingFrequency = 8000;
+  stereo_codec_ = false;
+  while (channel != NULL) {
+    if (channel->Sending()) {
+      CodecInst codec;
+      channel->GetSendCodec(codec);
+
+      if (codec.channels == 2)
+        stereo_codec_ = true;
+
+      // TODO(tlegrand): Remove once we have full 48 kHz support in
+      // Audio Coding Module.
+      if (codec.plfreq > 32000) {
+        _mixingFrequency = 32000;
+      } else if (codec.plfreq > _mixingFrequency) {
+        _mixingFrequency = codec.plfreq;
+      }
+    }
+    channel = sc.GetNextChannel(iterator);
+  }
+}
+
+// Runs the full near-end capture pipeline on one 10 ms block of device audio:
+//   1. refresh codec-derived mixing state,
+//   2. resample/copy device samples into _audioFrame,
+//   3. external preprocessing callback (if registered),
+//   4. APM processing (AEC/AGC/NS) with delay/drift/mic-level info,
+//   5. optional stereo channel swap (only when a stereo codec is in use),
+//   6. typing detection, DTMF-feedback mute countdown, explicit mute,
+//   7. speech level measurement, file mixing, file recording,
+//   8. external postprocessing callback (if registered).
+// Returns -1 only if GenerateAudioFrame() fails, else 0.
+WebRtc_Word32
+TransmitMixer::PrepareDemux(const void* audioSamples,
+                            const WebRtc_UWord32 nSamples,
+                            const WebRtc_UWord8 nChannels,
+                            const WebRtc_UWord32 samplesPerSec,
+                            const WebRtc_UWord16 totalDelayMS,
+                            const WebRtc_Word32 clockDrift,
+                            const WebRtc_UWord16 currentMicLevel)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::PrepareDemux(nSamples=%u, nChannels=%u,"
+                 "samplesPerSec=%u, totalDelayMS=%u, clockDrift=%u,"
+                 "currentMicLevel=%u)", nSamples, nChannels, samplesPerSec,
+                 totalDelayMS, clockDrift, currentMicLevel);
+
+    CheckForSendCodecChanges();
+
+    // --- Resample input audio and create/store the initial audio frame
+
+    if (GenerateAudioFrame(static_cast<const WebRtc_Word16*>(audioSamples),
+                           nSamples,
+                           nChannels,
+                           samplesPerSec) == -1)
+    {
+        return -1;
+    }
+
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (external_preproc_ptr_) {
+            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
+                                           _audioFrame.data_,
+                                           _audioFrame.samples_per_channel_,
+                                           _audioFrame.sample_rate_hz_,
+                                           _audioFrame.num_channels_ == 2);
+        }
+    }
+
+    // --- Near-end Voice Quality Enhancement (APM) processing
+
+    APMProcessStream(totalDelayMS, clockDrift, currentMicLevel);
+
+    if (swap_stereo_channels_ && stereo_codec_)
+      // Only bother swapping if we're using a stereo codec.
+      AudioFrameOperations::SwapStereoChannels(&_audioFrame);
+
+    // --- Annoying typing detection (utilizes the APM/VAD decision)
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+    TypingDetection();
+#endif
+
+    // --- Mute during DTMF tone if direct feedback is enabled
+
+    if (_remainingMuteMicTimeMs > 0)
+    {
+        AudioFrameOperations::Mute(_audioFrame);
+        // Counts down in 10 ms steps (one call per 10 ms frame).
+        _remainingMuteMicTimeMs -= 10;
+        if (_remainingMuteMicTimeMs < 0)
+        {
+            _remainingMuteMicTimeMs = 0;
+        }
+    }
+
+    // --- Mute signal
+
+    if (_mute)
+    {
+        AudioFrameOperations::Mute(_audioFrame);
+    }
+
+    // --- Measure audio level of speech after APM processing
+
+    _audioLevel.ComputeLevel(_audioFrame);
+
+    // --- Mix with file (does not affect the mixing frequency)
+
+    if (_filePlaying)
+    {
+        MixOrReplaceAudioWithFile(_mixingFrequency);
+    }
+
+    // --- Record to file
+
+    if (_fileRecording)
+    {
+        RecordAudioToFile(_mixingFrequency);
+    }
+
+    {
+        CriticalSectionScoped cs(&_callbackCritSect);
+        if (external_postproc_ptr_) {
+            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
+                                            _audioFrame.data_,
+                                            _audioFrame.samples_per_channel_,
+                                            _audioFrame.sample_rate_hz_,
+                                            _audioFrame.num_channels_ == 2);
+        }
+    }
+
+    return 0;
+}
+
+// Distributes the prepared microphone frame to every channel: on-hold
+// channels only get their local timestamp advanced; sending channels receive
+// a copy of the mixed frame and prepare it for encoding. Always returns 0.
+WebRtc_Word32
+TransmitMixer::DemuxAndMix()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::DemuxAndMix()");
+
+    ScopedChannel sc(*_channelManagerPtr);
+    void* iterator(NULL);
+    Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        if (channelPtr->InputIsOnHold())
+        {
+            channelPtr->UpdateLocalTimeStamp();
+        } else if (channelPtr->Sending())
+        {
+            // load temporary audioframe with current (mixed) microphone signal
+            AudioFrame tmpAudioFrame = _audioFrame;
+
+            channelPtr->Demultiplex(tmpAudioFrame);
+            channelPtr->PrepareEncodeAndSend(_mixingFrequency);
+        }
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+    return 0;
+}
+
+// Triggers encode-and-send on every channel that is sending and not on hold.
+// Always returns 0.
+WebRtc_Word32
+TransmitMixer::EncodeAndSend()
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::EncodeAndSend()");
+
+    ScopedChannel sc(*_channelManagerPtr);
+    void* iterator(NULL);
+    Channel* channelPtr = sc.GetFirstChannel(iterator);
+    while (channelPtr != NULL)
+    {
+        if (channelPtr->Sending() && !channelPtr->InputIsOnHold())
+        {
+            channelPtr->EncodeAndSend();
+        }
+        channelPtr = sc.GetNextChannel(iterator);
+    }
+    return 0;
+}
+
+// Returns the last stored capture (mic) level.
+WebRtc_UWord32 TransmitMixer::CaptureLevel() const
+{
+    return _captureLevel;
+}
+
+// Arms the DTMF direct-feedback mute: the mic is muted for lengthMs,
+// decremented 10 ms per frame in PrepareDemux().
+void
+TransmitMixer::UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+               "TransmitMixer::UpdateMuteMicrophoneTime(lengthMs=%d)",
+               lengthMs);
+    _remainingMuteMicTimeMs = lengthMs;
+}
+
+// Clears the accumulated speech-level measurement when sending stops.
+WebRtc_Word32
+TransmitMixer::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StopSend()");
+    _audioLevel.Clear();
+    return 0;
+}
+
+// Starts playing a file (by name) as the microphone signal. If playback is
+// already active this is a warning no-op returning 0. Otherwise any previous
+// player is destroyed, a new one created for 'format', and playback started;
+// failures tear the player down again and return -1.
+// NOTE(review): _filePlaying is checked before _critSect is taken — confirm
+// callers serialize Start/Stop, otherwise there is a small race window.
+int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
+                                                const bool loop,
+                                                const FileFormats format,
+                                                const int startPosition,
+                                                const float volumeScaling,
+                                                const int stopPosition,
+                                                const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+                 "TransmitMixer::StartPlayingFileAsMicrophone("
+                 "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
+                 " startPosition=%d, stopPosition=%d)", fileName, loop,
+                 format, volumeScaling, startPosition, stopPosition);
+
+    if (_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() is already playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(&_critSect);
+
+    // Destroy the old instance
+    if (_filePlayerPtr)
+    {
+        _filePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+    }
+
+    // Dynamically create the instance
+    _filePlayerPtr
+        = FilePlayer::CreateFilePlayer(_filePlayerId,
+                                       (const FileFormats) format);
+
+    if (_filePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
+        return -1;
+    }
+
+    // 0 disables progress notifications.
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_filePlayerPtr->StartPlayingFile(
+        fileName,
+        loop,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*) codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFile() failed to start file playout");
+        _filePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+        return -1;
+    }
+
+    // Register for PlayFileEnded() so _filePlaying is cleared at EOF.
+    _filePlayerPtr->RegisterModuleFileCallback(this);
+    _filePlaying = true;
+
+    return 0;
+}
+
+// Stream overload: same behavior as the file-name variant, but reads audio
+// from the caller-owned InStream (rejects NULL with VE_BAD_FILE). Note the
+// stream variant does not take a loop parameter.
+int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
+                                                const FileFormats format,
+                                                const int startPosition,
+                                                const float volumeScaling,
+                                                const int stopPosition,
+                                                const CodecInst* codecInst)
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
+                 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
+                 format, volumeScaling, startPosition, stopPosition);
+
+    if (stream == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFileAsMicrophone() NULL as input stream");
+        return -1;
+    }
+
+    if (_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_ALREADY_PLAYING, kTraceWarning,
+            "StartPlayingFileAsMicrophone() is already playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(&_critSect);
+
+    // Destroy the old instance
+    if (_filePlayerPtr)
+    {
+        _filePlayerPtr->RegisterModuleFileCallback(NULL);
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+    }
+
+    // Dynamically create the instance
+    _filePlayerPtr
+        = FilePlayer::CreateFilePlayer(_filePlayerId,
+                                       (const FileFormats) format);
+
+    if (_filePlayerPtr == NULL)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_ARGUMENT, kTraceWarning,
+            "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
+        return -1;
+    }
+
+    // 0 disables progress notifications.
+    const WebRtc_UWord32 notificationTime(0);
+
+    if (_filePlayerPtr->StartPlayingFile(
+        (InStream&) *stream,
+        startPosition,
+        volumeScaling,
+        notificationTime,
+        stopPosition,
+        (const CodecInst*) codecInst) != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_BAD_FILE, kTraceError,
+            "StartPlayingFile() failed to start file playout");
+        _filePlayerPtr->StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+        _filePlayerPtr = NULL;
+        return -1;
+    }
+    _filePlayerPtr->RegisterModuleFileCallback(this);
+    _filePlaying = true;
+
+    return 0;
+}
+
+// Stops file-as-microphone playback and destroys the player. Returns 0 if
+// nothing was playing (warning only), -1 if the player refused to stop.
+// NOTE(review): _filePlayerPtr is dereferenced without a NULL check; this
+// relies on _filePlaying == true implying a live player — confirm invariant.
+int TransmitMixer::StopPlayingFileAsMicrophone()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
+                 "TransmitMixer::StopPlayingFileAsMicrophone()");
+
+    if (!_filePlaying)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_INVALID_OPERATION, kTraceWarning,
+            "StopPlayingFileAsMicrophone() isnot playing");
+        return 0;
+    }
+
+    CriticalSectionScoped cs(&_critSect);
+
+    if (_filePlayerPtr->StopPlayingFile() != 0)
+    {
+        _engineStatisticsPtr->SetLastError(
+            VE_CANNOT_STOP_PLAYOUT, kTraceError,
+            "StopPlayingFile() couldnot stop playing file");
+        return -1;
+    }
+
+    _filePlayerPtr->RegisterModuleFileCallback(NULL);
+    FilePlayer::DestroyFilePlayer(_filePlayerPtr);
+    _filePlayerPtr = NULL;
+    _filePlaying = false;
+
+    return 0;
+}
+
+int TransmitMixer::IsPlayingFileAsMicrophone() const
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::IsPlayingFileAsMicrophone()");
+ return _filePlaying;
+}
+
+int TransmitMixer::ScaleFileAsMicrophonePlayout(const float scale)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::ScaleFileAsMicrophonePlayout(scale=%5.3f)",
+ scale);
+
+ CriticalSectionScoped cs(&_critSect);
+
+ if (!_filePlaying)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_OPERATION, kTraceError,
+ "ScaleFileAsMicrophonePlayout() isnot playing file");
+ return -1;
+ }
+
+ if ((_filePlayerPtr == NULL) ||
+ (_filePlayerPtr->SetAudioScaling(scale) != 0))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "SetAudioScaling() failed to scale playout");
+ return -1;
+ }
+
+ return 0;
+}
+
+int TransmitMixer::StartRecordingMicrophone(const char* fileName,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
+ fileName);
+
+ if (_fileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "StartRecordingMicrophone() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+ CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+ if (codecInst != NULL &&
+ (codecInst->channels < 0 || codecInst->channels > 2))
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingMicrophone() invalid compression");
+ return (-1);
+ }
+ if (codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst = &dummyCodec;
+ } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ } else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ // Destroy the old instance
+ if (_fileRecorderPtr)
+ {
+ _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+ _fileRecorderPtr = NULL;
+ }
+
+ _fileRecorderPtr =
+ FileRecorder::CreateFileRecorder(_fileRecorderId,
+ (const FileFormats) format);
+ if (_fileRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingMicrophone() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_fileRecorderPtr->StartRecordingAudioFile(
+ fileName,
+ (const CodecInst&) *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _fileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+ _fileRecorderPtr = NULL;
+ return -1;
+ }
+ _fileRecorderPtr->RegisterModuleFileCallback(this);
+ _fileRecording = true;
+
+ return 0;
+}
+
+int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StartRecordingMicrophone()");
+
+ if (_fileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "StartRecordingMicrophone() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+ CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+ if (codecInst != NULL && codecInst->channels != 1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingMicrophone() invalid compression");
+ return (-1);
+ }
+ if (codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst = &dummyCodec;
+ } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ } else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ // Destroy the old instance
+ if (_fileRecorderPtr)
+ {
+ _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+ _fileRecorderPtr = NULL;
+ }
+
+ _fileRecorderPtr =
+ FileRecorder::CreateFileRecorder(_fileRecorderId,
+ (const FileFormats) format);
+ if (_fileRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingMicrophone() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_fileRecorderPtr->StartRecordingAudioFile(*stream,
+ *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _fileRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+ _fileRecorderPtr = NULL;
+ return -1;
+ }
+
+ _fileRecorderPtr->RegisterModuleFileCallback(this);
+ _fileRecording = true;
+
+ return 0;
+}
+
+
+int TransmitMixer::StopRecordingMicrophone()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StopRecordingMicrophone()");
+
+ if (!_fileRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "StopRecordingMicrophone() isnot recording");
+ return 0;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ if (_fileRecorderPtr->StopRecording() != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STOP_RECORDING_FAILED, kTraceError,
+ "StopRecording(), could not stop recording");
+ return -1;
+ }
+ _fileRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileRecorderPtr);
+ _fileRecorderPtr = NULL;
+ _fileRecording = false;
+
+ return 0;
+}
+
+int TransmitMixer::StartRecordingCall(const char* fileName,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);
+
+ if (_fileCallRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "StartRecordingCall() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+ CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+ if (codecInst != NULL && codecInst->channels != 1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingCall() invalid compression");
+ return (-1);
+ }
+ if (codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst = &dummyCodec;
+ } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ } else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ // Destroy the old instance
+ if (_fileCallRecorderPtr)
+ {
+ _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+ _fileCallRecorderPtr = NULL;
+ }
+
+ _fileCallRecorderPtr
+ = FileRecorder::CreateFileRecorder(_fileCallRecorderId,
+ (const FileFormats) format);
+ if (_fileCallRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingCall() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_fileCallRecorderPtr->StartRecordingAudioFile(
+ fileName,
+ (const CodecInst&) *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _fileCallRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+ _fileCallRecorderPtr = NULL;
+ return -1;
+ }
+ _fileCallRecorderPtr->RegisterModuleFileCallback(this);
+ _fileCallRecording = true;
+
+ return 0;
+}
+
+int TransmitMixer::StartRecordingCall(OutStream* stream,
+ const CodecInst* codecInst)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StartRecordingCall()");
+
+ if (_fileCallRecording)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "StartRecordingCall() is already recording");
+ return 0;
+ }
+
+ FileFormats format;
+ const WebRtc_UWord32 notificationTime(0); // Not supported in VoE
+ CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
+
+ if (codecInst != NULL && codecInst->channels != 1)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_BAD_ARGUMENT, kTraceError,
+ "StartRecordingCall() invalid compression");
+ return (-1);
+ }
+ if (codecInst == NULL)
+ {
+ format = kFileFormatPcm16kHzFile;
+ codecInst = &dummyCodec;
+ } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
+ (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
+ {
+ format = kFileFormatWavFile;
+ } else
+ {
+ format = kFileFormatCompressedFile;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ // Destroy the old instance
+ if (_fileCallRecorderPtr)
+ {
+ _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+ _fileCallRecorderPtr = NULL;
+ }
+
+ _fileCallRecorderPtr =
+ FileRecorder::CreateFileRecorder(_fileCallRecorderId,
+ (const FileFormats) format);
+ if (_fileCallRecorderPtr == NULL)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_INVALID_ARGUMENT, kTraceError,
+ "StartRecordingCall() fileRecorder format isnot correct");
+ return -1;
+ }
+
+ if (_fileCallRecorderPtr->StartRecordingAudioFile(*stream,
+ *codecInst,
+ notificationTime) != 0)
+ {
+ _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
+ "StartRecordingAudioFile() failed to start file recording");
+ _fileCallRecorderPtr->StopRecording();
+ FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+ _fileCallRecorderPtr = NULL;
+ return -1;
+ }
+
+ _fileCallRecorderPtr->RegisterModuleFileCallback(this);
+ _fileCallRecording = true;
+
+ return 0;
+}
+
+int TransmitMixer::StopRecordingCall()
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::StopRecordingCall()");
+
+ if (!_fileCallRecording)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
+ "StopRecordingCall() file isnot recording");
+ return -1;
+ }
+
+ CriticalSectionScoped cs(&_critSect);
+
+ if (_fileCallRecorderPtr->StopRecording() != 0)
+ {
+ _engineStatisticsPtr->SetLastError(
+ VE_STOP_RECORDING_FAILED, kTraceError,
+ "StopRecording(), could not stop recording");
+ return -1;
+ }
+
+ _fileCallRecorderPtr->RegisterModuleFileCallback(NULL);
+ FileRecorder::DestroyFileRecorder(_fileCallRecorderPtr);
+ _fileCallRecorderPtr = NULL;
+ _fileCallRecording = false;
+
+ return 0;
+}
+
+void
+TransmitMixer::SetMixWithMicStatus(bool mix)
+{
+ _mixFileWithMicrophone = mix;
+}
+
+int TransmitMixer::RegisterExternalMediaProcessing(
+ VoEMediaProcess* object,
+ ProcessingTypes type) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::RegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (!object) {
+ return -1;
+ }
+
+ // Store the callback object according to the processing type.
+ if (type == kRecordingAllChannelsMixed) {
+ external_postproc_ptr_ = object;
+ } else if (type == kRecordingPreprocessing) {
+ external_preproc_ptr_ = object;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::DeRegisterExternalMediaProcessing()");
+
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (type == kRecordingAllChannelsMixed) {
+ external_postproc_ptr_ = NULL;
+ } else if (type == kRecordingPreprocessing) {
+ external_preproc_ptr_ = NULL;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
+int
+TransmitMixer::SetMute(bool enable)
+{
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::SetMute(enable=%d)", enable);
+ _mute = enable;
+ return 0;
+}
+
+bool
+TransmitMixer::Mute() const
+{
+ return _mute;
+}
+
+WebRtc_Word8 TransmitMixer::AudioLevel() const
+{
+ // Speech + file level [0,9]
+ return _audioLevel.Level();
+}
+
+WebRtc_Word16 TransmitMixer::AudioLevelFullRange() const
+{
+ // Speech + file level [0,32767]
+ return _audioLevel.LevelFullRange();
+}
+
+bool TransmitMixer::IsRecordingCall()
+{
+ return _fileCallRecording;
+}
+
+bool TransmitMixer::IsRecordingMic()
+{
+
+ return _fileRecording;
+}
+
+// TODO(andrew): use RemixAndResample for this.
int TransmitMixer::GenerateAudioFrame(const int16_t audio[],
                                      int samples_per_channel,
                                      int num_channels,
                                      int sample_rate_hz)
{
    // Converts raw device audio into _audioFrame at the mixing frequency:
    // optional stereo->mono downmix first, then resampling.
    const int16_t* audio_ptr = audio;
    int16_t mono_audio[kMaxMonoDeviceDataSizeSamples];
    assert(samples_per_channel <= kMaxMonoDeviceDataSizeSamples);
    // If no stereo codecs are in use, we downmix a stereo stream from the
    // device early in the chain, before resampling.
    if (num_channels == 2 && !stereo_codec_) {
      AudioFrameOperations::StereoToMono(audio, samples_per_channel,
                                         mono_audio);
      audio_ptr = mono_audio;
      num_channels = 1;
    }

    // Stereo input needs the interleaved (synchronous stereo) resampler.
    ResamplerType resampler_type = (num_channels == 1) ?
            kResamplerSynchronous : kResamplerSynchronousStereo;

    // Reconfigure the resampler only when rates/type actually changed.
    if (_audioResampler.ResetIfNeeded(sample_rate_hz,
                                      _mixingFrequency,
                                      resampler_type) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::GenerateAudioFrame() unable to resample");
        return -1;
    }
    // Push() consumes interleaved samples; its output count is the total
    // sample count across channels.
    if (_audioResampler.Push(audio_ptr,
                             samples_per_channel * num_channels,
                             _audioFrame.data_,
                             AudioFrame::kMaxDataSizeSamples,
                             _audioFrame.samples_per_channel_) == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::GenerateAudioFrame() resampling failed");
        return -1;
    }

    // Push() reported total samples; convert back to per-channel count.
    _audioFrame.samples_per_channel_ /= num_channels;
    _audioFrame.id_ = _instanceId;
    // No meaningful capture timestamp at this point.
    _audioFrame.timestamp_ = -1;
    _audioFrame.sample_rate_hz_ = _mixingFrequency;
    _audioFrame.speech_type_ = AudioFrame::kNormalSpeech;
    _audioFrame.vad_activity_ = AudioFrame::kVadUnknown;
    _audioFrame.num_channels_ = num_channels;

    return 0;
}
+
+WebRtc_Word32 TransmitMixer::RecordAudioToFile(
+ const WebRtc_UWord32 mixingFrequency)
+{
+ CriticalSectionScoped cs(&_critSect);
+ if (_fileRecorderPtr == NULL)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::RecordAudioToFile() filerecorder doesnot"
+ "exist");
+ return -1;
+ }
+
+ if (_fileRecorderPtr->RecordAudioToFile(_audioFrame) != 0)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::RecordAudioToFile() file recording"
+ "failed");
+ return -1;
+ }
+
+ return 0;
+}
+
+WebRtc_Word32 TransmitMixer::MixOrReplaceAudioWithFile(
+ const int mixingFrequency)
+{
+ scoped_array<WebRtc_Word16> fileBuffer(new WebRtc_Word16[640]);
+
+ int fileSamples(0);
+ {
+ CriticalSectionScoped cs(&_critSect);
+ if (_filePlayerPtr == NULL)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, -1),
+ "TransmitMixer::MixOrReplaceAudioWithFile()"
+ "fileplayer doesnot exist");
+ return -1;
+ }
+
+ if (_filePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
+ fileSamples,
+ mixingFrequency) == -1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::MixOrReplaceAudioWithFile() file"
+ " mixing failed");
+ return -1;
+ }
+ }
+
+ assert(_audioFrame.samples_per_channel_ == fileSamples);
+
+ if (_mixFileWithMicrophone)
+ {
+ // Currently file stream is always mono.
+ // TODO(xians): Change the code when FilePlayer supports real stereo.
+ Utility::MixWithSat(_audioFrame.data_,
+ _audioFrame.num_channels_,
+ fileBuffer.get(),
+ 1,
+ fileSamples);
+ } else
+ {
+ // Replace ACM audio with file.
+ // Currently file stream is always mono.
+ // TODO(xians): Change the code when FilePlayer supports real stereo.
+ _audioFrame.UpdateFrame(-1,
+ -1,
+ fileBuffer.get(),
+ fileSamples,
+ mixingFrequency,
+ AudioFrame::kNormalSpeech,
+ AudioFrame::kVadUnknown,
+ 1);
+ }
+ return 0;
+}
+
// Runs _audioFrame through the AudioProcessing module (AEC/AGC/NS etc.),
// keeping APM's channel count and sample rate in sync with the frame first.
// Updates _captureLevel from analog AGC and latches _saturationWarning for
// the monitor thread. Individual APM setter failures are logged but do not
// abort processing; always returns 0.
WebRtc_Word32 TransmitMixer::APMProcessStream(
    const WebRtc_UWord16 totalDelayMS,
    const WebRtc_Word32 clockDrift,
    const WebRtc_UWord16 currentMicLevel)
{
    WebRtc_UWord16 captureLevel(currentMicLevel);

    // Check if the number of incoming channels has changed. This has taken
    // both the capture device and send codecs into account.
    if (_audioFrame.num_channels_ !=
        _audioProcessingModulePtr->num_input_channels())
    {
        if (_audioProcessingModulePtr->set_num_channels(
                _audioFrame.num_channels_,
                _audioFrame.num_channels_))
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessing::set_num_channels(%d, %d) => error",
                         _audioFrame.num_channels_,
                         _audioProcessingModulePtr->num_output_channels());
        }
    }

    // If the frequency has changed we need to change APM settings
    // Sending side is "master"
    if (_audioProcessingModulePtr->sample_rate_hz() !=
        _audioFrame.sample_rate_hz_)
    {
        if (_audioProcessingModulePtr->set_sample_rate_hz(
                _audioFrame.sample_rate_hz_))
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessing::set_sample_rate_hz(%u) => error",
                         _audioFrame.sample_rate_hz_);
        }
    }

    // Report the render-to-capture delay so the AEC can align streams.
    if (_audioProcessingModulePtr->set_stream_delay_ms(totalDelayMS) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::set_stream_delay_ms(%u) => error",
                     totalDelayMS);
    }
    // Feed the current analog mic level into the AGC.
    if (_audioProcessingModulePtr->gain_control()->set_stream_analog_level(
        captureLevel) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::set_stream_analog_level(%u) => error",
                     captureLevel);
    }
    // Drift samples only matter when AEC drift compensation is on.
    if (_audioProcessingModulePtr->echo_cancellation()->
            is_drift_compensation_enabled())
    {
        if (_audioProcessingModulePtr->echo_cancellation()->
                set_stream_drift_samples(clockDrift) == -1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "AudioProcessing::set_stream_drift_samples(%u) => error",
                         clockDrift);
        }
    }
    if (_audioProcessingModulePtr->ProcessStream(&_audioFrame) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "AudioProcessing::ProcessStream() => error");
    }
    // Read back the level the AGC wants the analog mic set to.
    captureLevel =
        _audioProcessingModulePtr->gain_control()->stream_analog_level();

    // Store new capture level (only updated when analog AGC is enabled)
    _captureLevel = captureLevel;

    // Log notifications
    if (_audioProcessingModulePtr->gain_control()->stream_is_saturated())
    {
        if (_saturationWarning == 1)
        {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::APMProcessStream() pending "
                         "saturation warning exists");
        }
        _saturationWarning = 1; // triggers callback from moduleprocess thread
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "TransmitMixer::APMProcessStream() VE_SATURATION_WARNING "
                     "message has been posted for callback");
    }

    return 0;
}
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+int TransmitMixer::TypingDetection()
+{
+
+ // We let the VAD determine if we're using this feature or not.
+ if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown)
+ {
+ return (0);
+ }
+
+ int keyPressed = EventWrapper::KeyPressed();
+
+ if (keyPressed < 0)
+ {
+ return (-1);
+ }
+
+ if (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
+ _timeActive++;
+ else
+ _timeActive = 0;
+
+ // Keep track if time since last typing event
+ if (keyPressed)
+ {
+ _timeSinceLastTyping = 0;
+ }
+ else
+ {
+ ++_timeSinceLastTyping;
+ }
+
+ if ((_timeSinceLastTyping < _typeEventDelay)
+ && (_audioFrame.vad_activity_ == AudioFrame::kVadActive)
+ && (_timeActive < _timeWindow))
+ {
+ _penaltyCounter += _costPerTyping;
+ if (_penaltyCounter > _reportingThreshold)
+ {
+ if (_typingNoiseWarning == 1)
+ {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+ VoEId(_instanceId, -1),
+ "TransmitMixer::TypingDetection() pending "
+ "noise-saturation warning exists");
+ }
+ // triggers callback from the module process thread
+ _typingNoiseWarning = 1;
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
+ "TransmitMixer::TypingDetection() "
+ "VE_TYPING_NOISE_WARNING message has been posted for"
+ "callback");
+ }
+ }
+
+ if (_penaltyCounter > 0)
+ _penaltyCounter-=_penaltyDecay;
+
+ return (0);
+}
+#endif
+
+int TransmitMixer::GetMixingFrequency()
+{
+ assert(_mixingFrequency!=0);
+ return (_mixingFrequency);
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+int TransmitMixer::TimeSinceLastTyping(int &seconds)
+{
+ // We check in VoEAudioProcessingImpl that this is only called when
+ // typing detection is active.
+
+ // Round to whole seconds
+ seconds = (_timeSinceLastTyping + 50) / 100;
+ return(0);
+}
+#endif
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
+ int costPerTyping,
+ int reportingThreshold,
+ int penaltyDecay,
+ int typeEventDelay)
+{
+ if(timeWindow != 0)
+ _timeWindow = timeWindow;
+ if(costPerTyping != 0)
+ _costPerTyping = costPerTyping;
+ if(reportingThreshold != 0)
+ _reportingThreshold = reportingThreshold;
+ if(penaltyDecay != 0)
+ _penaltyDecay = penaltyDecay;
+ if(typeEventDelay != 0)
+ _typeEventDelay = typeEventDelay;
+
+
+ return(0);
+}
+#endif
+
+void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
+ swap_stereo_channels_ = enable;
+}
+
+bool TransmitMixer::IsStereoChannelSwappingEnabled() {
+ return swap_stereo_channels_;
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/transmit_mixer.h b/voice_engine/transmit_mixer.h
new file mode 100644
index 0000000..0dac049
--- /dev/null
+++ b/voice_engine/transmit_mixer.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
+#define WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
+
+#include "common_types.h"
+#include "voe_base.h"
+#include "file_player.h"
+#include "file_recorder.h"
+#include "level_indicator.h"
+#include "module_common_types.h"
+#include "monitor_module.h"
+#include "resampler.h"
+#include "voice_engine_defines.h"
+
+
+namespace webrtc {
+
+class AudioProcessing;
+class ProcessThread;
+class VoEExternalMedia;
+class VoEMediaProcess;
+
+namespace voe {
+
+class ChannelManager;
+class MixedAudio;
+class Statistics;
+
// TransmitMixer owns the capture-side audio path of VoiceEngine: it
// converts device audio to the mixing rate, optionally mixes/replaces it
// with file audio, runs it through the AudioProcessing module, and feeds
// the result to all sending channels. It also handles microphone/call
// recording and typing detection.
class TransmitMixer : public MonitorObserver,
                      public FileCallback

{
public:
    // Factory / destruction. Create() returns 0 on success and hands
    // ownership to the caller via |mixer|.
    static WebRtc_Word32 Create(TransmitMixer*& mixer,
                                const WebRtc_UWord32 instanceId);

    static void Destroy(TransmitMixer*& mixer);

    WebRtc_Word32 SetEngineInformation(ProcessThread& processThread,
                                       Statistics& engineStatistics,
                                       ChannelManager& channelManager);

    WebRtc_Word32 SetAudioProcessingModule(
        AudioProcessing* audioProcessingModule);

    // Main capture entry point: converts/mixes/processes one block of
    // device audio into the internal audio frame.
    WebRtc_Word32 PrepareDemux(const void* audioSamples,
                               const WebRtc_UWord32 nSamples,
                               const WebRtc_UWord8 nChannels,
                               const WebRtc_UWord32 samplesPerSec,
                               const WebRtc_UWord16 totalDelayMS,
                               const WebRtc_Word32 clockDrift,
                               const WebRtc_UWord16 currentMicLevel);


    WebRtc_Word32 DemuxAndMix();

    WebRtc_Word32 EncodeAndSend();

    // Latest analog capture level as requested by the AGC.
    WebRtc_UWord32 CaptureLevel() const;

    WebRtc_Word32 StopSend();

    // VoEDtmf
    void UpdateMuteMicrophoneTime(const WebRtc_UWord32 lengthMs);

    // VoEExternalMedia
    int RegisterExternalMediaProcessing(VoEMediaProcess* object,
                                        ProcessingTypes type);
    int DeRegisterExternalMediaProcessing(ProcessingTypes type);

    int GetMixingFrequency();

    // VoEVolumeControl
    int SetMute(const bool enable);

    bool Mute() const;

    WebRtc_Word8 AudioLevel() const;

    WebRtc_Word16 AudioLevelFullRange() const;

    bool IsRecordingCall();

    bool IsRecordingMic();

    // File-as-microphone playout: inject file audio into the capture path.
    int StartPlayingFileAsMicrophone(const char* fileName,
                                     const bool loop,
                                     const FileFormats format,
                                     const int startPosition,
                                     const float volumeScaling,
                                     const int stopPosition,
                                     const CodecInst* codecInst);

    int StartPlayingFileAsMicrophone(InStream* stream,
                                     const FileFormats format,
                                     const int startPosition,
                                     const float volumeScaling,
                                     const int stopPosition,
                                     const CodecInst* codecInst);

    int StopPlayingFileAsMicrophone();

    int IsPlayingFileAsMicrophone() const;

    int ScaleFileAsMicrophonePlayout(const float scale);

    // Recording of the microphone signal / the whole call.
    int StartRecordingMicrophone(const char* fileName,
                                 const CodecInst* codecInst);

    int StartRecordingMicrophone(OutStream* stream,
                                 const CodecInst* codecInst);

    int StopRecordingMicrophone();

    int StartRecordingCall(const char* fileName, const CodecInst* codecInst);

    int StartRecordingCall(OutStream* stream, const CodecInst* codecInst);

    int StopRecordingCall();

    void SetMixWithMicStatus(bool mix);

    WebRtc_Word32 RegisterVoiceEngineObserver(VoiceEngineObserver& observer);

    virtual ~TransmitMixer();

    // MonitorObserver
    void OnPeriodicProcess();


    // FileCallback
    void PlayNotification(const WebRtc_Word32 id,
                          const WebRtc_UWord32 durationMs);

    void RecordNotification(const WebRtc_Word32 id,
                            const WebRtc_UWord32 durationMs);

    void PlayFileEnded(const WebRtc_Word32 id);

    void RecordFileEnded(const WebRtc_Word32 id);

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    // Typing detection
    int TimeSinceLastTyping(int &seconds);
    int SetTypingDetectionParameters(int timeWindow,
                                     int costPerTyping,
                                     int reportingThreshold,
                                     int penaltyDecay,
                                     int typeEventDelay);
#endif

    void EnableStereoChannelSwapping(bool enable);
    bool IsStereoChannelSwappingEnabled();

private:
    TransmitMixer(const WebRtc_UWord32 instanceId);

    void CheckForSendCodecChanges();

    // Resamples (and optionally downmixes) device audio into _audioFrame.
    int GenerateAudioFrame(const int16_t audioSamples[],
                           int nSamples,
                           int nChannels,
                           int samplesPerSec);
    WebRtc_Word32 RecordAudioToFile(const WebRtc_UWord32 mixingFrequency);

    WebRtc_Word32 MixOrReplaceAudioWithFile(
        const int mixingFrequency);

    WebRtc_Word32 APMProcessStream(const WebRtc_UWord16 totalDelayMS,
                                   const WebRtc_Word32 clockDrift,
                                   const WebRtc_UWord16 currentMicLevel);

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    int TypingDetection();
#endif

    // uses
    Statistics* _engineStatisticsPtr;
    ChannelManager* _channelManagerPtr;
    AudioProcessing* _audioProcessingModulePtr;
    VoiceEngineObserver* _voiceEngineObserverPtr;
    ProcessThread* _processThreadPtr;

    // owns
    MonitorModule _monitorModule;
    AudioFrame _audioFrame;
    Resampler _audioResampler; // ADM sample rate -> mixing rate
    FilePlayer* _filePlayerPtr;
    FileRecorder* _fileRecorderPtr;
    FileRecorder* _fileCallRecorderPtr;
    int _filePlayerId;
    int _fileRecorderId;
    int _fileCallRecorderId;
    bool _filePlaying;
    bool _fileRecording;
    bool _fileCallRecording;
    voe::AudioLevel _audioLevel;
    // protect file instances and their variables in MixedParticipants()
    CriticalSectionWrapper& _critSect;
    CriticalSectionWrapper& _callbackCritSect;

#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    WebRtc_Word32 _timeActive;
    WebRtc_Word32 _timeSinceLastTyping;
    WebRtc_Word32 _penaltyCounter;
    WebRtc_UWord32 _typingNoiseWarning;

    // Tunable treshold values
    int _timeWindow; // nr of10ms slots accepted to count as a hit.
    int _costPerTyping; // Penalty added for a typing + activity coincide.
    int _reportingThreshold; // Threshold for _penaltyCounter.
    int _penaltyDecay; // How much we reduce _penaltyCounter every 10 ms.
    int _typeEventDelay; // How old typing events we allow

#endif
    // Latched warning flags, polled by the monitor thread.
    WebRtc_UWord32 _saturationWarning;
    WebRtc_UWord32 _noiseWarning;

    int _instanceId;
    bool _mixFileWithMicrophone;
    WebRtc_UWord32 _captureLevel;
    VoEMediaProcess* external_postproc_ptr_;
    VoEMediaProcess* external_preproc_ptr_;
    bool _mute;
    WebRtc_Word32 _remainingMuteMicTimeMs;
    int _mixingFrequency;
    bool stereo_codec_;
    bool swap_stereo_channels_;
};
+
}  // namespace voe

}  // namespace webrtc

#endif  // WEBRTC_VOICE_ENGINE_TRANSMIT_MIXER_H
diff --git a/voice_engine/transmit_mixer_unittest.cc b/voice_engine/transmit_mixer_unittest.cc
new file mode 100644
index 0000000..d8d85b6
--- /dev/null
+++ b/voice_engine/transmit_mixer_unittest.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/transmit_mixer.h"
+
+#include "gtest/gtest.h"
+#include "voice_engine/include/voe_external_media.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
// Minimal VoEMediaProcess implementation whose callback deliberately does
// nothing; used only to exercise the (de)registration code paths.
class MediaCallback : public VoEMediaProcess {
 public:
  virtual void Process(const int channel, const ProcessingTypes type,
                       int16_t audio[], const int samples_per_channel,
                       const int sample_rate_hz, const bool is_stereo) {
  }
};
+
+// TODO(andrew): Mock VoEMediaProcess, and verify the behavior when calling
+// PrepareDemux().
TEST(TransmitMixerTest, RegisterExternalMediaCallback) {
  TransmitMixer* tm = NULL;
  ASSERT_EQ(0, TransmitMixer::Create(tm, 0));
  ASSERT_TRUE(tm != NULL);
  MediaCallback callback;
  // NULL object must be rejected regardless of type.
  EXPECT_EQ(-1, tm->RegisterExternalMediaProcessing(NULL,
                                                    kRecordingPreprocessing));
  // Playback-side and per-channel recording types are not supported by
  // TransmitMixer and must fail.
  EXPECT_EQ(-1, tm->RegisterExternalMediaProcessing(&callback,
                                                    kPlaybackPerChannel));
  EXPECT_EQ(-1, tm->RegisterExternalMediaProcessing(&callback,
                                                    kPlaybackAllChannelsMixed));
  EXPECT_EQ(-1, tm->RegisterExternalMediaProcessing(&callback,
                                                    kRecordingPerChannel));
  // Only the two mixer-level recording hooks succeed.
  EXPECT_EQ(0, tm->RegisterExternalMediaProcessing(&callback,
                                                   kRecordingAllChannelsMixed));
  EXPECT_EQ(0, tm->RegisterExternalMediaProcessing(&callback,
                                                   kRecordingPreprocessing));
  // Deregistration mirrors the same accepted/rejected type set.
  EXPECT_EQ(-1, tm->DeRegisterExternalMediaProcessing(kPlaybackPerChannel));
  EXPECT_EQ(-1, tm->DeRegisterExternalMediaProcessing(
      kPlaybackAllChannelsMixed));
  EXPECT_EQ(-1, tm->DeRegisterExternalMediaProcessing(kRecordingPerChannel));
  EXPECT_EQ(0, tm->DeRegisterExternalMediaProcessing(
      kRecordingAllChannelsMixed));
  EXPECT_EQ(0, tm->DeRegisterExternalMediaProcessing(kRecordingPreprocessing));
  TransmitMixer::Destroy(tm);
}
+
+} // namespace
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/utility.cc b/voice_engine/utility.cc
new file mode 100644
index 0000000..1ef108e
--- /dev/null
+++ b/voice_engine/utility.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "utility.h"
+
+#include "module.h"
+#include "trace.h"
+#include "signal_processing_library.h"
+
+namespace webrtc
+{
+
+namespace voe
+{
+// Upper bound on samples handled per call: stereo, 32 kHz, 10 ms frames
+// (2 channels * 32 samples/ms * 10 ms).
+enum{kMaxTargetLen = 2*32*10}; // stereo 32KHz 10ms
+
+// Mixes |source| into |target| in place with 16-bit saturation, converting
+// between mono and stereo when the channel counts differ:
+//  - mono source into stereo target: each source sample is added to both the
+//    left and right target samples;
+//  - stereo source into mono target: the source pair is averaged (with a
+//    right shift, truncating) before being added;
+//  - matching channel counts: plain element-wise saturated add.
+// |source_len| is the number of samples in |source| (all channels included)
+// and must not exceed kMaxTargetLen.
+void Utility::MixWithSat(WebRtc_Word16 target[],
+                         int target_channel,
+                         const WebRtc_Word16 source[],
+                         int source_channel,
+                         int source_len)
+{
+    assert((target_channel == 1) || (target_channel == 2));
+    assert((source_channel == 1) || (source_channel == 2));
+    assert(source_len <= kMaxTargetLen);
+
+    if ((target_channel == 2) && (source_channel == 1))
+    {
+        // Convert source from mono to stereo.
+        WebRtc_Word32 left = 0;
+        WebRtc_Word32 right = 0;
+        for (int i = 0; i < source_len; ++i) {
+            left  = source[i] + target[i*2];
+            right = source[i] + target[i*2 + 1];
+            target[i*2]     = WebRtcSpl_SatW32ToW16(left);
+            target[i*2 + 1] = WebRtcSpl_SatW32ToW16(right);
+        }
+    }
+    else if ((target_channel == 1) && (source_channel == 2))
+    {
+        // Convert source from stereo to mono.
+        WebRtc_Word32 temp = 0;
+        for (int i = 0; i < source_len/2; ++i) {
+          temp = ((source[i*2] + source[i*2 + 1])>>1) + target[i];
+          target[i] = WebRtcSpl_SatW32ToW16(temp);
+        }
+    }
+    else
+    {
+        WebRtc_Word32 temp = 0;
+        for (int i = 0; i < source_len; ++i) {
+          temp = source[i] + target[i];
+          target[i] = WebRtcSpl_SatW32ToW16(temp);
+        }
+    }
+}
+
+// Subtracts |source| from |target| element-wise, clamping every result to
+// the signed 16-bit range [-32768, 32767] before storing it back.
+void Utility::MixSubtractWithSat(WebRtc_Word16 target[],
+                                 const WebRtc_Word16 source[],
+                                 WebRtc_UWord16 len)
+{
+    for (int i = 0; i < len; i++)
+    {
+        // Widen to 32 bits so the subtraction itself cannot overflow.
+        WebRtc_Word32 diff =
+            static_cast<WebRtc_Word32>(target[i]) - source[i];
+        if (diff > 32767)
+        {
+            diff = 32767;
+        }
+        else if (diff < -32768)
+        {
+            diff = -32768;
+        }
+        target[i] = static_cast<WebRtc_Word16>(diff);
+    }
+}
+
+// Adds |scale| * source[i] to target[i] for each sample, clamping the result
+// to the signed 16-bit range. The scaled sum is computed in floating point
+// and truncated toward zero, matching the behavior of ScaleWithSat().
+void Utility::MixAndScaleWithSat(WebRtc_Word16 target[],
+                                 const WebRtc_Word16 source[], float scale,
+                                 WebRtc_UWord16 len)
+{
+    for (int i = 0; i < len; i++)
+    {
+        // float arithmetic, then truncating cast (same as the original
+        // (WebRtc_Word32)(target[i] + scale * source[i])).
+        WebRtc_Word32 mixed =
+            static_cast<WebRtc_Word32>(target[i] + scale * source[i]);
+        if (mixed > 32767)
+        {
+            mixed = 32767;
+        }
+        else if (mixed < -32768)
+        {
+            mixed = -32768;
+        }
+        target[i] = static_cast<WebRtc_Word16>(mixed);
+    }
+}
+
+// Scales each sample of |vector| by |scale| in place, truncating toward zero.
+// Note: no saturation is applied here; callers that can overflow 16 bits
+// should use ScaleWithSat() instead.
+void Utility::Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len)
+{
+    for (int i = 0; i < len; i++)
+    {
+        vector[i] = (WebRtc_Word16) (scale * vector[i]);
+    }
+}
+
+// Scales each sample of |vector| by |scale| in place, truncating toward zero
+// and clamping the result to the signed 16-bit range.
+void Utility::ScaleWithSat(WebRtc_Word16 vector[], float scale,
+                           WebRtc_UWord16 len)
+{
+    for (int i = 0; i < len; i++)
+    {
+        WebRtc_Word32 scaled = static_cast<WebRtc_Word32>(scale * vector[i]);
+        if (scaled > 32767)
+        {
+            scaled = 32767;
+        }
+        else if (scaled < -32768)
+        {
+            scaled = -32768;
+        }
+        vector[i] = static_cast<WebRtc_Word16>(scaled);
+    }
+}
+
+} // namespace voe
+
+} // namespace webrtc
diff --git a/voice_engine/utility.h b/voice_engine/utility.h
new file mode 100644
index 0000000..a8af8bd
--- /dev/null
+++ b/voice_engine/utility.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * Contains functions often used by different parts of VoiceEngine.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_UTILITY_H
+#define WEBRTC_VOICE_ENGINE_UTILITY_H
+
+#include "typedefs.h"
+#include "voice_engine_defines.h"
+
+namespace webrtc
+{
+
+class Module;
+
+namespace voe
+{
+
+// Stateless collection of sample-buffer helpers (mix, scale, saturate)
+// shared across VoiceEngine. All functions operate in place on |target| /
+// |vector| and clamp to the signed 16-bit range where "WithSat" appears in
+// the name.
+class Utility
+{
+public:
+    // Mixes |source| into |target| with saturation, converting mono<->stereo
+    // when |target_channel| and |source_channel| differ.
+    static void MixWithSat(WebRtc_Word16 target[],
+                           int target_channel,
+                           const WebRtc_Word16 source[],
+                           int source_channel,
+                           int source_len);
+
+    // target[i] = sat16(target[i] - source[i]).
+    static void MixSubtractWithSat(WebRtc_Word16 target[],
+                                   const WebRtc_Word16 source[],
+                                   WebRtc_UWord16 len);
+
+    // target[i] = sat16(target[i] + scale * source[i]).
+    static void MixAndScaleWithSat(WebRtc_Word16 target[],
+                                   const WebRtc_Word16 source[],
+                                   float scale,
+                                   WebRtc_UWord16 len);
+
+    // vector[i] = (Word16)(scale * vector[i]); no saturation.
+    static void Scale(WebRtc_Word16 vector[], float scale, WebRtc_UWord16 len);
+
+    // vector[i] = sat16(scale * vector[i]).
+    static void ScaleWithSat(WebRtc_Word16 vector[],
+                             float scale,
+                             WebRtc_UWord16 len);
+};
+
+} // namespace voe
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_UTILITY_H
diff --git a/voice_engine/voe_audio_processing_impl.cc b/voice_engine/voe_audio_processing_impl.cc
new file mode 100644
index 0000000..6390970
--- /dev/null
+++ b/voice_engine/voe_audio_processing_impl.cc
@@ -0,0 +1,1154 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_audio_processing_impl.h"
+
+#include "audio_processing.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+// TODO(andrew): move to a common place.
+// Emits an API-call trace line tagged with the shared instance id and the
+// current function name. Relies on |_shared| being in scope at the call site.
+#define WEBRTC_TRACE_VOICE_API() \
+  do { \
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, \
+                 VoEId(_shared->instance_id(), -1), __FUNCTION__); \
+  } while (0)
+
+// Guard for int-returning API methods: records VE_NOT_INITED and returns -1
+// if the engine has not been initialized yet.
+#define WEBRTC_VOICE_INIT_CHECK() \
+  do { \
+    if (!_shared->statistics().Initialized()) { \
+      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
+      return -1; \
+    } \
+  } while (0)
+
+// Same guard for bool-returning API methods; returns false instead of -1.
+#define WEBRTC_VOICE_INIT_CHECK_BOOL() \
+  do { \
+    if (!_shared->statistics().Initialized()) { \
+      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
+      return false; \
+    } \
+  } while (0)
+
+
+namespace webrtc {
+
+// Default echo-control flavor: mobile platforms get the low-complexity
+// mobile canceller (AECM); everything else gets the full AEC.
+#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
+static const EcModes kDefaultEcMode = kEcAecm;
+#else
+static const EcModes kDefaultEcMode = kEcAec;
+#endif
+
+// Returns the audio-processing sub-API of |voiceEngine| with an added
+// reference, or NULL if the engine pointer is NULL or the sub-API was
+// compiled out. The caller releases via the sub-API's Release().
+VoEAudioProcessing* VoEAudioProcessing::GetInterface(VoiceEngine* voiceEngine) {
+#ifndef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+  return NULL;
+#else
+  if (NULL == voiceEngine) {
+    return NULL;
+  }
+  VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+  s->AddRef();
+  return s;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+// Starts in AEC mode iff the platform default is the full AEC (see
+// kDefaultEcMode above). |shared| is borrowed, not owned.
+VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared)
+    : _isAecMode(kDefaultEcMode == kEcAec),
+      _shared(shared) {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
+}
+
+VoEAudioProcessingImpl::~VoEAudioProcessingImpl() {
+  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoEAudioProcessingImpl::~VoEAudioProcessingImpl() - dtor");
+}
+
+// Enables/disables noise suppression and maps the public NsModes value onto
+// the APM's NoiseSuppression::Level. Returns 0 on success, -1 on error
+// (engine not initialized, APM call failure, or NS compiled out).
+int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetNsStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_NR
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  NoiseSuppression::Level nsLevel(
+      (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
+  switch (mode) {
+    case kNsDefault:
+      nsLevel = (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE;
+      break;
+    case kNsUnchanged:
+      // Keep whatever level the APM currently uses.
+      nsLevel = _shared->audio_processing()->noise_suppression()->level();
+      break;
+    case kNsConference:
+      // Conference mode maps to the same level as kNsHighSuppression.
+      nsLevel = NoiseSuppression::kHigh;
+      break;
+    case kNsLowSuppression:
+      nsLevel = NoiseSuppression::kLow;
+      break;
+    case kNsModerateSuppression:
+      nsLevel = NoiseSuppression::kModerate;
+      break;
+    case kNsHighSuppression:
+      nsLevel = NoiseSuppression::kHigh;
+      break;
+    case kNsVeryHighSuppression:
+      nsLevel = NoiseSuppression::kVeryHigh;
+      break;
+  }
+
+  // Set the level first, then the enabled state.
+  if (_shared->audio_processing()->noise_suppression()->
+          set_level(nsLevel) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetNsStatus() failed to set Ns mode");
+    return -1;
+  }
+  if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetNsStatus() failed to set Ns state");
+    return -1;
+  }
+
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetNsStatus() Ns is not supported");
+  return -1;
+#endif
+}
+
+// Reports whether noise suppression is enabled and its current suppression
+// level, mapped back to the public NsModes enum. Returns 0 on success, -1 if
+// the engine is uninitialized or NS was compiled out.
+int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetNsStatus(enabled=?, mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_NR
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  bool enable(false);
+  NoiseSuppression::Level nsLevel(
+      (NoiseSuppression::Level)WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE);
+
+  enable = _shared->audio_processing()->noise_suppression()->is_enabled();
+  nsLevel = _shared->audio_processing()->noise_suppression()->level();
+
+  enabled = enable;
+
+  switch (nsLevel) {
+    case NoiseSuppression::kLow:
+      mode = kNsLowSuppression;
+      break;
+    case NoiseSuppression::kModerate:
+      mode = kNsModerateSuppression;
+      break;
+    case NoiseSuppression::kHigh:
+      mode = kNsHighSuppression;
+      break;
+    case NoiseSuppression::kVeryHigh:
+      mode = kNsVeryHighSuppression;
+      break;
+  }
+
+  // Fixed trace format: "% d" (space flag) was a typo; every other trace in
+  // this file uses plain "%d".
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetNsStatus() => enabled=%d, mode=%d", enabled, mode);
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetNsStatus() Ns is not supported");
+  return -1;
+#endif
+}
+
+// Enables/disables automatic gain control and maps the public AgcModes value
+// onto the APM's GainControl::Mode. On mobile builds the adaptive-analog
+// mode is rejected. For adaptive modes the ADM's AGC is toggled as well so
+// the APM keeps receiving mic-level updates. Returns 0 on success, -1 on
+// error.
+int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetAgcStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+#if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID)
+  if (mode == kAgcAdaptiveAnalog) {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+        "SetAgcStatus() invalid Agc mode for mobile device");
+    return -1;
+  }
+#endif
+
+  GainControl::Mode agcMode(
+      (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
+  switch (mode) {
+    case kAgcDefault:
+      agcMode = (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE;
+      break;
+    case kAgcUnchanged:
+      // Keep the mode the APM currently uses. (Stray ";;" removed.)
+      agcMode = _shared->audio_processing()->gain_control()->mode();
+      break;
+    case kAgcFixedDigital:
+      agcMode = GainControl::kFixedDigital;
+      break;
+    case kAgcAdaptiveAnalog:
+      agcMode = GainControl::kAdaptiveAnalog;
+      break;
+    case kAgcAdaptiveDigital:
+      agcMode = GainControl::kAdaptiveDigital;
+      break;
+  }
+
+  if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAgcStatus() failed to set Agc mode");
+    return -1;
+  }
+  if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAgcStatus() failed to set Agc state");
+    return -1;
+  }
+
+  if (agcMode != GainControl::kFixedDigital) {
+    // Set Agc state in the ADM when adaptive Agc mode has been selected.
+    // Note that we also enable the ADM Agc when Adaptive Digital mode is
+    // used since we want to be able to provide the APM with updated mic
+    // levels when the user modifies the mic level manually.
+    if (_shared->audio_device()->SetAGC(enable) != 0) {
+      // Only a warning: the APM-side AGC has already been configured.
+      _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+          kTraceWarning, "SetAgcStatus() failed to set Agc mode");
+    }
+  }
+
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetAgcStatus() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Reports whether AGC is enabled and its current mode, mapped back to the
+// public AgcModes enum. Returns 0 on success, -1 on error.
+int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetAgcStatus(enabled=?, mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  bool enable(false);
+  GainControl::Mode agcMode(
+      (GainControl::Mode)WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE);
+
+  enable = _shared->audio_processing()->gain_control()->is_enabled();
+  agcMode = _shared->audio_processing()->gain_control()->mode();
+
+  enabled = enable;
+
+  switch (agcMode) {
+    case GainControl::kFixedDigital:
+      mode = kAgcFixedDigital;
+      break;
+    case GainControl::kAdaptiveAnalog:
+      mode = kAgcAdaptiveAnalog;
+      break;
+    case GainControl::kAdaptiveDigital:
+      mode = kAgcAdaptiveDigital;
+      break;
+  }
+
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetAgcStatus() => enabled=%d, mode=%d", enabled, mode);
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetAgcStatus() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Applies an AgcConfig (target level, compression gain, limiter flag) to the
+// APM's gain control. Returns 0 on success, -1 on error.
+int VoEAudioProcessingImpl::SetAgcConfig(const AgcConfig config) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetAgcConfig()");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  if (_shared->audio_processing()->gain_control()->set_target_level_dbfs(
+      config.targetLeveldBOv) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAgcConfig() failed to set target peak |level|"
+        " (or envelope) of the Agc");
+    return -1;
+  }
+  if (_shared->audio_processing()->gain_control()->set_compression_gain_db(
+      config.digitalCompressionGaindB) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAgcConfig() failed to set the range in |gain| "
+        "the digital compression stage may apply");
+    return -1;
+  }
+  if (_shared->audio_processing()->gain_control()->enable_limiter(
+      config.limiterEnable) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAgcConfig() failed to set hard limiter to the signal");
+    return -1;
+  }
+
+  return 0;
+#else
+  // Fixed copy-paste: this error string previously said "EC is not
+  // supported" although this is the Agc configuration API.
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetAgcConfig() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Reads the current AGC configuration (target level, compression gain,
+// limiter flag) back from the APM. Returns 0 on success, -1 on error.
+int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig& config) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetAgcConfig(config=?)");
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  config.targetLeveldBOv =
+    _shared->audio_processing()->gain_control()->target_level_dbfs();
+  config.digitalCompressionGaindB =
+    _shared->audio_processing()->gain_control()->compression_gain_db();
+  config.limiterEnable =
+    _shared->audio_processing()->gain_control()->is_limiter_enabled();
+
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetAgcConfig() => targetLeveldBOv=%u, "
+               "digitalCompressionGaindB=%u, limiterEnable=%d",
+               config.targetLeveldBOv,
+               config.digitalCompressionGaindB,
+               config.limiterEnable);
+
+  return 0;
+#else
+  // Fixed copy-paste: this error string previously said "EC is not
+  // supported" although this is the Agc configuration API.
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetAgcConfig() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Enables/disables receive-side noise suppression on one channel by
+// delegating to Channel::SetRxNsStatus().
+// NOTE(review): this NS API is guarded by WEBRTC_VOICE_ENGINE_AGC (and the
+// error text says "AGC") rather than WEBRTC_VOICE_ENGINE_NR — presumably
+// because the Rx-side processing is compiled together with Rx AGC; confirm
+// before changing the guard.
+int VoEAudioProcessingImpl::SetRxNsStatus(int channel,
+                                          bool enable,
+                                          NsModes mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRxNsStatus(channel=%d, enable=%d, mode=%d)",
+               channel, (int)enable, (int)mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetRxNsStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetRxNsStatus(enable, mode);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetRxNsStatus() AGC is not supported");
+  return -1;
+#endif
+}
+
+// Reads the receive-side NS state of one channel via
+// Channel::GetRxNsStatus(). Guarded by WEBRTC_VOICE_ENGINE_AGC like
+// SetRxNsStatus() above.
+int VoEAudioProcessingImpl::GetRxNsStatus(int channel,
+                                          bool& enabled,
+                                          NsModes& mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRxNsStatus(channel=%d, enable=?, mode=?)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRxNsStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRxNsStatus(enabled, mode);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetRxNsStatus() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Enables/disables receive-side AGC on one channel by delegating to
+// Channel::SetRxAgcStatus().
+int VoEAudioProcessingImpl::SetRxAgcStatus(int channel,
+                                           bool enable,
+                                           AgcModes mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRxAgcStatus(channel=%d, enable=%d, mode=%d)",
+               channel, (int)enable, (int)mode);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetRxAgcStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetRxAgcStatus(enable, mode);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetRxAgcStatus() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Reads the receive-side AGC state of one channel via
+// Channel::GetRxAgcStatus().
+int VoEAudioProcessingImpl::GetRxAgcStatus(int channel,
+                                           bool& enabled,
+                                           AgcModes& mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRxAgcStatus(channel=%d, enable=?, mode=?)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRxAgcStatus() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRxAgcStatus(enabled, mode);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetRxAgcStatus() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Applies an AgcConfig to the receive-side AGC of one channel via
+// Channel::SetRxAgcConfig().
+int VoEAudioProcessingImpl::SetRxAgcConfig(int channel,
+                                           const AgcConfig config) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetRxAgcConfig(channel=%d)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "SetRxAgcConfig() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->SetRxAgcConfig(config);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetRxAgcConfig() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Reads the receive-side AGC configuration of one channel via
+// Channel::GetRxAgcConfig().
+int VoEAudioProcessingImpl::GetRxAgcConfig(int channel, AgcConfig& config) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRxAgcConfig(channel=%d)", channel);
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRxAgcConfig() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->GetRxAgcConfig(config);
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetRxAgcConfig() Agc is not supported");
+  return -1;
+#endif
+}
+
+// Compile-time answer: true only when the build defines
+// WEBRTC_DRIFT_COMPENSATION_SUPPORTED.
+bool VoEAudioProcessing::DriftCompensationSupported() {
+#if defined(WEBRTC_DRIFT_COMPENSATION_SUPPORTED)
+  return true;
+#else
+  return false;
+#endif
+}
+
+// Toggles AEC clock-drift compensation. Fails with -1 if the engine is not
+// initialized, the platform build does not support drift compensation, or
+// the APM call fails.
+int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) {
+  WEBRTC_TRACE_VOICE_API();
+  WEBRTC_VOICE_INIT_CHECK();
+
+  if (!DriftCompensationSupported()) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+        "Drift compensation is not supported on this platform.");
+    return -1;
+  }
+
+  EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
+  if (aec->enable_drift_compensation(enable) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "aec->enable_drift_compensation() failed");
+    return -1;
+  }
+  return 0;
+}
+
+// Returns the AEC's current drift-compensation flag (false if the engine is
+// not initialized, via WEBRTC_VOICE_INIT_CHECK_BOOL).
+bool VoEAudioProcessingImpl::DriftCompensationEnabled() {
+  WEBRTC_TRACE_VOICE_API();
+  WEBRTC_VOICE_INIT_CHECK_BOOL();
+
+  EchoCancellation* aec = _shared->audio_processing()->echo_cancellation();
+  return aec->is_drift_compensation_enabled();
+}
+
+// Enables/disables echo control and selects between the full AEC and the
+// mobile AECM. AEC and AECM are mutually exclusive in the APM, so the other
+// canceller is explicitly disabled before enabling the requested one.
+// kEcUnchanged keeps the flavor recorded in _isAecMode. Returns 0 on
+// success, -1 on error.
+int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetEcStatus(enable=%d, mode=%d)", enable, mode);
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  // AEC mode
+  if ((mode == kEcDefault) ||
+      (mode == kEcConference) ||
+      (mode == kEcAec) ||
+      ((mode == kEcUnchanged) &&
+       (_isAecMode == true))) {
+    if (enable) {
+      // Disable the AECM before enable the AEC
+      if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) {
+        _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+            "SetEcStatus() disable AECM before enabling AEC");
+        if (_shared->audio_processing()->echo_control_mobile()->
+            Enable(false) != 0) {
+          _shared->SetLastError(VE_APM_ERROR, kTraceError,
+              "SetEcStatus() failed to disable AECM");
+          return -1;
+        }
+      }
+    }
+    if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) {
+      _shared->SetLastError(VE_APM_ERROR, kTraceError,
+          "SetEcStatus() failed to set AEC state");
+      return -1;
+    }
+    // kEcConference additionally raises the suppression level; all other
+    // AEC modes use moderate suppression.
+    if (mode == kEcConference) {
+      if (_shared->audio_processing()->echo_cancellation()->
+          set_suppression_level(EchoCancellation::kHighSuppression) != 0) {
+        _shared->SetLastError(VE_APM_ERROR, kTraceError,
+            "SetEcStatus() failed to set aggressiveness to high");
+        return -1;
+      }
+    } else {
+      if (_shared->audio_processing()->echo_cancellation()->
+          set_suppression_level(
+            EchoCancellation::kModerateSuppression) != 0) {
+        _shared->SetLastError(VE_APM_ERROR, kTraceError,
+            "SetEcStatus() failed to set aggressiveness to moderate");
+        return -1;
+      }
+    }
+
+    _isAecMode = true;
+  } else if ((mode == kEcAecm) ||
+             ((mode == kEcUnchanged) &&
+              (_isAecMode == false))) {
+    if (enable) {
+      // Disable the AEC before enable the AECM
+      if (_shared->audio_processing()->echo_cancellation()->is_enabled()) {
+        _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+            "SetEcStatus() disable AEC before enabling AECM");
+        if (_shared->audio_processing()->echo_cancellation()->
+            Enable(false) != 0) {
+          _shared->SetLastError(VE_APM_ERROR, kTraceError,
+              "SetEcStatus() failed to disable AEC");
+          return -1;
+        }
+      }
+    }
+    if (_shared->audio_processing()->echo_control_mobile()->
+        Enable(enable) != 0) {
+      _shared->SetLastError(VE_APM_ERROR, kTraceError,
+          "SetEcStatus() failed to set AECM state");
+      return -1;
+    }
+    _isAecMode = false;
+  } else {
+    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+        "SetEcStatus() invalid EC mode");
+    return -1;
+  }
+
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetEcStatus() EC is not supported");
+  return -1;
+#endif
+}
+
+// Reports the currently selected echo-control flavor (from _isAecMode) and
+// whether that canceller is enabled in the APM.
+int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetEcStatus()");
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  if (_isAecMode == true) {
+    mode = kEcAec;
+    enabled = _shared->audio_processing()->echo_cancellation()->is_enabled();
+  } else {
+    mode = kEcAecm;
+    enabled = _shared->audio_processing()->echo_control_mobile()->
+              is_enabled();
+  }
+
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetEcStatus() => enabled=%i, mode=%i",
+               enabled, (int)mode);
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetEcStatus() EC is not supported");
+  return -1;
+#endif
+}
+
+// Forwards a fixed delay offset (in ms) to the APM; no init check or error
+// reporting, mirroring the APM setter's void signature.
+void VoEAudioProcessingImpl::SetDelayOffsetMs(int offset) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetDelayOffsetMs(offset = %d)", offset);
+  _shared->audio_processing()->set_delay_offset_ms(offset);
+}
+
+// Returns the APM's current delay offset in ms.
+int VoEAudioProcessingImpl::DelayOffsetMs() {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "DelayOffsetMs()");
+  return _shared->audio_processing()->delay_offset_ms();
+}
+
+// Selects the AECM routing mode and its comfort-noise setting. Maps the
+// public AecmModes enum onto EchoControlMobile::RoutingMode. Returns 0 on
+// success, -1 on error.
+int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetAECMMode(mode = %d)", mode);
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  EchoControlMobile::RoutingMode aecmMode(
+      EchoControlMobile::kQuietEarpieceOrHeadset);
+
+  switch (mode) {
+    case kAecmQuietEarpieceOrHeadset:
+      aecmMode = EchoControlMobile::kQuietEarpieceOrHeadset;
+      break;
+    case kAecmEarpiece:
+      aecmMode = EchoControlMobile::kEarpiece;
+      break;
+    case kAecmLoudEarpiece:
+      aecmMode = EchoControlMobile::kLoudEarpiece;
+      break;
+    case kAecmSpeakerphone:
+      aecmMode = EchoControlMobile::kSpeakerphone;
+      break;
+    case kAecmLoudSpeakerphone:
+      aecmMode = EchoControlMobile::kLoudSpeakerphone;
+      break;
+  }
+
+
+  if (_shared->audio_processing()->echo_control_mobile()->
+      set_routing_mode(aecmMode) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAECMMode() failed to set AECM routing mode");
+    return -1;
+  }
+  if (_shared->audio_processing()->echo_control_mobile()->
+      enable_comfort_noise(enableCNG) != 0) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetAECMMode() failed to set comfort noise state for AECM");
+    return -1;
+  }
+
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetAECMMode() EC is not supported");
+  return -1;
+#endif
+}
+
+// Reads the AECM routing mode and comfort-noise flag back from the APM,
+// mapping the routing mode onto the public AecmModes enum. Returns 0 on
+// success, -1 on error.
+int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetAECMMode(mode=?)");
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  // (Removed a dead "enabledCNG = false;" store that was immediately
+  // overwritten below.)
+  EchoControlMobile::RoutingMode aecmMode =
+      _shared->audio_processing()->echo_control_mobile()->routing_mode();
+  enabledCNG = _shared->audio_processing()->echo_control_mobile()->
+      is_comfort_noise_enabled();
+
+  switch (aecmMode) {
+    case EchoControlMobile::kQuietEarpieceOrHeadset:
+      mode = kAecmQuietEarpieceOrHeadset;
+      break;
+    case EchoControlMobile::kEarpiece:
+      mode = kAecmEarpiece;
+      break;
+    case EchoControlMobile::kLoudEarpiece:
+      mode = kAecmLoudEarpiece;
+      break;
+    case EchoControlMobile::kSpeakerphone:
+      mode = kAecmSpeakerphone;
+      break;
+    case EchoControlMobile::kLoudSpeakerphone:
+      mode = kAecmLoudSpeakerphone;
+      break;
+  }
+
+  return 0;
+#else
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetAECMMode() EC is not supported");
+  return -1;
+#endif
+}
+
+// Toggles the APM's high-pass filter. Note: unlike most methods here, no
+// init check precedes the APM call. Returns 0 on success, -1 on APM error.
+int VoEAudioProcessingImpl::EnableHighPassFilter(bool enable) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "EnableHighPassFilter(%d)", enable);
+  if (_shared->audio_processing()->high_pass_filter()->Enable(enable) !=
+      AudioProcessing::kNoError) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "HighPassFilter::Enable() failed.");
+    return -1;
+  }
+
+  return 0;
+}
+
+// Returns the APM high-pass filter's enabled flag.
+bool VoEAudioProcessingImpl::IsHighPassFilterEnabled() {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "IsHighPassFilterEnabled()");
+  return _shared->audio_processing()->high_pass_filter()->is_enabled();
+}
+
+// Registers a receive-side VAD observer on one channel. Not supported on
+// Android/iPhone builds (the *_NOT_SUPPORTED macros return early there).
+int VoEAudioProcessingImpl::RegisterRxVadObserver(
+  int channel,
+  VoERxVadCallback& observer) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "RegisterRxVadObserver()");
+  ANDROID_NOT_SUPPORTED(_shared->statistics());
+  IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "RegisterRxVadObserver() failed to locate channel");
+    return -1;
+  }
+  return channelPtr->RegisterRxVadObserver(observer);
+}
+
+// Removes the receive-side VAD observer from one channel. Not supported on
+// Android/iPhone builds.
+int VoEAudioProcessingImpl::DeRegisterRxVadObserver(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "DeRegisterRxVadObserver()");
+  ANDROID_NOT_SUPPORTED(_shared->statistics());
+  IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "DeRegisterRxVadObserver() failed to locate channel");
+    return -1;
+  }
+
+  return channelPtr->DeRegisterRxVadObserver();
+}
+
+// Returns the channel's current voice-activity indicator (the value the
+// channel writes into |activity|), or -1 on error.
+int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "VoiceActivityIndicator(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  voe::ScopedChannel sc(_shared->channel_manager(), channel);
+  voe::Channel* channelPtr = sc.ChannelPtr();
+  if (channelPtr == NULL) {
+    // Fixed copy-paste: the error message previously named
+    // DeRegisterRxVadObserver().
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "VoiceActivityIndicator() failed to locate channel");
+    return -1;
+  }
+  int activity(-1);
+  channelPtr->VoiceActivityIndicator(activity);
+
+  return activity;
+}
+
+// Enables/disables both AEC metrics and AEC delay logging together (the two
+// are kept in lock-step; see GetEcMetricsStatus). Not supported on
+// Android/iPhone builds. Returns 0 on success, -1 on error.
+int VoEAudioProcessingImpl::SetEcMetricsStatus(bool enable) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "SetEcMetricsStatus(enable=%d)", enable);
+  ANDROID_NOT_SUPPORTED(_shared->statistics());
+  IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  if ((_shared->audio_processing()->echo_cancellation()->enable_metrics(enable)
+      != 0) ||
+      (_shared->audio_processing()->echo_cancellation()->enable_delay_logging(
+        enable) != 0)) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "SetEcMetricsStatus() unable to set EC metrics mode");
+    return -1;
+  }
+  return 0;
+#else
+  // Fixed copy-paste: the error message previously named SetEcStatus().
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "SetEcMetricsStatus() EC is not supported");
+  return -1;
+#endif
+}
+
+// Reports whether EC metrics are enabled. Metrics and delay logging are set
+// together by SetEcMetricsStatus(), so a mismatch between the two flags is
+// treated as an internal error. Not supported on Android/iPhone builds.
+int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetEcMetricsStatus(enabled=?)");
+  ANDROID_NOT_SUPPORTED(_shared->statistics());
+  IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+
+  bool echo_mode =
+    _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
+  bool delay_mode = _shared->audio_processing()->echo_cancellation()->
+      is_delay_logging_enabled();
+
+  if (echo_mode != delay_mode) {
+    _shared->SetLastError(VE_APM_ERROR, kTraceError,
+        "GetEcMetricsStatus() delay logging and echo mode are not the same");
+    return -1;
+  }
+
+  enabled = echo_mode;
+
+  WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetEcMetricsStatus() => enabled=%d", enabled);
+  return 0;
+#else
+  // Fixed copy-paste: the error message previously named SetEcStatus().
+  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+      "GetEcMetricsStatus() EC is not supported");
+  return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL,
+ int& ERLE,
+ int& RERL,
+ int& A_NLP) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEchoMetrics(ERL=?, ERLE=?, RERL=?, A_NLP=?)");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
+ _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+ "GetEchoMetrics() AudioProcessingModule AEC is not enabled");
+ return -1;
+ }
+
+ // Get Echo Metrics from Audio Processing Module.
+ EchoCancellation::Metrics echoMetrics;
+ if (_shared->audio_processing()->echo_cancellation()->GetMetrics(
+ &echoMetrics)) {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEchoMetrics(), AudioProcessingModule metrics error");
+ return -1;
+ }
+
+ // Echo quality metrics.
+ ERL = echoMetrics.echo_return_loss.instant;
+ ERLE = echoMetrics.echo_return_loss_enhancement.instant;
+ RERL = echoMetrics.residual_echo_return_loss.instant;
+ A_NLP = echoMetrics.a_nlp.instant;
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEchoMetrics() => ERL=%d, ERLE=%d, RERL=%d, A_NLP=%d",
+ ERL, ERLE, RERL, A_NLP);
+ return 0;
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetEcStatus() EC is not supported");
+ return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median,
+ int& delay_std) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEcDelayMetrics(median=?, std=?)");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_ECHO
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) {
+ _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
+ "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled");
+ return -1;
+ }
+
+ int median = 0;
+ int std = 0;
+ // Get delay-logging values from Audio Processing Module.
+ if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics(
+ &median, &std)) {
+ WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEcDelayMetrics(), AudioProcessingModule delay-logging "
+ "error");
+ return -1;
+ }
+
+ // EC delay-logging metrics
+ delay_median = median;
+ delay_std = std;
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetEcDelayMetrics() => delay_median=%d, delay_std=%d",
+ delay_median, delay_std);
+ return 0;
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetEcStatus() EC is not supported");
+ return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::StartDebugRecording(const char* fileNameUTF8) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "StartDebugRecording()");
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ return _shared->audio_processing()->StartDebugRecording(fileNameUTF8);
+
+}
+
+int VoEAudioProcessingImpl::StopDebugRecording() {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "StopDebugRecording()");
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ return _shared->audio_processing()->StopDebugRecording();
+}
+
// Enables or disables typing detection. Typing detection is piggybacked on
// the APM voice-activity detector: enabling typing detection enables the VAD
// and pins its likelihood to kVeryLowLikelihood.
// Returns 0 on success, -1 on failure (or when the feature is compiled out).
int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetTypingDetectionStatus()");
  ANDROID_NOT_SUPPORTED(_shared->statistics());
  IPHONE_NOT_SUPPORTED(_shared->statistics());
#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  // Just use the VAD state to determine if we should enable typing detection
  // or not

  if (_shared->audio_processing()->voice_detection()->Enable(enable)) {
    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
        "SetTypingDetectionStatus() failed to set VAD state");
    return -1;
  }
  // NOTE: the likelihood is set unconditionally, even when disabling —
  // presumably harmless since the VAD is off in that case, but confirm.
  if (_shared->audio_processing()->voice_detection()->set_likelihood(
          VoiceDetection::kVeryLowLikelihood)) {
    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
        "SetTypingDetectionStatus() failed to set VAD likelihood to low");
    return -1;
  }

  return 0;
#else
  _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
      "SetTypingDetectionStatus is not supported");
  return -1;
#endif
}
+
+int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetTypingDetectionStatus()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ // Just use the VAD state to determine if we should enable typing
+ // detection or not
+
+ enabled = _shared->audio_processing()->voice_detection()->is_enabled();
+
+ return 0;
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetTypingDetectionStatus is not supported");
+ return -1;
+#endif
+}
+
+
+int VoEAudioProcessingImpl::TimeSinceLastTyping(int &seconds) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "TimeSinceLastTyping()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ if (!_shared->statistics().Initialized()) {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ // Check if typing detection is enabled
+ bool enabled = _shared->audio_processing()->voice_detection()->is_enabled();
+ if (enabled)
+ {
+ _shared->transmit_mixer()->TimeSinceLastTyping(seconds);
+ return 0;
+ }
+ else
+ {
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetTypingDetectionStatus is not enabled");
+ return -1;
+ }
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetTypingDetectionStatus is not supported");
+ return -1;
+#endif
+}
+
+int VoEAudioProcessingImpl::SetTypingDetectionParameters(int timeWindow,
+ int costPerTyping,
+ int reportingThreshold,
+ int penaltyDecay,
+ int typeEventDelay) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetTypingDetectionParameters()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#ifdef WEBRTC_VOICE_ENGINE_TYPING_DETECTION
+ if (!_shared->statistics().Initialized()) {
+ _shared->statistics().SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ return (_shared->transmit_mixer()->SetTypingDetectionParameters(timeWindow,
+ costPerTyping, reportingThreshold, penaltyDecay, typeEventDelay));
+
+#else
+ _shared->statistics().SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetTypingDetectionParameters is not supported");
+ return -1;
+#endif
+
+}
+
+void VoEAudioProcessingImpl::EnableStereoChannelSwapping(bool enable) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "EnableStereoChannelSwapping(enable=%d)", enable);
+ _shared->transmit_mixer()->EnableStereoChannelSwapping(enable);
+}
+
+bool VoEAudioProcessingImpl::IsStereoChannelSwappingEnabled() {
+ WEBRTC_TRACE_VOICE_API();
+ return _shared->transmit_mixer()->IsStereoChannelSwappingEnabled();
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_audio_processing_impl.h b/voice_engine/voe_audio_processing_impl.h
new file mode 100644
index 0000000..3d6b64d
--- /dev/null
+++ b/voice_engine/voe_audio_processing_impl.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+
+#include "voe_audio_processing.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
// Implementation of the VoEAudioProcessing sub-API. Holds a pointer to the
// engine-wide SharedData (not owned) and forwards most calls to the
// AudioProcessing module, the transmit mixer, or individual channels.
// Construction/destruction is restricted (protected) — instances are created
// by the owning VoiceEngineImpl.
class VoEAudioProcessingImpl : public VoEAudioProcessing {
 public:
  // Transmit-side noise suppression.
  virtual int SetNsStatus(bool enable, NsModes mode = kNsUnchanged);

  virtual int GetNsStatus(bool& enabled, NsModes& mode);

  // Transmit-side automatic gain control.
  virtual int SetAgcStatus(bool enable, AgcModes mode = kAgcUnchanged);

  virtual int GetAgcStatus(bool& enabled, AgcModes& mode);

  virtual int SetAgcConfig(const AgcConfig config);

  virtual int GetAgcConfig(AgcConfig& config);

  // Receive-side (per-channel) noise suppression.
  virtual int SetRxNsStatus(int channel,
                            bool enable,
                            NsModes mode = kNsUnchanged);

  virtual int GetRxNsStatus(int channel, bool& enabled, NsModes& mode);

  // Receive-side (per-channel) automatic gain control.
  virtual int SetRxAgcStatus(int channel,
                             bool enable,
                             AgcModes mode = kAgcUnchanged);

  virtual int GetRxAgcStatus(int channel, bool& enabled, AgcModes& mode);

  virtual int SetRxAgcConfig(int channel, const AgcConfig config);

  virtual int GetRxAgcConfig(int channel, AgcConfig& config);

  // Echo control (AEC/AECM).
  virtual int SetEcStatus(bool enable, EcModes mode = kEcUnchanged);
  virtual int GetEcStatus(bool& enabled, EcModes& mode);
  virtual int EnableDriftCompensation(bool enable);
  virtual bool DriftCompensationEnabled();

  virtual void SetDelayOffsetMs(int offset);
  virtual int DelayOffsetMs();

  virtual int SetAecmMode(AecmModes mode = kAecmSpeakerphone,
                          bool enableCNG = true);

  virtual int GetAecmMode(AecmModes& mode, bool& enabledCNG);

  virtual int EnableHighPassFilter(bool enable);
  virtual bool IsHighPassFilterEnabled();

  // Receive-side VAD observation per channel.
  virtual int RegisterRxVadObserver(int channel,
                                    VoERxVadCallback& observer);

  virtual int DeRegisterRxVadObserver(int channel);

  virtual int VoiceActivityIndicator(int channel);

  // Echo-cancellation metrics and delay logging.
  virtual int SetEcMetricsStatus(bool enable);

  virtual int GetEcMetricsStatus(bool& enabled);

  virtual int GetEchoMetrics(int& ERL, int& ERLE, int& RERL, int& A_NLP);

  virtual int GetEcDelayMetrics(int& delay_median, int& delay_std);

  // APM debug-dump recording.
  virtual int StartDebugRecording(const char* fileNameUTF8);

  virtual int StopDebugRecording();

  // Typing detection (built on the APM voice-activity detector).
  virtual int SetTypingDetectionStatus(bool enable);

  virtual int GetTypingDetectionStatus(bool& enabled);

  virtual int TimeSinceLastTyping(int &seconds);

  // TODO(niklase) Remove default argument as soon as libJingle is updated!
  virtual int SetTypingDetectionParameters(int timeWindow,
                                           int costPerTyping,
                                           int reportingThreshold,
                                           int penaltyDecay,
                                           int typeEventDelay = 0);

  virtual void EnableStereoChannelSwapping(bool enable);
  virtual bool IsStereoChannelSwappingEnabled();

 protected:
  VoEAudioProcessingImpl(voe::SharedData* shared);
  virtual ~VoEAudioProcessingImpl();

 private:
  // Tracks whether full AEC (as opposed to AECM) is the active echo mode.
  bool _isAecMode;
  // Engine-wide shared state; not owned by this object.
  voe::SharedData* _shared;
};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_AUDIO_PROCESSING_IMPL_H
+
diff --git a/voice_engine/voe_audio_processing_unittest.cc b/voice_engine/voe_audio_processing_unittest.cc
new file mode 100644
index 0000000..8c66d88
--- /dev/null
+++ b/voice_engine/voe_audio_processing_unittest.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voice_engine/include/voe_audio_processing.h"
+
+#include "gtest/gtest.h"
+#include "voice_engine/include/voe_base.h"
+
+namespace webrtc {
+namespace voe {
+namespace {
+
// Test fixture: creates a VoiceEngine plus its base and audio-processing
// sub-API interfaces for each test, and tears everything down afterwards.
class VoEAudioProcessingTest : public ::testing::Test {
 protected:
  VoEAudioProcessingTest()
      : voe_(VoiceEngine::Create()),
        base_(VoEBase::GetInterface(voe_)),
        audioproc_(VoEAudioProcessing::GetInterface(voe_)) {
  }

  virtual ~VoEAudioProcessingTest() {
    // Terminate first, then release both sub-API references before deleting
    // the engine — presumably Delete() requires all references released;
    // confirm against the VoiceEngine API contract.
    base_->Terminate();
    audioproc_->Release();
    base_->Release();
    VoiceEngine::Delete(voe_);
  }

  VoiceEngine* voe_;        // Engine under test; owned by the fixture.
  VoEBase* base_;           // Base sub-API reference.
  VoEAudioProcessing* audioproc_;  // Audio-processing sub-API reference.
};
+
// Drift-compensation calls made before VoEBase::Init() must fail (-1) and
// report the feature as disabled.
TEST_F(VoEAudioProcessingTest, FailureIfNotInitialized) {
  EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(true));
  EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(false));
  EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
}
+
// TODO(andrew): Investigate race conditions triggered by this test:
// https://code.google.com/p/webrtc/issues/detail?id=788
// On platforms that support drift compensation, enable/disable must succeed
// and be reflected by DriftCompensationEnabled(); on platforms that do not,
// both calls must fail and the feature must remain disabled.
TEST_F(VoEAudioProcessingTest, DISABLED_DriftCompensationIsEnabledIfSupported) {
  ASSERT_EQ(0, base_->Init());
  // TODO(andrew): Ideally, DriftCompensationSupported() would be mocked.
  bool supported = VoEAudioProcessing::DriftCompensationSupported();
  if (supported) {
    EXPECT_EQ(0, audioproc_->EnableDriftCompensation(true));
    EXPECT_TRUE(audioproc_->DriftCompensationEnabled());
    EXPECT_EQ(0, audioproc_->EnableDriftCompensation(false));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
  } else {
    EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(true));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
    EXPECT_EQ(-1, audioproc_->EnableDriftCompensation(false));
    EXPECT_FALSE(audioproc_->DriftCompensationEnabled());
  }
}
+
+} // namespace
+} // namespace voe
+} // namespace webrtc
diff --git a/voice_engine/voe_base_impl.cc b/voice_engine/voe_base_impl.cc
new file mode 100644
index 0000000..b635646
--- /dev/null
+++ b/voice_engine/voe_base_impl.cc
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_base_impl.h"
+
+#include "audio_coding_module.h"
+#include "audio_processing.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "modules/audio_device/audio_device_impl.h"
+#include "output_mixer.h"
+#include "signal_processing_library.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "utility.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+#if (defined(_WIN32) && defined(_DLL) && (_MSC_VER == 1400))
+// Fix for VS 2005 MD/MDd link problem
+#include <stdio.h>
+extern "C"
+ { FILE _iob[3] = { __iob_func()[0], __iob_func()[1], __iob_func()[2]}; }
+#endif
+
+namespace webrtc
+{
+
+VoEBase* VoEBase::GetInterface(VoiceEngine* voiceEngine)
+{
+ if (NULL == voiceEngine)
+ {
+ return NULL;
+ }
+ VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+ s->AddRef();
+ return s;
+}
+
// Constructs the VoEBase sub-API implementation. |shared| is the engine-wide
// shared state (not owned). The callback critical section is heap-allocated
// here and stored by reference; it is released in the destructor via
// "delete &_callbackCritSect".
VoEBaseImpl::VoEBaseImpl(voe::SharedData* shared) :
    _voiceEngineObserverPtr(NULL),
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _voiceEngineObserver(false), _oldVoEMicLevel(0), _oldMicLevel(0),
    _shared(shared)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl() - ctor");
}
+
// Tears down the sub-API: runs the internal termination sequence and frees
// the critical section allocated in the constructor (stored as a reference,
// hence the delete-of-address idiom).
VoEBaseImpl::~VoEBaseImpl()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "~VoEBaseImpl() - dtor");

    TerminateInternal();

    delete &_callbackCritSect;
}
+
+void VoEBaseImpl::OnErrorIsReported(const ErrorCode error)
+{
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (_voiceEngineObserver)
+ {
+ if (_voiceEngineObserverPtr)
+ {
+ int errCode(0);
+ if (error == AudioDeviceObserver::kRecordingError)
+ {
+ errCode = VE_RUNTIME_REC_ERROR;
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "VoEBaseImpl::OnErrorIsReported() => VE_RUNTIME_REC_ERROR");
+ }
+ else if (error == AudioDeviceObserver::kPlayoutError)
+ {
+ errCode = VE_RUNTIME_PLAY_ERROR;
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "VoEBaseImpl::OnErrorIsReported() => "
+ "VE_RUNTIME_PLAY_ERROR");
+ }
+ // Deliver callback (-1 <=> no channel dependency)
+ _voiceEngineObserverPtr->CallbackOnError(-1, errCode);
+ }
+ }
+}
+
+void VoEBaseImpl::OnWarningIsReported(const WarningCode warning)
+{
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (_voiceEngineObserver)
+ {
+ if (_voiceEngineObserverPtr)
+ {
+ int warningCode(0);
+ if (warning == AudioDeviceObserver::kRecordingWarning)
+ {
+ warningCode = VE_RUNTIME_REC_WARNING;
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "VoEBaseImpl::OnErrorIsReported() => "
+ "VE_RUNTIME_REC_WARNING");
+ }
+ else if (warning == AudioDeviceObserver::kPlayoutWarning)
+ {
+ warningCode = VE_RUNTIME_PLAY_WARNING;
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "VoEBaseImpl::OnErrorIsReported() => "
+ "VE_RUNTIME_PLAY_WARNING");
+ }
+ // Deliver callback (-1 <=> no channel dependency)
+ _voiceEngineObserverPtr->CallbackOnError(-1, warningCode);
+ }
+ }
+}
+
// AudioTransport callback invoked by the ADM with a block of captured audio.
// Pipeline: scale the mic level into VoE's range (analog-AGC mode only),
// run the transmit mixer's demux/APM stage, fan the frame out to all sending
// channels, encode+send, and finally report a new mic level back to the ADM
// (again, analog-AGC mode only). Returns 0.
WebRtc_Word32 VoEBaseImpl::RecordedDataIsAvailable(
        const void* audioSamples,
        const WebRtc_UWord32 nSamples,
        const WebRtc_UWord8 nBytesPerSample,
        const WebRtc_UWord8 nChannels,
        const WebRtc_UWord32 samplesPerSec,
        const WebRtc_UWord32 totalDelayMS,
        const WebRtc_Word32 clockDrift,
        const WebRtc_UWord32 currentMicLevel,
        WebRtc_UWord32& newMicLevel)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::RecordedDataIsAvailable(nSamples=%u, "
                 "nBytesPerSample=%u, nChannels=%u, samplesPerSec=%u, "
                 "totalDelayMS=%u, clockDrift=%d, currentMicLevel=%u)",
                 nSamples, nBytesPerSample, nChannels, samplesPerSec,
                 totalDelayMS, clockDrift, currentMicLevel);

    assert(_shared->transmit_mixer() != NULL);
    assert(_shared->audio_device() != NULL);

    bool isAnalogAGC(false);
    WebRtc_UWord32 maxVolume(0);
    WebRtc_UWord16 currentVoEMicLevel(0);
    WebRtc_UWord32 newVoEMicLevel(0);

    // Mic-level handling below only applies when the AGC runs in adaptive
    // analog mode (it is then steering the physical mic volume).
    if (_shared->audio_processing() &&
        (_shared->audio_processing()->gain_control()->mode()
                        == GainControl::kAdaptiveAnalog))
    {
        isAnalogAGC = true;
    }

    // Will only deal with the volume in adaptive analog mode
    if (isAnalogAGC)
    {
        // Scale from ADM to VoE level range (rounded to nearest).
        if (_shared->audio_device()->MaxMicrophoneVolume(&maxVolume) == 0)
        {
            if (0 != maxVolume)
            {
                currentVoEMicLevel = (WebRtc_UWord16) ((currentMicLevel
                        * kMaxVolumeLevel + (int) (maxVolume / 2))
                        / (maxVolume));
            }
        }
        // We learned that on certain systems (e.g Linux) the currentVoEMicLevel
        // can be greater than the maxVolumeLevel therefore
        // we are going to cap the currentVoEMicLevel to the maxVolumeLevel
        // and change the maxVolume to currentMicLevel if it turns out that
        // the currentVoEMicLevel is indeed greater than the maxVolumeLevel.
        if (currentVoEMicLevel > kMaxVolumeLevel)
        {
            currentVoEMicLevel = kMaxVolumeLevel;
            maxVolume = currentMicLevel;
        }
    }

    // Keep track if the MicLevel has been changed by the AGC, if not,
    // use the old value AGC returns to let AGC continue its trend,
    // so eventually the AGC is able to change the mic level. This handles
    // issues with truncation introduced by the scaling.
    if (_oldMicLevel == currentMicLevel)
    {
        currentVoEMicLevel = (WebRtc_UWord16) _oldVoEMicLevel;
    }

    // Perform channel-independent operations
    // (APM, mix with file, record to file, mute, etc.)
    _shared->transmit_mixer()->PrepareDemux(audioSamples, nSamples, nChannels,
        samplesPerSec, static_cast<WebRtc_UWord16>(totalDelayMS), clockDrift,
        currentVoEMicLevel);

    // Copy the audio frame to each sending channel and perform
    // channel-dependent operations (file mixing, mute, etc.) to prepare
    // for encoding.
    _shared->transmit_mixer()->DemuxAndMix();
    // Do the encoding and packetize+transmit the RTP packet when encoding
    // is done.
    _shared->transmit_mixer()->EncodeAndSend();

    // Will only deal with the volume in adaptive analog mode
    if (isAnalogAGC)
    {
        // Scale from VoE to ADM level range
        newVoEMicLevel = _shared->transmit_mixer()->CaptureLevel();
        if (newVoEMicLevel != currentVoEMicLevel)
        {
            // Add (kMaxVolumeLevel/2) to round the value
            newMicLevel = (WebRtc_UWord32) ((newVoEMicLevel * maxVolume
                    + (int) (kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
        }
        else
        {
            // Pass zero if the level is unchanged
            newMicLevel = 0;
        }

        // Keep track of the value AGC returns
        _oldVoEMicLevel = newVoEMicLevel;
        _oldMicLevel = currentMicLevel;
    }

    return 0;
}
+
// AudioTransport callback invoked by the ADM when it needs playout audio.
// Mixes all active channels, post-processes the combined signal, resamples
// to the device rate, and copies the PCM data into |audioSamples|.
// Returns 0; |nSamplesOut| is set to the per-channel sample count delivered.
WebRtc_Word32 VoEBaseImpl::NeedMorePlayData(
        const WebRtc_UWord32 nSamples,
        const WebRtc_UWord8 nBytesPerSample,
        const WebRtc_UWord8 nChannels,
        const WebRtc_UWord32 samplesPerSec,
        void* audioSamples,
        WebRtc_UWord32& nSamplesOut)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
                 "VoEBaseImpl::NeedMorePlayData(nSamples=%u, "
                 "nBytesPerSample=%d, nChannels=%d, samplesPerSec=%u)",
                 nSamples, nBytesPerSample, nChannels, samplesPerSec);

    assert(_shared->output_mixer() != NULL);

    // TODO(andrew): if the device is running in mono, we should tell the mixer
    // here so that it will only request mono from AudioCodingModule.
    // Perform mixing of all active participants (channel-based mixing)
    _shared->output_mixer()->MixActiveChannels();

    // Additional operations on the combined signal
    _shared->output_mixer()->DoOperationsOnCombinedSignal();

    // Retrieve the final output mix (resampled to match the ADM)
    _shared->output_mixer()->GetMixedAudio(samplesPerSec, nChannels,
        &_audioFrame);

    // The mixer must deliver exactly what the device asked for.
    assert(static_cast<int>(nSamples) == _audioFrame.samples_per_channel_);
    assert(samplesPerSec ==
        static_cast<WebRtc_UWord32>(_audioFrame.sample_rate_hz_));

    // Deliver audio (PCM) samples to the ADM
    memcpy(
           (WebRtc_Word16*) audioSamples,
           (const WebRtc_Word16*) _audioFrame.data_,
           sizeof(WebRtc_Word16) * (_audioFrame.samples_per_channel_
                   * _audioFrame.num_channels_));

    nSamplesOut = _audioFrame.samples_per_channel_;

    return 0;
}
+
+int VoEBaseImpl::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "RegisterVoiceEngineObserver(observer=0x%d)", &observer);
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (_voiceEngineObserverPtr)
+ {
+ _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+ "RegisterVoiceEngineObserver() observer already enabled");
+ return -1;
+ }
+
+ // Register the observer in all active channels
+ voe::ScopedChannel sc(_shared->channel_manager());
+ void* iterator(NULL);
+ voe::Channel* channelPtr = sc.GetFirstChannel(iterator);
+ while (channelPtr != NULL)
+ {
+ channelPtr->RegisterVoiceEngineObserver(observer);
+ channelPtr = sc.GetNextChannel(iterator);
+ }
+ _shared->transmit_mixer()->RegisterVoiceEngineObserver(observer);
+
+ _voiceEngineObserverPtr = &observer;
+ _voiceEngineObserver = true;
+
+ return 0;
+}
+
+int VoEBaseImpl::DeRegisterVoiceEngineObserver()
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "DeRegisterVoiceEngineObserver()");
+ CriticalSectionScoped cs(&_callbackCritSect);
+ if (!_voiceEngineObserverPtr)
+ {
+ _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+ "DeRegisterVoiceEngineObserver() observer already disabled");
+ return 0;
+ }
+
+ _voiceEngineObserver = false;
+ _voiceEngineObserverPtr = NULL;
+
+ // Deregister the observer in all active channels
+ voe::ScopedChannel sc(_shared->channel_manager());
+ void* iterator(NULL);
+ voe::Channel* channelPtr = sc.GetFirstChannel(iterator);
+ while (channelPtr != NULL)
+ {
+ channelPtr->DeRegisterVoiceEngineObserver();
+ channelPtr = sc.GetNextChannel(iterator);
+ }
+
+ return 0;
+}
+
+int VoEBaseImpl::Init(AudioDeviceModule* external_adm)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "Init(external_adm=0x%p)", external_adm);
+ CriticalSectionScoped cs(_shared->crit_sec());
+
+ WebRtcSpl_Init();
+
+ if (_shared->statistics().Initialized())
+ {
+ return 0;
+ }
+
+ if (_shared->process_thread())
+ {
+ if (_shared->process_thread()->Start() != 0)
+ {
+ _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
+ "Init() failed to start module process thread");
+ return -1;
+ }
+ }
+
+ // Create an internal ADM if the user has not added an external
+ // ADM implementation as input to Init().
+ if (external_adm == NULL)
+ {
+ // Create the internal ADM implementation.
+ _shared->set_audio_device(AudioDeviceModuleImpl::Create(
+ VoEId(_shared->instance_id(), -1), _shared->audio_device_layer()));
+
+ if (_shared->audio_device() == NULL)
+ {
+ _shared->SetLastError(VE_NO_MEMORY, kTraceCritical,
+ "Init() failed to create the ADM");
+ return -1;
+ }
+ }
+ else
+ {
+ // Use the already existing external ADM implementation.
+ _shared->set_audio_device(external_adm);
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "An external ADM implementation will be used in VoiceEngine");
+ }
+
+ // Register the ADM to the process thread, which will drive the error
+ // callback mechanism
+ if (_shared->process_thread() &&
+ _shared->process_thread()->RegisterModule(_shared->audio_device()) != 0)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+ "Init() failed to register the ADM");
+ return -1;
+ }
+
+ bool available(false);
+
+ // --------------------
+ // Reinitialize the ADM
+
+ // Register the AudioObserver implementation
+ if (_shared->audio_device()->RegisterEventObserver(this) != 0) {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+ "Init() failed to register event observer for the ADM");
+ }
+
+ // Register the AudioTransport implementation
+ if (_shared->audio_device()->RegisterAudioCallback(this) != 0) {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+ "Init() failed to register audio callback for the ADM");
+ }
+
+ // ADM initialization
+ if (_shared->audio_device()->Init() != 0)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+ "Init() failed to initialize the ADM");
+ return -1;
+ }
+
+ // Initialize the default speaker
+ if (_shared->audio_device()->SetPlayoutDevice(
+ WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceInfo,
+ "Init() failed to set the default output device");
+ }
+ if (_shared->audio_device()->SpeakerIsAvailable(&available) != 0)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceInfo,
+ "Init() failed to check speaker availability, trying to "
+ "initialize speaker anyway");
+ }
+ else if (!available)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceInfo,
+ "Init() speaker not available, trying to initialize speaker "
+ "anyway");
+ }
+ if (_shared->audio_device()->InitSpeaker() != 0)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceInfo,
+ "Init() failed to initialize the speaker");
+ }
+
+ // Initialize the default microphone
+ if (_shared->audio_device()->SetRecordingDevice(
+ WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceInfo,
+ "Init() failed to set the default input device");
+ }
+ if (_shared->audio_device()->MicrophoneIsAvailable(&available) != 0)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+ "Init() failed to check microphone availability, trying to "
+ "initialize microphone anyway");
+ }
+ else if (!available)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+ "Init() microphone not available, trying to initialize "
+ "microphone anyway");
+ }
+ if (_shared->audio_device()->InitMicrophone() != 0)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceInfo,
+ "Init() failed to initialize the microphone");
+ }
+
+ // Set number of channels
+ if (_shared->audio_device()->StereoPlayoutIsAvailable(&available) != 0) {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "Init() failed to query stereo playout mode");
+ }
+ if (_shared->audio_device()->SetStereoPlayout(available) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "Init() failed to set mono/stereo playout mode");
+ }
+
+ // TODO(andrew): These functions don't tell us whether stereo recording
+ // is truly available. We simply set the AudioProcessing input to stereo
+ // here, because we have to wait until receiving the first frame to
+ // determine the actual number of channels anyway.
+ //
+ // These functions may be changed; tracked here:
+ // http://code.google.com/p/webrtc/issues/detail?id=204
+ _shared->audio_device()->StereoRecordingIsAvailable(&available);
+ if (_shared->audio_device()->SetStereoRecording(available) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "Init() failed to set mono/stereo recording mode");
+ }
+
+ // APM initialization done after sound card since we need
+ // to know if we support stereo recording or not.
+
+ // Create the AudioProcessing Module if it does not exist.
+
+ if (_shared->audio_processing() == NULL)
+ {
+ _shared->set_audio_processing(AudioProcessing::Create(
+ VoEId(_shared->instance_id(), -1)));
+ if (_shared->audio_processing() == NULL)
+ {
+ _shared->SetLastError(VE_NO_MEMORY, kTraceCritical,
+ "Init() failed to create the AP module");
+ return -1;
+ }
+ // Ensure that mixers in both directions has access to the created APM
+ _shared->transmit_mixer()->SetAudioProcessingModule(
+ _shared->audio_processing());
+ _shared->output_mixer()->SetAudioProcessingModule(
+ _shared->audio_processing());
+
+ if (_shared->audio_processing()->echo_cancellation()->
+ set_device_sample_rate_hz(
+ kVoiceEngineAudioProcessingDeviceSampleRateHz))
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set the device sample rate to 48K for AP "
+ " module");
+ return -1;
+ }
+ // Using 8 kHz as inital Fs. Might be changed already at first call.
+ if (_shared->audio_processing()->set_sample_rate_hz(8000))
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set the sample rate to 8K for AP module");
+ return -1;
+ }
+
+ // Assume mono until the audio frames are received from the capture
+ // device, at which point this can be updated.
+ if (_shared->audio_processing()->set_num_channels(1, 1) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
+ "Init() failed to set channels for the primary audio stream");
+ return -1;
+ }
+
+ if (_shared->audio_processing()->set_num_reverse_channels(1) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
+ "Init() failed to set channels for the primary audio stream");
+ return -1;
+ }
+ // high-pass filter
+ if (_shared->audio_processing()->high_pass_filter()->Enable(
+ WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE) != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set the high-pass filter for AP module");
+ return -1;
+ }
+ // Echo Cancellation
+ if (_shared->audio_processing()->echo_cancellation()->
+ enable_drift_compensation(false) != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set drift compensation for AP module");
+ return -1;
+ }
+ if (_shared->audio_processing()->echo_cancellation()->Enable(
+ WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE))
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set echo cancellation state for AP module");
+ return -1;
+ }
+ // Noise Reduction
+ if (_shared->audio_processing()->noise_suppression()->set_level(
+ (NoiseSuppression::Level) WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE)
+ != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set noise reduction level for AP module");
+ return -1;
+ }
+ if (_shared->audio_processing()->noise_suppression()->Enable(
+ WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE) != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set noise reduction state for AP module");
+ return -1;
+ }
+ // Automatic Gain control
+ if (_shared->audio_processing()->gain_control()->
+ set_analog_level_limits(kMinVolumeLevel,kMaxVolumeLevel) != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set AGC analog level for AP module");
+ return -1;
+ }
+ if (_shared->audio_processing()->gain_control()->set_mode(
+ (GainControl::Mode) WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE)
+ != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set AGC mode for AP module");
+ return -1;
+ }
+ if (_shared->audio_processing()->gain_control()->Enable(
+ WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE)
+ != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set AGC state for AP module");
+ return -1;
+ }
+ // VAD
+ if (_shared->audio_processing()->voice_detection()->Enable(
+ WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE)
+ != 0)
+ {
+ _shared->SetLastError(VE_APM_ERROR, kTraceError,
+ "Init() failed to set VAD state for AP module");
+ return -1;
+ }
+ }
+
+ // Set default AGC mode for the ADM
+#ifdef WEBRTC_VOICE_ENGINE_AGC
+ bool enable(false);
+ if (_shared->audio_processing()->gain_control()->mode()
+ != GainControl::kFixedDigital)
+ {
+ enable = _shared->audio_processing()->gain_control()->is_enabled();
+ // Only set the AGC mode for the ADM when Adaptive AGC mode is selected
+ if (_shared->audio_device()->SetAGC(enable) != 0)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+ kTraceError, "Init() failed to set default AGC mode in ADM 0");
+ }
+ }
+#endif
+
+ return _shared->statistics().SetInitialized();
+}
+
+// Shuts the engine down. Takes the shared API lock, then delegates to
+// TerminateInternal(), which deletes all channels and releases the
+// ADM/APM modules.
+int VoEBaseImpl::Terminate()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "Terminate()");
+    CriticalSectionScoped cs(_shared->crit_sec());
+    return TerminateInternal();
+}
+
+// Returns the maximum number of channels this engine instance supports,
+// as reported by the channel manager. Does not require Init().
+int VoEBaseImpl::MaxNumOfChannels()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "MaxNumOfChannels()");
+    WebRtc_Word32 maxNumOfChannels =
+        _shared->channel_manager().MaxNumOfChannels();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "MaxNumOfChannels() => %d", maxNumOfChannels);
+    return (maxNumOfChannels);
+}
+
+// Allocates a new channel and wires it to the engine-wide objects
+// (mixers, process thread, ADM, observer). Returns the new channel id
+// (>= 0) on success, -1 on failure.
+int VoEBaseImpl::CreateChannel()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "CreateChannel()");
+    CriticalSectionScoped cs(_shared->crit_sec());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_Word32 channelId = -1;
+
+    if (!_shared->channel_manager().CreateChannel(channelId))
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+            "CreateChannel() failed to allocate memory for channel");
+        return -1;
+    }
+
+    bool destroyChannel(false);
+    // Inner scope: the ScopedChannel guard must be released before
+    // DestroyChannel() is called below, hence the deferred
+    // |destroyChannel| flag instead of destroying on the spot.
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channelId);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                "CreateChannel() failed to allocate memory for channel");
+            return -1;
+        }
+        else if (channelPtr->SetEngineInformation(_shared->statistics(),
+                                                  *_shared->output_mixer(),
+                                                  *_shared->transmit_mixer(),
+                                                  *_shared->process_thread(),
+                                                  *_shared->audio_device(),
+                                                  _voiceEngineObserverPtr,
+                                                  &_callbackCritSect) != 0)
+        {
+            destroyChannel = true;
+            _shared->SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                "CreateChannel() failed to associate engine and channel."
+                " Destroying channel.");
+        }
+        else if (channelPtr->Init() != 0)
+        {
+            destroyChannel = true;
+            _shared->SetLastError(VE_CHANNEL_NOT_CREATED, kTraceError,
+                "CreateChannel() failed to initialize channel. Destroying"
+                " channel.");
+        }
+    }
+    if (destroyChannel)
+    {
+        _shared->channel_manager().DestroyChannel(channelId);
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "CreateChannel() => %d", channelId);
+    return channelId;
+}
+
+// Destroys |channel|. Afterwards calls the channel-less StopSend() /
+// StopPlayout() overloads, which stop the shared audio device only if
+// no remaining channel is sending/playing.
+int VoEBaseImpl::DeleteChannel(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "DeleteChannel(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Existence check in its own scope so the ScopedChannel guard is
+    // released before DestroyChannel() runs.
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "DeleteChannel() failed to locate channel");
+            return -1;
+        }
+    }
+
+    if (_shared->channel_manager().DestroyChannel(channel) != 0)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeleteChannel() failed to destroy channel");
+        return -1;
+    }
+
+    if (StopSend() != 0)
+    {
+        return -1;
+    }
+
+    if (StopPlayout() != 0)
+    {
+        return -1;
+    }
+
+    return 0;
+}
+
+// Configures the local RTP/RTCP receive sockets for |channel|.
+// |RTCPport| == kVoEDefault maps to 0, which the RTP module interprets
+// as "RTP port + 1". Fails when built with WEBRTC_EXTERNAL_TRANSPORT.
+int VoEBaseImpl::SetLocalReceiver(int channel, int port, int RTCPport,
+                                  const char ipAddr[64],
+                                  const char multiCastAddr[64])
+{
+    // Initialize local receive sockets (RTP and RTCP).
+    //
+    // The sockets are always first closed and then created again by this
+    // function call. The created sockets are by default also used for
+    // transmission (unless source port is set in SetSendDestination).
+    //
+    // Note that, sockets can also be created automatically if a user calls
+    // SetSendDestination and StartSend without having called SetLocalReceiver
+    // first. The sockets are then created at the first packet transmission.
+
+    CriticalSectionScoped cs(_shared->crit_sec());
+    // Four trace variants so NULL addresses are never passed to %s.
+    if (ipAddr == NULL && multiCastAddr == NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d)",
+            channel, port, RTCPport);
+    }
+    else if (ipAddr != NULL && multiCastAddr == NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, ipAddr=%s)",
+            channel, port, RTCPport, ipAddr);
+    }
+    else if (ipAddr == NULL && multiCastAddr != NULL)
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, "
+            "multiCastAddr=%s)", channel, port, RTCPport, multiCastAddr);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "SetLocalReceiver(channel=%d, port=%d, RTCPport=%d, "
+            "ipAddr=%s, multiCastAddr=%s)", channel, port, RTCPport, ipAddr,
+            multiCastAddr);
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if ((port < 0) || (port > 65535))
+    {
+        _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+            "SetLocalReceiver() invalid RTP port");
+        return -1;
+    }
+    if (((RTCPport != kVoEDefault) && (RTCPport < 0)) || ((RTCPport
+        != kVoEDefault) && (RTCPport > 65535)))
+    {
+        _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+            "SetLocalReceiver() invalid RTCP port");
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetLocalReceiver() failed to locate channel");
+        return -1;
+    }
+
+    // Cast RTCP port. In the RTP module 0 corresponds to RTP port + 1 in
+    // the module, which is the default.
+    WebRtc_UWord16 rtcpPortUW16(0);
+    if (RTCPport != kVoEDefault)
+    {
+        rtcpPortUW16 = static_cast<WebRtc_UWord16> (RTCPport);
+    }
+
+    return channelPtr->SetLocalReceiver(port, rtcpPortUW16, ipAddr,
+                                        multiCastAddr);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED,
+        kTraceWarning, "SetLocalReceiver() VoE is built for external "
+        "transport");
+    return -1;
+#endif
+}
+
+// Retrieves the local receiver configuration (RTP port, RTCP port and,
+// when |ipAddr| is non-NULL, the bound IP address) for |channel|.
+// Fails when built with WEBRTC_EXTERNAL_TRANSPORT.
+int VoEBaseImpl::GetLocalReceiver(int channel, int& port, int& RTCPport,
+                                  char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "GetLocalReceiver(channel=%d, ipAddr[]=?)", channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: this error previously named SetLocalReceiver().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetLocalReceiver() failed to locate channel");
+        return -1;
+    }
+    WebRtc_Word32 ret = channelPtr->GetLocalReceiver(port, RTCPport, ipAddr);
+    // Two trace variants so a NULL |ipAddr| is never passed to %s.
+    if (ipAddr != NULL)
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "GetLocalReceiver() => port=%d, RTCPport=%d, ipAddr=%s",
+            port, RTCPport, ipAddr);
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "GetLocalReceiver() => port=%d, RTCPport=%d", port, RTCPport);
+    }
+    return ret;
+#else
+    // Bug fix: this warning previously named SetLocalReceiver().
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetLocalReceiver() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Sets the remote RTP/RTCP destination for |channel|. kVoEDefault for
+// |RTCPport| / |sourcePort| selects the RTP-module defaults. Fails when
+// built with WEBRTC_EXTERNAL_TRANSPORT.
+int VoEBaseImpl::SetSendDestination(int channel, int port, const char* ipaddr,
+                                    int sourcePort, int RTCPport)
+{
+    WEBRTC_TRACE(
+        kTraceApiCall,
+        kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "SetSendDestination(channel=%d, port=%d, ipaddr=%s,"
+        "sourcePort=%d, RTCPport=%d)",
+        channel, port, ipaddr, sourcePort, RTCPport);
+    CriticalSectionScoped cs(_shared->crit_sec());
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendDestination() failed to locate channel");
+        return -1;
+    }
+    if ((port < 0) || (port > 65535))
+    {
+        _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+            "SetSendDestination() invalid RTP port");
+        return -1;
+    }
+    if (((RTCPport != kVoEDefault) && (RTCPport < 0)) || ((RTCPport
+        != kVoEDefault) && (RTCPport > 65535)))
+    {
+        _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+            "SetSendDestination() invalid RTCP port");
+        return -1;
+    }
+    if (((sourcePort != kVoEDefault) && (sourcePort < 0)) || ((sourcePort
+        != kVoEDefault) && (sourcePort > 65535)))
+    {
+        _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+            "SetSendDestination() invalid source port");
+        return -1;
+    }
+
+    // Cast RTCP port. In the RTP module 0 corresponds to RTP port + 1 in the
+    // module, which is the default.
+    WebRtc_UWord16 rtcpPortUW16(0);
+    if (RTCPport != kVoEDefault)
+    {
+        rtcpPortUW16 = static_cast<WebRtc_UWord16> (RTCPport);
+        WEBRTC_TRACE(
+            kTraceInfo,
+            kTraceVoice,
+            VoEId(_shared->instance_id(), channel),
+            "SetSendDestination() non default RTCP port %u will be "
+            "utilized",
+            rtcpPortUW16);
+    }
+
+    return channelPtr->SetSendDestination(port, ipaddr, sourcePort,
+                                          rtcpPortUW16);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSendDestination() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Retrieves the remote send destination (RTP port, address, source port
+// and RTCP port) for |channel|. Fails when built with
+// WEBRTC_EXTERNAL_TRANSPORT.
+int VoEBaseImpl::GetSendDestination(int channel, int& port, char ipAddr[64],
+                                    int& sourcePort, int& RTCPport)
+{
+    WEBRTC_TRACE(
+        kTraceApiCall,
+        kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "GetSendDestination(channel=%d, ipAddr[]=?, sourcePort=?,"
+        "RTCPport=?)",
+        channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendDestination() failed to locate channel");
+        return -1;
+    }
+    WebRtc_Word32 ret = channelPtr->GetSendDestination(port, ipAddr,
+                                                       sourcePort, RTCPport);
+    // NOTE(review): the trace format strings below print the "RTCPport"
+    // label/value twice — looks like a copy-paste slip; harmless to logs
+    // only, left as-is.
+    if (ipAddr != NULL)
+    {
+        WEBRTC_TRACE(
+            kTraceStateInfo,
+            kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "GetSendDestination() => port=%d, RTCPport=%d, ipAddr=%s, "
+            "sourcePort=%d, RTCPport=%d",
+            port, RTCPport, ipAddr, sourcePort, RTCPport);
+    }
+    else
+    {
+        WEBRTC_TRACE(
+            kTraceStateInfo,
+            kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "GetSendDestination() => port=%d, RTCPport=%d, "
+            "sourcePort=%d, RTCPport=%d",
+            port, RTCPport, sourcePort, RTCPport);
+    }
+    return ret;
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSendDestination() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Starts RTP/RTCP reception on |channel|.
+int VoEBaseImpl::StartReceive(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StartReceive(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartReceive() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->StartReceiving();
+}
+
+// Stops RTP/RTCP reception on |channel|.
+int VoEBaseImpl::StopReceive(int channel)
+{
+    // Bug fix: the API-call trace previously said "StopListen".
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StopReceive(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: this error previously named SetLocalReceiver().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopReceive() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->StopReceiving();
+}
+
+// Starts playout on |channel|. First ensures the shared audio device is
+// playing (via the channel-less StartPlayout() overload), then starts
+// channel-level playout. No-op if the channel is already playing.
+int VoEBaseImpl::StartPlayout(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StartPlayout(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartPlayout() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->Playing())
+    {
+        return 0;
+    }
+    if (StartPlayout() != 0)
+    {
+        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+            "StartPlayout() failed to start playout");
+        return -1;
+    }
+    return channelPtr->StartPlayout();
+}
+
+// Stops playout on |channel|, then asks the channel-less StopPlayout()
+// to stop the shared audio device if no other channel is playing.
+// A failure to stop channel-level playout is only logged, not fatal.
+int VoEBaseImpl::StopPlayout(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StopPlayout(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayout() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->StopPlayout() != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "StopPlayout() failed to stop playout for channel %d", channel);
+    }
+    return StopPlayout();
+}
+
+// Starts sending on |channel|. Requires a configured send destination
+// (unless the channel uses external transport). Ensures the shared
+// audio device is recording before starting channel-level sending.
+int VoEBaseImpl::StartSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StartSend(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartSend() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->Sending())
+    {
+        return 0;
+    }
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!channelPtr->ExternalTransport()
+        && !channelPtr->SendSocketsInitialized())
+    {
+        _shared->SetLastError(VE_DESTINATION_NOT_INITED, kTraceError,
+            "StartSend() must set send destination first");
+        return -1;
+    }
+#endif
+    if (StartSend() != 0)
+    {
+        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+            "StartSend() failed to start recording");
+        return -1;
+    }
+    return channelPtr->StartSend();
+}
+
+// Stops sending on |channel|, then asks the channel-less StopSend() to
+// stop the shared recording device if no other channel is sending.
+// A failure to stop channel-level sending is only logged, not fatal.
+int VoEBaseImpl::StopSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "StopSend(channel=%d)", channel);
+    CriticalSectionScoped cs(_shared->crit_sec());
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopSend() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->StopSend() != 0)
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "StopSend() failed to stop sending for channel %d", channel);
+    }
+    return StopSend();
+}
+
+// Writes the engine version string (VoE version, build info and any
+// compile-option suffixes) into |version| (>= 1024 bytes, always
+// NUL-terminated on success) and echoes it to the trace in pieces of at
+// most 180 characters. Returns 0 on success, -1 on failure.
+int VoEBaseImpl::GetVersion(char version[1024])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "GetVersion(version=?)");
+    assert(kVoiceEngineVersionMaxMessageSize == 1024);
+
+    if (version == NULL)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError);
+        return (-1);
+    }
+
+    char versionBuf[kVoiceEngineVersionMaxMessageSize];
+    char* versionPtr = versionBuf;
+
+    WebRtc_Word32 len = 0;
+    WebRtc_Word32 accLen = 0;
+
+    len = AddVoEVersion(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+    len = AddBuildInfo(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    len = AddExternalTransportBuild(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    len = AddExternalRecAndPlayoutBuild(versionPtr);
+    if (len == -1)
+    {
+        return -1;
+    }
+    versionPtr += len;
+    accLen += len;
+    assert(accLen < kVoiceEngineVersionMaxMessageSize);
+#endif
+
+    memcpy(version, versionBuf, accLen);
+    version[accLen] = '\0';
+
+    // To avoid truncation in the trace, split the string into parts of at
+    // most 180 characters, preferably breaking at a line end.
+    char partOfVersion[256];
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1), "GetVersion() =>");
+    for (int partStart = 0; partStart < accLen;)
+    {
+        memset(partOfVersion, 0, sizeof(partOfVersion));
+        int partEnd = partStart + 180;
+        // Bug fix: never scan beyond the terminator at |accLen|; the old
+        // code read uninitialized bytes of the caller's buffer here.
+        if (partEnd > accLen)
+        {
+            partEnd = accLen;
+        }
+        while (partEnd > partStart && version[partEnd] != '\n'
+            && version[partEnd] != '\0')
+        {
+            partEnd--;
+        }
+        if (partEnd == partStart)
+        {
+            // No line break inside the window: emit a raw 180-char chunk
+            // so the loop always advances.
+            partEnd = (partStart + 180 > accLen) ? accLen : partStart + 180;
+        }
+        if (partEnd < accLen)
+        {
+            memcpy(partOfVersion, &version[partStart], partEnd - partStart);
+        }
+        else
+        {
+            memcpy(partOfVersion, &version[partStart], accLen - partStart);
+        }
+        partStart = partEnd;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), "%s", partOfVersion);
+    }
+
+    return 0;
+}
+
+// Appends "Build: svn:<rev> <info>\n" at |str|; returns chars written.
+// |str| must have room (bounded by GetVersion's asserts); sprintf is
+// unchecked here by design of the pre-sized 1024-byte buffer.
+WebRtc_Word32 VoEBaseImpl::AddBuildInfo(char* str) const
+{
+    return sprintf(str, "Build: svn:%s %s\n", WEBRTC_SVNREVISION, BUILDINFO);
+}
+
+// Appends the VoiceEngine version line at |str|; returns chars written.
+WebRtc_Word32 VoEBaseImpl::AddVoEVersion(char* str) const
+{
+    return sprintf(str, "VoiceEngine 4.1.0\n");
+}
+
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+WebRtc_Word32 VoEBaseImpl::AddExternalTransportBuild(char* str) const
+{
+ return sprintf(str, "External transport build\n");
+}
+#endif
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+// Appends the external rec/playout build marker; returns chars written.
+WebRtc_Word32 VoEBaseImpl::AddExternalRecAndPlayoutBuild(char* str) const
+{
+    return sprintf(str, "External recording and playout build\n");
+}
+#endif
+
+// Returns the most recent error code recorded via SetLastError().
+int VoEBaseImpl::LastError()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "LastError()");
+    return (_shared->statistics().LastError());
+}
+
+
+// Sets the NetEQ playout mode for |channel|.
+// NOTE(review): unlike most APIs in this file, no CriticalSectionScoped
+// is taken here — confirm whether that is intentional.
+int VoEBaseImpl::SetNetEQPlayoutMode(int channel, NetEqModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "SetNetEQPlayoutMode(channel=%i, mode=%i)", channel, mode);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetNetEQPlayoutMode() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetNetEQPlayoutMode(mode);
+}
+
+// Retrieves the NetEQ playout mode for |channel| into |mode|.
+int VoEBaseImpl::GetNetEQPlayoutMode(int channel, NetEqModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "GetNetEQPlayoutMode(channel=%i, mode=?)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetNetEQPlayoutMode() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetNetEQPlayoutMode(mode);
+}
+
+// Sets the NetEQ background-noise generation mode for |channel|.
+int VoEBaseImpl::SetNetEQBGNMode(int channel, NetEqBgnModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "SetNetEQBGNMode(channel=%i, mode=%i)", channel, mode);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetNetEQBGNMode() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetNetEQBGNMode(mode);
+}
+
+// Retrieves the NetEQ background-noise mode for |channel| into |mode|.
+int VoEBaseImpl::GetNetEQBGNMode(int channel, NetEqBgnModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "GetNetEQBGNMode(channel=%i, mode=?)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetNetEQBGNMode() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetNetEQBGNMode(mode);
+}
+
+// Puts |channel| on or off hold in the direction selected by |mode|.
+int VoEBaseImpl::SetOnHoldStatus(int channel, bool enable, OnHoldModes mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "SetOnHoldStatus(channel=%d, enable=%d, mode=%d)", channel,
+        enable, mode);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetOnHoldStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetOnHoldStatus(enable, mode);
+}
+
+// Retrieves the hold state and mode for |channel|.
+int VoEBaseImpl::GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "GetOnHoldStatus(channel=%d, enabled=?, mode=?)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetOnHoldStatus() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetOnHoldStatus(enabled, mode);
+}
+
+// Engine-internal helper: starts the shared audio device's playout.
+// No-op when already playing or when external playout is enabled (the
+// application then drives playout itself).
+WebRtc_Word32 VoEBaseImpl::StartPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "VoEBaseImpl::StartPlayout()");
+    if (_shared->audio_device()->Playing())
+    {
+        return 0;
+    }
+    if (!_shared->ext_playout())
+    {
+        if (_shared->audio_device()->InitPlayout() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayout() failed to initialize playout");
+            return -1;
+        }
+        if (_shared->audio_device()->StartPlayout() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayout() failed to start playout");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Engine-internal helper: stops the shared audio device's playout, but
+// only if no channel is still playing out.
+WebRtc_Word32 VoEBaseImpl::StopPlayout()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "VoEBaseImpl::StopPlayout()");
+
+    WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+
+    WebRtc_UWord16 nChannelsPlaying(0);
+    // Manual new[]/delete[] (pre-C++11 style); freed unconditionally below.
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+
+    // Get number of playing channels
+    _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels);
+    for (int i = 0; i < numOfChannels; i++)
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channelsArray[i]);
+        voe::Channel* chPtr = sc.ChannelPtr();
+        if (chPtr)
+        {
+            if (chPtr->Playing())
+            {
+                nChannelsPlaying++;
+            }
+        }
+    }
+    delete[] channelsArray;
+
+    // Stop audio-device playing if no channel is playing out
+    if (nChannelsPlaying == 0)
+    {
+        if (_shared->audio_device()->StopPlayout() != 0)
+        {
+            _shared->SetLastError(VE_CANNOT_STOP_PLAYOUT, kTraceError,
+                "StopPlayout() failed to stop playout");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Engine-internal helper: starts the shared audio device's recording.
+// No-op when already recording or when external recording is enabled
+// (the application then feeds capture data itself).
+WebRtc_Word32 VoEBaseImpl::StartSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "VoEBaseImpl::StartSend()");
+    if (_shared->audio_device()->Recording())
+    {
+        return 0;
+    }
+    if (!_shared->ext_recording())
+    {
+        if (_shared->audio_device()->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartSend() failed to initialize recording");
+            return -1;
+        }
+        if (_shared->audio_device()->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartSend() failed to start recording");
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+// Engine-internal helper: stops the shared recording device, but only
+// when no channel is sending and the transmit mixer is not recording
+// the microphone (e.g. to file).
+WebRtc_Word32 VoEBaseImpl::StopSend()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "VoEBaseImpl::StopSend()");
+
+    if (_shared->NumOfSendingChannels() == 0 &&
+        !_shared->transmit_mixer()->IsRecordingMic())
+    {
+        // Stop audio-device recording if no channel is recording
+        if (_shared->audio_device()->StopRecording() != 0)
+        {
+            _shared->SetLastError(VE_CANNOT_STOP_RECORDING, kTraceError,
+                "StopSend() failed to stop recording");
+            return -1;
+        }
+        _shared->transmit_mixer()->StopSend();
+    }
+
+    return 0;
+}
+
+// Tears the engine down: deletes all channels, stops the process
+// thread, shuts down and releases the ADM, then releases the APM.
+// Failures along the way are recorded via SetLastError but do not abort
+// the teardown; the statistics object is marked uninitialized last.
+WebRtc_Word32 VoEBaseImpl::TerminateInternal()
+{
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+        "VoEBaseImpl::TerminateInternal()");
+
+    // Delete any remaining channel objects
+    WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels();
+    if (numOfChannels > 0)
+    {
+        WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+        _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels);
+        for (int i = 0; i < numOfChannels; i++)
+        {
+            DeleteChannel(channelsArray[i]);
+        }
+        delete[] channelsArray;
+    }
+
+    // Deregister the ADM from the process thread before stopping it, so
+    // the thread no longer ticks the module during shutdown.
+    if (_shared->process_thread())
+    {
+        if (_shared->audio_device())
+        {
+            if (_shared->process_thread()->
+                DeRegisterModule(_shared->audio_device()) != 0)
+            {
+                _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
+                    "TerminateInternal() failed to deregister ADM");
+            }
+        }
+        if (_shared->process_thread()->Stop() != 0)
+        {
+            _shared->SetLastError(VE_THREAD_ERROR, kTraceError,
+                "TerminateInternal() failed to stop module process thread");
+        }
+    }
+
+    // Audio Device Module
+
+    if (_shared->audio_device() != NULL)
+    {
+        if (_shared->audio_device()->StopPlayout() != 0)
+        {
+            _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                "TerminateInternal() failed to stop playout");
+        }
+        if (_shared->audio_device()->StopRecording() != 0)
+        {
+            _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+                "TerminateInternal() failed to stop recording");
+        }
+        if (_shared->audio_device()->RegisterEventObserver(NULL) != 0) {
+            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+                "TerminateInternal() failed to de-register event observer "
+                "for the ADM");
+        }
+        if (_shared->audio_device()->RegisterAudioCallback(NULL) != 0) {
+            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+                "TerminateInternal() failed to de-register audio callback "
+                "for the ADM");
+        }
+        if (_shared->audio_device()->Terminate() != 0)
+        {
+            _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+                "TerminateInternal() failed to terminate the ADM");
+        }
+
+        _shared->set_audio_device(NULL);
+    }
+
+    // AP module
+
+    if (_shared->audio_processing() != NULL)
+    {
+        // Detach the APM from the transmit mixer before releasing it.
+        _shared->transmit_mixer()->SetAudioProcessingModule(NULL);
+        _shared->set_audio_processing(NULL);
+    }
+
+    return _shared->statistics().SetUnInitialized();
+}
+
+} // namespace webrtc
diff --git a/voice_engine/voe_base_impl.h b/voice_engine/voe_base_impl.h
new file mode 100644
index 0000000..0eb44fa
--- /dev/null
+++ b/voice_engine/voe_base_impl.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
+
+#include "voe_base.h"
+
+#include "module_common_types.h"
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+class ProcessThread;
+
+// Implementation of the VoEBase sub-API.  The class additionally implements
+// AudioTransport (the audio device module's 10 ms data callback) and
+// AudioDeviceObserver (the ADM's error/warning callback), so a single
+// instance is registered with the ADM during Init().
+class VoEBaseImpl: public VoEBase,
+    public AudioTransport,
+    public AudioDeviceObserver
+{
+public:
+    // Installs the observer that receives engine callbacks; only one may be
+    // registered at a time.
+    virtual int RegisterVoiceEngineObserver(VoiceEngineObserver& observer);
+
+    virtual int DeRegisterVoiceEngineObserver();
+
+    // Initializes the engine.  When |external_adm| is non-NULL it is used
+    // instead of an internally created audio device module.
+    virtual int Init(AudioDeviceModule* external_adm = NULL);
+
+    virtual int Terminate();
+
+    virtual int MaxNumOfChannels();
+
+    // Returns the id of the newly created channel, or -1 on failure.
+    virtual int CreateChannel();
+
+    virtual int DeleteChannel(int channel);
+
+    // RTP/RTCP endpoint configuration (local receiver / remote destination).
+    virtual int SetLocalReceiver(int channel, int port,
+                                 int RTCPport = kVoEDefault,
+                                 const char ipAddr[64] = NULL,
+                                 const char multiCastAddr[64] = NULL);
+
+    virtual int GetLocalReceiver(int channel, int& port, int& RTCPport,
+                                 char ipAddr[64]);
+
+    virtual int SetSendDestination(int channel, int port,
+                                   const char ipAddr[64],
+                                   int sourcePort = kVoEDefault,
+                                   int RTCPport = kVoEDefault);
+
+    virtual int GetSendDestination(int channel,
+                                   int& port,
+                                   char ipAddr[64],
+                                   int& sourcePort,
+                                   int& RTCPport);
+
+    // Per-channel media start/stop control.
+    virtual int StartReceive(int channel);
+
+    virtual int StartPlayout(int channel);
+
+    virtual int StartSend(int channel);
+
+    virtual int StopReceive(int channel);
+
+    virtual int StopPlayout(int channel);
+
+    virtual int StopSend(int channel);
+
+    // NetEQ playout and background-noise mode control.
+    virtual int SetNetEQPlayoutMode(int channel, NetEqModes mode);
+
+    virtual int GetNetEQPlayoutMode(int channel, NetEqModes& mode);
+
+    virtual int SetNetEQBGNMode(int channel, NetEqBgnModes mode);
+
+    virtual int GetNetEQBGNMode(int channel, NetEqBgnModes& mode);
+
+
+    virtual int SetOnHoldStatus(int channel,
+                                bool enable,
+                                OnHoldModes mode = kHoldSendAndPlay);
+
+    virtual int GetOnHoldStatus(int channel, bool& enabled, OnHoldModes& mode);
+
+    virtual int GetVersion(char version[1024]);
+
+    virtual int LastError();
+
+    // AudioTransport
+    // Called by the ADM with a recorded 10 ms block; |newMicLevel| carries
+    // the (AGC-adjusted) microphone level back to the ADM.
+    virtual WebRtc_Word32
+        RecordedDataIsAvailable(const void* audioSamples,
+                                const WebRtc_UWord32 nSamples,
+                                const WebRtc_UWord8 nBytesPerSample,
+                                const WebRtc_UWord8 nChannels,
+                                const WebRtc_UWord32 samplesPerSec,
+                                const WebRtc_UWord32 totalDelayMS,
+                                const WebRtc_Word32 clockDrift,
+                                const WebRtc_UWord32 currentMicLevel,
+                                WebRtc_UWord32& newMicLevel);
+
+    // Called by the ADM when it needs a 10 ms block to play out.
+    virtual WebRtc_Word32 NeedMorePlayData(const WebRtc_UWord32 nSamples,
+                                           const WebRtc_UWord8 nBytesPerSample,
+                                           const WebRtc_UWord8 nChannels,
+                                           const WebRtc_UWord32 samplesPerSec,
+                                           void* audioSamples,
+                                           WebRtc_UWord32& nSamplesOut);
+
+    // AudioDeviceObserver
+    virtual void OnErrorIsReported(const ErrorCode error);
+    virtual void OnWarningIsReported(const WarningCode warning);
+
+protected:
+    // Construction is restricted to VoiceEngineImpl, which aggregates all
+    // sub-API implementations over the same SharedData instance.
+    VoEBaseImpl(voe::SharedData* shared);
+    virtual ~VoEBaseImpl();
+
+private:
+    WebRtc_Word32 StartPlayout();
+    WebRtc_Word32 StopPlayout();
+    WebRtc_Word32 StartSend();
+    WebRtc_Word32 StopSend();
+    WebRtc_Word32 TerminateInternal();
+
+    // Helpers that append version information to the string built by
+    // GetVersion().
+    WebRtc_Word32 AddBuildInfo(char* str) const;
+    WebRtc_Word32 AddVoEVersion(char* str) const;
+#ifdef WEBRTC_EXTERNAL_TRANSPORT
+    WebRtc_Word32 AddExternalTransportBuild(char* str) const;
+#endif
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    WebRtc_Word32 AddExternalRecAndPlayoutBuild(char* str) const;
+#endif
+    VoiceEngineObserver* _voiceEngineObserverPtr;
+    CriticalSectionWrapper& _callbackCritSect;
+
+    // True while an observer is registered.
+    bool _voiceEngineObserver;
+    WebRtc_UWord32 _oldVoEMicLevel;
+    WebRtc_UWord32 _oldMicLevel;
+    AudioFrame _audioFrame;
+    voe::SharedData* _shared;
+
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_BASE_IMPL_H
diff --git a/voice_engine/voe_call_report_impl.cc b/voice_engine/voe_call_report_impl.cc
new file mode 100644
index 0000000..778cb81
--- /dev/null
+++ b/voice_engine/voe_call_report_impl.cc
@@ -0,0 +1,411 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_call_report_impl.h"
+
+#include "audio_processing.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+// Returns the VoECallReport sub-API for |voiceEngine| and bumps the engine
+// reference count.  Returns NULL when the sub-API is compiled out or when
+// no engine instance is supplied.
+VoECallReport* VoECallReport::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+    return NULL;
+#else
+    if (voiceEngine == NULL)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+
+// Creates the file wrapper used by WriteReportToFile(); it is owned through
+// the |_file| reference and released in the destructor.
+VoECallReportImpl::VoECallReportImpl(voe::SharedData* shared) :
+    _file(*FileWrapper::Create()), _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoECallReportImpl() - ctor");
+}
+
+VoECallReportImpl::~VoECallReportImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "~VoECallReportImpl() - dtor");
+    // |_file| is a reference to a heap object allocated in the ctor via
+    // FileWrapper::Create(); delete it through its address.
+    delete &_file;
+}
+
+// Resets the APM echo-metric state and the dead-or-alive/RTCP counters for
+// |channel|, or for all existing channels when |channel| == -1.
+int VoECallReportImpl::ResetCallReportStatistics(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ResetCallReportStatistics(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    assert(_shared->audio_processing() != NULL);
+
+    // Remember the current metric-enable state so it can be restored below.
+    bool echoMode =
+        _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
+
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 " current AudioProcessingModule echo metric state %d)",
+                 echoMode);
+    // Reset the APM statistics
+    // (re-enabling metrics clears the APM's internal counters).
+    if (_shared->audio_processing()->echo_cancellation()->enable_metrics(true)
+        != 0)
+    {
+        _shared->SetLastError(VE_APM_ERROR, kTraceError,
+            "ResetCallReportStatistics() unable to "
+            "set the AudioProcessingModule echo metrics state");
+        return -1;
+    }
+    // Restore metric states
+    _shared->audio_processing()->echo_cancellation()->enable_metrics(echoMode);
+
+    // Reset channel dependent statistics
+    if (channel != -1)
+    {
+        // Single-channel variant.
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "ResetCallReportStatistics() failed to locate channel");
+            return -1;
+        }
+        channelPtr->ResetDeadOrAliveCounters();
+        channelPtr->ResetRTCPStatistics();
+    }
+    else
+    {
+        // All-channels variant: iterate over the current channel ids.
+        WebRtc_Word32 numOfChannels =
+            _shared->channel_manager().NumOfChannels();
+        if (numOfChannels <= 0)
+        {
+            return 0;
+        }
+        WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+        _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels);
+        for (int i = 0; i < numOfChannels; i++)
+        {
+            voe::ScopedChannel sc(_shared->channel_manager(), channelsArray[i]);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr)
+            {
+                channelPtr->ResetDeadOrAliveCounters();
+                channelPtr->ResetRTCPStatistics();
+            }
+        }
+        delete[] channelsArray;
+    }
+
+    return 0;
+}
+
+// Collects a summary of the echo metrics (ERL, ERLE, RERL, A-NLP) into
+// |stats|.  Fails when the engine has not been initialized.
+int VoECallReportImpl::GetEchoMetricSummary(EchoStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetEchoMetricSummary()");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (_shared->statistics().Initialized())
+    {
+        assert(_shared->audio_processing() != NULL);
+        // Delegate the actual metric retrieval to the internal helper.
+        return GetEchoMetricSummaryInternal(stats);
+    }
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+}
+
+// Reads the echo metrics from the APM's echo canceller and copies them into
+// |stats|.  When metrics are disabled or the APM call fails, every field is
+// set to the sentinel -100 dB instead of failing the call.
+int VoECallReportImpl::GetEchoMetricSummaryInternal(EchoStatistics& stats)
+{
+    // Retrieve echo metrics from the AudioProcessingModule
+    int ret(0);
+    bool mode(false);
+    EchoCancellation::Metrics metrics;
+
+    // Ensure that echo metrics is enabled
+
+    mode =
+        _shared->audio_processing()->echo_cancellation()->are_metrics_enabled();
+    if (mode != false)
+    {
+        ret = _shared->audio_processing()->echo_cancellation()->
+            GetMetrics(&metrics);
+        if (ret != 0)
+        {
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                " AudioProcessingModule GetMetrics() => error");
+        }
+    }
+    else
+    {
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            " AudioProcessingModule echo metrics is not enabled");
+    }
+
+    if ((ret != 0) || (mode == false))
+    {
+        // Mark complete struct as invalid (-100 dB)
+        WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            " unable to retrieve echo metrics from the AudioProcessingModule");
+        stats.erl.min = -100;
+        stats.erl.max = -100;
+        stats.erl.average = -100;
+        stats.erle.min = -100;
+        stats.erle.max = -100;
+        stats.erle.average = -100;
+        stats.rerl.min = -100;
+        stats.rerl.max = -100;
+        stats.rerl.average = -100;
+        stats.a_nlp.min = -100;
+        stats.a_nlp.max = -100;
+        stats.a_nlp.average = -100;
+    }
+    else
+    {
+
+        // Deliver output results to user
+        stats.erl.min = metrics.echo_return_loss.minimum;
+        stats.erl.max = metrics.echo_return_loss.maximum;
+        stats.erl.average = metrics.echo_return_loss.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), " erl: min=%d, max=%d, avg=%d",
+            stats.erl.min, stats.erl.max, stats.erl.average);
+
+        stats.erle.min = metrics.echo_return_loss_enhancement.minimum;
+        stats.erle.max = metrics.echo_return_loss_enhancement.maximum;
+        stats.erle.average = metrics.echo_return_loss_enhancement.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), " erle: min=%d, max=%d, avg=%d",
+            stats.erle.min, stats.erle.max, stats.erle.average);
+
+        stats.rerl.min = metrics.residual_echo_return_loss.minimum;
+        stats.rerl.max = metrics.residual_echo_return_loss.maximum;
+        stats.rerl.average = metrics.residual_echo_return_loss.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), " rerl: min=%d, max=%d, avg=%d",
+            stats.rerl.min, stats.rerl.max, stats.rerl.average);
+
+        stats.a_nlp.min = metrics.a_nlp.minimum;
+        stats.a_nlp.max = metrics.a_nlp.maximum;
+        stats.a_nlp.average = metrics.a_nlp.average;
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            " a_nlp: min=%d, max=%d, avg=%d",
+            stats.a_nlp.min, stats.a_nlp.max, stats.a_nlp.average);
+    }
+    return 0;
+}
+
+// Retrieves the min/max/average RTT statistics for |channel| into
+// |delaysMs|.
+int VoECallReportImpl::GetRoundTripTimeSummary(int channel, StatVal& delaysMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRoundTripTimeSummary()");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* const target = sc.ChannelPtr();
+    if (target != NULL)
+    {
+        // Forward the query to the resolved channel.
+        return target->GetRoundTripTimeSummary(delaysMs);
+    }
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRoundTripTimeSummary() failed to locate channel");
+    return -1;
+}
+
+// Retrieves the accumulated dead/alive connection-detection counters for
+// |channel|.
+int VoECallReportImpl::GetDeadOrAliveSummary(int channel,
+                                             int& numOfDeadDetections,
+                                             int& numOfAliveDetections)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetDeadOrAliveSummary(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (_shared->statistics().Initialized())
+    {
+        // Delegate to the internal helper once initialization is verified.
+        return GetDeadOrAliveSummaryInternal(channel, numOfDeadDetections,
+                                             numOfAliveDetections);
+    }
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+}
+
+// Helper for GetDeadOrAliveSummary(): resolves the channel and forwards the
+// counter query to it.
+int VoECallReportImpl::GetDeadOrAliveSummaryInternal(int channel,
+                                                     int& numOfDeadDetections,
+                                                     int& numOfAliveDetections)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetDeadOrAliveSummary(channel=%d)", channel);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste error: this message used to (incorrectly) name
+        // GetRoundTripTimeSummary().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetDeadOrAliveSummary() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->GetDeadOrAliveCounters(numOfDeadDetections,
+                                              numOfAliveDetections);
+}
+
+// Writes a call report (RTT, dead-or-alive and echo statistics) as text to
+// |fileNameUTF8|.  Returns 0 on success, -1 on failure.
+//
+// BUG FIX: both per-channel loops used the loop index |ch| as a channel id
+// when labeling entries and when calling GetDeadOrAliveSummary().  Channel
+// ids come from GetChannelIds() and differ from the index as soon as
+// channels have been deleted and re-created; use channelsArray[ch].
+int VoECallReportImpl::WriteReportToFile(const char* fileNameUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "WriteReportToFile(fileNameUTF8=%s)", fileNameUTF8);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (NULL == fileNameUTF8)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "WriteReportToFile() invalid filename");
+        return -1;
+    }
+
+    // Close any previously opened report file.
+    if (_file.Open())
+    {
+        _file.CloseFile();
+    }
+
+    // Open text file in write mode
+    if (_file.OpenFile(fileNameUTF8, false, false, true) != 0)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "WriteReportToFile() unable to open the file");
+        return -1;
+    }
+
+    // Summarize information and add it to the open file
+    //
+    _file.WriteText("WebRtc VoiceEngine Call Report\n");
+    _file.WriteText("==============================\n");
+    _file.WriteText("\nNetwork Packet Round Trip Time (RTT)\n");
+    _file.WriteText("------------------------------------\n\n");
+
+    WebRtc_Word32 numOfChannels = _shared->channel_manager().NumOfChannels();
+    if (numOfChannels <= 0)
+    {
+        return 0;
+    }
+    WebRtc_Word32* channelsArray = new WebRtc_Word32[numOfChannels];
+    _shared->channel_manager().GetChannelIds(channelsArray, numOfChannels);
+    for (int ch = 0; ch < numOfChannels; ch++)
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channelsArray[ch]);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr)
+        {
+            StatVal delaysMs;
+            // Label the entry with the actual channel id.
+            _file.WriteText("channel %d:\n", channelsArray[ch]);
+            channelPtr->GetRoundTripTimeSummary(delaysMs);
+            _file.WriteText(" min:%5d [ms]\n", delaysMs.min);
+            _file.WriteText(" max:%5d [ms]\n", delaysMs.max);
+            _file.WriteText(" avg:%5d [ms]\n", delaysMs.average);
+        }
+    }
+
+    _file.WriteText("\nDead-or-Alive Connection Detections\n");
+    _file.WriteText("------------------------------------\n\n");
+
+    for (int ch = 0; ch < numOfChannels; ch++)
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channelsArray[ch]);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr)
+        {
+            int nDead(0);
+            int nAlive(0);
+            _file.WriteText("channel %d:\n", channelsArray[ch]);
+            // Query by channel id, not by loop index.
+            GetDeadOrAliveSummary(channelsArray[ch], nDead, nAlive);
+            _file.WriteText(" #dead :%6d\n", nDead);
+            _file.WriteText(" #alive:%6d\n", nAlive);
+        }
+    }
+
+    delete[] channelsArray;
+
+    EchoStatistics echo;
+    GetEchoMetricSummary(echo);
+
+    _file.WriteText("\nEcho Metrics\n");
+    _file.WriteText("------------\n\n");
+
+    _file.WriteText("erl:\n");
+    _file.WriteText(" min:%5d [dB]\n", echo.erl.min);
+    _file.WriteText(" max:%5d [dB]\n", echo.erl.max);
+    _file.WriteText(" avg:%5d [dB]\n", echo.erl.average);
+    _file.WriteText("\nerle:\n");
+    _file.WriteText(" min:%5d [dB]\n", echo.erle.min);
+    _file.WriteText(" max:%5d [dB]\n", echo.erle.max);
+    _file.WriteText(" avg:%5d [dB]\n", echo.erle.average);
+    _file.WriteText("rerl:\n");
+    _file.WriteText(" min:%5d [dB]\n", echo.rerl.min);
+    _file.WriteText(" max:%5d [dB]\n", echo.rerl.max);
+    _file.WriteText(" avg:%5d [dB]\n", echo.rerl.average);
+    _file.WriteText("a_nlp:\n");
+    _file.WriteText(" min:%5d [dB]\n", echo.a_nlp.min);
+    _file.WriteText(" max:%5d [dB]\n", echo.a_nlp.max);
+    _file.WriteText(" avg:%5d [dB]\n", echo.a_nlp.average);
+
+    _file.WriteText("\n<END>");
+
+    _file.Flush();
+    _file.CloseFile();
+
+    return 0;
+}
+
+#endif // WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_call_report_impl.h b/voice_engine/voe_call_report_impl.h
new file mode 100644
index 0000000..fcc708a
--- /dev/null
+++ b/voice_engine/voe_call_report_impl.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
+
+#include "voe_call_report.h"
+
+#include "shared_data.h"
+
+
+namespace webrtc
+{
+class FileWrapper;
+
+// Implementation of the VoECallReport sub-API.  Gathers end-of-call
+// statistics (echo metrics, round-trip times, dead-or-alive counters) and
+// can dump them as a text report to file.
+class VoECallReportImpl: public VoECallReport
+{
+public:
+    // Resets counters for |channel|, or for all channels when channel == -1.
+    virtual int ResetCallReportStatistics(int channel);
+
+    // Retrieves a summary of the echo metrics (ERL, ERLE, RERL, A-NLP).
+    virtual int GetEchoMetricSummary(EchoStatistics& stats);
+
+    virtual int GetRoundTripTimeSummary(int channel,
+                                        StatVal& delaysMs);
+
+    virtual int GetDeadOrAliveSummary(int channel, int& numOfDeadDetections,
+                                      int& numOfAliveDetections);
+
+    // Writes a complete text report to |fileNameUTF8|.
+    virtual int WriteReportToFile(const char* fileNameUTF8);
+
+protected:
+    VoECallReportImpl(voe::SharedData* shared);
+    virtual ~VoECallReportImpl();
+
+private:
+    int GetDeadOrAliveSummaryInternal(int channel,
+                                      int& numOfDeadDetections,
+                                      int& numOfAliveDetections);
+
+    int GetEchoMetricSummaryInternal(EchoStatistics& stats);
+
+    // NOTE(review): no definition is visible in voe_call_report_impl.cc —
+    // presumably a legacy declaration; confirm before calling.
+    int GetSpeechAndNoiseSummaryInternal(LevelStatistics& stats);
+
+    // Owned: created in the ctor (FileWrapper::Create()), deleted in dtor.
+    FileWrapper& _file;
+    voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CALL_REPORT_IMPL_H
diff --git a/voice_engine/voe_codec_impl.cc b/voice_engine/voe_codec_impl.cc
new file mode 100644
index 0000000..6414efc
--- /dev/null
+++ b/voice_engine/voe_codec_impl.cc
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_codec_impl.h"
+
+#include "audio_coding_module.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+// Returns the VoECodec sub-API for |voiceEngine| and bumps the engine
+// reference count.  Returns NULL when the sub-API is compiled out or when
+// no engine instance is supplied.
+VoECodec* VoECodec::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_CODEC_API
+    return NULL;
+#else
+    if (voiceEngine == NULL)
+    {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+
+// Stores the shared engine state used by all codec API calls.
+VoECodecImpl::VoECodecImpl(voe::SharedData* shared) : _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoECodecImpl() - ctor");
+}
+
+VoECodecImpl::~VoECodecImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "~VoECodecImpl() - dtor");
+}
+
+// Returns the number of codecs supported by the AudioCodingModule.
+int VoECodecImpl::NumOfCodecs()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "NumOfCodecs()");
+
+    // The codec list is static in the ACM; no engine state is required.
+    const WebRtc_UWord8 count = AudioCodingModule::NumberOfCodecs();
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "NumOfCodecs() => %u", count);
+    return count;
+}
+
+// Fills |codec| with the settings of the codec at position |index| in the
+// ACM's static codec table.  Returns -1 for an out-of-range index.
+int VoECodecImpl::GetCodec(int index, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetCodec(index=%d, codec=?)", index);
+    CodecInst acmCodec;
+    // The redundant C-style (CodecInst&) cast has been removed; acmCodec
+    // already has the exact type the ACM expects.
+    if (AudioCodingModule::Codec(index, acmCodec) == -1)
+    {
+        _shared->SetLastError(VE_INVALID_LISTNR, kTraceError,
+            "GetCodec() invalid index");
+        return -1;
+    }
+    // Translate from the ACM's internal representation.
+    ACMToExternalCodecRepresentation(codec, acmCodec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "GetCodec() => plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+        "channels=%d, rate=%d", codec.plname, codec.pacsize,
+        codec.plfreq, codec.pltype, codec.channels, codec.rate);
+    return 0;
+}
+
+// Sets the codec used for sending on |channel| after validating the
+// requested settings.  Returns 0 on success, -1 on failure.
+int VoECodecImpl::SetSendCodec(int channel, const CodecInst& codec)
+{
+    CodecInst copyCodec;
+    // Translate to the ACM's internal representation before validation.
+    ExternalToACMCodecRepresentation(copyCodec, codec);
+
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSendCodec(channel=%d, codec)", channel);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "codec: plname=%s, pacsize=%d, plfreq=%d, pltype=%d, "
+                 "channels=%d, rate=%d", codec.plname, codec.pacsize,
+                 codec.plfreq, codec.pltype, codec.channels, codec.rate);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // External sanity checks performed outside the ACM
+    if ((STR_CASE_CMP(copyCodec.plname, "L16") == 0) &&
+        (copyCodec.pacsize >= 960))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendCodec() invalid L16 packet size");
+        return -1;
+    }
+    // CN, telephone-event and RED are configured through dedicated APIs,
+    // not as the primary send codec.
+    if (!STR_CASE_CMP(copyCodec.plname, "CN")
+        || !STR_CASE_CMP(copyCodec.plname, "TELEPHONE-EVENT")
+        || !STR_CASE_CMP(copyCodec.plname, "RED"))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendCodec() invalid codec name");
+        return -1;
+    }
+    if ((copyCodec.channels != 1) && (copyCodec.channels != 2))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendCodec() invalid number of channels");
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste error: this message used to name GetSendCodec().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendCodec() failed to locate channel");
+        return -1;
+    }
+    // Redundant C-style (CodecInst&) cast removed.
+    if (!AudioCodingModule::IsCodecValid(copyCodec))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendCodec() invalid codec");
+        return -1;
+    }
+    if (channelPtr->SetSendCodec(copyCodec) != 0)
+    {
+        _shared->SetLastError(VE_CANNOT_SET_SEND_CODEC, kTraceError,
+            "SetSendCodec() failed to set send codec");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Retrieves the codec currently used for sending on |channel| into |codec|.
+int VoECodecImpl::GetSendCodec(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSendCodec(channel=%d, codec=?)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendCodec() failed to locate channel");
+        return -1;
+    }
+    CodecInst acmCodec;
+    if (channelPtr->GetSendCodec(acmCodec) != 0)
+    {
+        _shared->SetLastError(VE_CANNOT_GET_SEND_CODEC, kTraceError,
+            "GetSendCodec() failed to get send codec");
+        return -1;
+    }
+    // Translate from the ACM's internal representation before returning.
+    ACMToExternalCodecRepresentation(codec, acmCodec);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "GetSendCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+        "channels=%d, rate=%d", codec.plname, codec.pacsize,
+        codec.plfreq, codec.channels, codec.rate);
+    return 0;
+}
+
+// Retrieves the codec of the most recently received audio on |channel|.
+int VoECodecImpl::GetRecCodec(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRecCodec(channel=%d, codec=?)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* const ch = sc.ChannelPtr();
+    if (ch == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRecCodec() failed to locate channel");
+        return -1;
+    }
+    CodecInst received;
+    if (ch->GetRecCodec(received) != 0)
+    {
+        _shared->SetLastError(VE_CANNOT_GET_REC_CODEC, kTraceError,
+            "GetRecCodec() failed to get received codec");
+        return -1;
+    }
+    // Convert from the ACM's internal representation before returning.
+    ACMToExternalCodecRepresentation(codec, received);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "GetRecCodec() => plname=%s, pacsize=%d, plfreq=%d, "
+        "channels=%d, rate=%d", codec.plname, codec.pacsize,
+        codec.plfreq, codec.channels, codec.rate);
+    return 0;
+}
+
+// AMR / AMR-WB mode setters.  Each forwards the requested mode to the
+// channel when the corresponding codec is compiled in, and fails with
+// VE_FUNC_NOT_SUPPORTED otherwise.
+
+// Sets the AMR encoder mode for |channel|.
+int VoECodecImpl::SetAMREncFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetAMREncFormat(channel=%d, mode=%d)", channel, mode);
+#ifdef WEBRTC_CODEC_AMR
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetAMREncFormat() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetAMREncFormat(mode);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAMREncFormat() AMR codec is not supported");
+    return -1;
+#endif
+}
+
+// Sets the AMR decoder mode for |channel|.
+int VoECodecImpl::SetAMRDecFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetAMRDecFormat(channel=%i, mode=%i)", channel, mode);
+#ifdef WEBRTC_CODEC_AMR
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetAMRDecFormat() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetAMRDecFormat(mode);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAMRDecFormat() AMR codec is not supported");
+    return -1;
+#endif
+}
+
+// Sets the AMR-WB encoder mode for |channel| (not supported on
+// Android/iPhone builds).
+int VoECodecImpl::SetAMRWbEncFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetAMRWbEncFormat(channel=%d, mode=%d)", channel, mode);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifdef WEBRTC_CODEC_AMRWB
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetAMRWbEncFormat() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetAMRWbEncFormat(mode);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAMRWbEncFormat() AMR-wb codec is not supported");
+    return -1;
+#endif
+}
+
+// Sets the AMR-WB decoder mode for |channel| (not supported on
+// Android/iPhone builds).
+int VoECodecImpl::SetAMRWbDecFormat(int channel, AmrMode mode)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetAMRWbDecFormat(channel=%i, mode=%i)", channel, mode);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifdef WEBRTC_CODEC_AMRWB
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetAMRWbDecFormat() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetAMRWbDecFormat(mode);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetAMRWbDecFormat() AMR-wb codec is not supported");
+    return -1;
+#endif
+}
+
+// Registers the payload type to be used when receiving |codec| on
+// |channel|.
+int VoECodecImpl::SetRecPayloadType(int channel, const CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetRecPayloadType(channel=%d, codec)", channel);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "codec: plname=%s, plfreq=%d, pltype=%d, channels=%u, "
+                 "pacsize=%d, rate=%d", codec.plname, codec.plfreq, codec.pltype,
+                 codec.channels, codec.pacsize, codec.rate);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste error: this message used to name
+        // GetRecPayloadType().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRecPayloadType() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetRecPayloadType(codec);
+}
+
+// Retrieves the payload type currently registered for receiving |codec| on
+// |channel|.
+int VoECodecImpl::GetRecPayloadType(int channel, CodecInst& codec)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRecPayloadType(channel=%d, codec)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* const ch = sc.ChannelPtr();
+    if (ch != NULL)
+    {
+        return ch->GetRecPayloadType(codec);
+    }
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRecPayloadType() failed to locate channel");
+    return -1;
+}
+
+// Sets the payload type used when sending comfort noise (CN) at the given
+// |frequency| on |channel|.  Only the dynamic payload range 96-127 and the
+// 16/32 kHz CN variants may be remapped.
+int VoECodecImpl::SetSendCNPayloadType(int channel, int type,
+                                       PayloadFrequencies frequency)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSendCNPayloadType(channel=%d, type=%d, frequency=%d)",
+                 channel, type, frequency);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (type < 96 || type > 127)
+    {
+        // Only allow dynamic range: 96 to 127
+        _shared->SetLastError(VE_INVALID_PLTYPE, kTraceError,
+            "SetSendCNPayloadType() invalid payload type");
+        return -1;
+    }
+    if ((frequency != kFreq16000Hz) && (frequency != kFreq32000Hz))
+    {
+        // It is not possible to modify the payload type for CN/8000.
+        // We only allow modification of the CN payload type for CN/16000
+        // and CN/32000.
+        _shared->SetLastError(VE_INVALID_PLFREQ, kTraceError,
+            "SetSendCNPayloadType() invalid payload frequency");
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendCNPayloadType() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetSendCNPayloadType(type, frequency);
+}
+
+// Sets the initial target rate (bps) for the iSAC encoder on |channel|,
+// optionally requesting a fixed frame size. Compiled out (returns -1 with
+// VE_FUNC_NOT_SUPPORTED) when iSAC support is not built in.
+int VoECodecImpl::SetISACInitTargetRate(int channel, int rateBps,
+                                        bool useFixedFrameSize)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetISACInitTargetRate(channel=%d, rateBps=%d, "
+                 "useFixedFrameSize=%d)", channel, rateBps, useFixedFrameSize);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetISACInitTargetRate() failed to locate channel");
+        return -1;
+    }
+    return target->SetISACInitTargetRate(rateBps, useFixedFrameSize);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetISACInitTargetRate() iSAC codec is not supported");
+    return -1;
+#endif
+}
+
+// Caps the payload rate (bps) the iSAC encoder may use on |channel|.
+// Compiled out (returns -1 with VE_FUNC_NOT_SUPPORTED) without iSAC support.
+int VoECodecImpl::SetISACMaxRate(int channel, int rateBps)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetISACMaxRate(channel=%d, rateBps=%d)", channel, rateBps);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetISACMaxRate() failed to locate channel");
+        return -1;
+    }
+    return target->SetISACMaxRate(rateBps);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetISACMaxRate() iSAC codec is not supported");
+    return -1;
+#endif
+}
+
+// Caps the maximum iSAC payload size (bytes) on |channel|.
+// Returns 0 on success, -1 on failure or when iSAC is compiled out.
+// Fix: removed the unreachable "return 0;" that followed the #endif in the
+// original — both preprocessor branches already return unconditionally.
+int VoECodecImpl::SetISACMaxPayloadSize(int channel, int sizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetISACMaxPayloadSize(channel=%d, sizeBytes=%d)", channel,
+                 sizeBytes);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifdef WEBRTC_CODEC_ISAC
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetISACMaxPayloadSize() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetISACMaxPayloadSize(sizeBytes);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetISACMaxPayloadSize() iSAC codec is not supported");
+    return -1;
+#endif
+}
+
+// Enables/disables voice activity detection (and optionally DTX) on the
+// channel, translating the public VadModes value into the Audio Coding
+// Module's ACMVADMode representation before forwarding.
+int VoECodecImpl::SetVADStatus(int channel, bool enable, VadModes mode,
+                               bool disableDTX)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetVADStatus(channel=%i, enable=%i, mode=%i, disableDTX=%i)",
+                 channel, enable, mode, disableDTX);
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetVADStatus failed to locate channel");
+        return -1;
+    }
+
+    // Map the public enum onto the ACM mode; kVadConventional (and the
+    // default) maps to VADNormal.
+    ACMVADMode acmMode(VADNormal);
+    if (mode == kVadAggressiveLow) {
+        acmMode = VADLowBitrate;
+    } else if (mode == kVadAggressiveMid) {
+        acmMode = VADAggr;
+    } else if (mode == kVadAggressiveHigh) {
+        acmMode = VADVeryAggr;
+    }
+    return target->SetVADStatus(enable, acmMode, disableDTX);
+}
+
+// Retrieves the current VAD/DTX configuration for |channel| and maps the
+// ACM-internal VAD mode back to the public VadModes enumeration.
+// Returns 0 on success, -1 on failure.
+// Fix: |vadMode| is now value-initialized. It is passed by reference to the
+// channel; switching on it uninitialized would be undefined behavior if the
+// callee ever failed to write it on a success path.
+int VoECodecImpl::GetVADStatus(int channel, bool& enabled, VadModes& mode,
+                               bool& disabledDTX)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetVADStatus(channel=%i)", channel);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetVADStatus failed to locate channel");
+        return -1;
+    }
+
+    // Defensive initialization (see function comment).
+    ACMVADMode vadMode(VADNormal);
+    int ret = channelPtr->GetVADStatus(enabled, vadMode, disabledDTX);
+
+    if (ret != 0)
+    {
+        _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+            "GetVADStatus failed to get VAD mode");
+        return -1;
+    }
+    // Translate the ACM mode back to the public enumeration.
+    switch (vadMode)
+    {
+        case VADNormal:
+            mode = kVadConventional;
+            break;
+        case VADLowBitrate:
+            mode = kVadAggressiveLow;
+            break;
+        case VADAggr:
+            mode = kVadAggressiveMid;
+            break;
+        case VADVeryAggr:
+            mode = kVadAggressiveHigh;
+            break;
+    }
+
+    return 0;
+}
+
+// Converts a codec description from the internal ACM packet sizes to the
+// externally visible ones. Only SILK at 12 kHz and 24 kHz uses a different
+// external |pacsize|; every other codec is copied through unchanged.
+void VoECodecImpl::ACMToExternalCodecRepresentation(CodecInst& toInst,
+                                                    const CodecInst& fromInst)
+{
+    toInst = fromInst;
+    if (STR_CASE_CMP(fromInst.plname,"SILK") != 0)
+    {
+        return;  // Non-SILK codecs need no translation.
+    }
+    if (fromInst.plfreq == 12000)
+    {
+        switch (fromInst.pacsize)
+        {
+            case 320:
+                toInst.pacsize = 240;
+                break;
+            case 640:
+                toInst.pacsize = 480;
+                break;
+            case 960:
+                toInst.pacsize = 720;
+                break;
+        }
+    }
+    else if (fromInst.plfreq == 24000)
+    {
+        switch (fromInst.pacsize)
+        {
+            case 640:
+                toInst.pacsize = 480;
+                break;
+            case 1280:
+                toInst.pacsize = 960;
+                break;
+            case 1920:
+                toInst.pacsize = 1440;
+                break;
+        }
+    }
+}
+
+// Inverse of ACMToExternalCodecRepresentation(): converts the externally
+// visible SILK packet sizes (12/24 kHz) back to the internal ACM values.
+// All other codecs are copied through unchanged.
+void VoECodecImpl::ExternalToACMCodecRepresentation(CodecInst& toInst,
+                                                    const CodecInst& fromInst)
+{
+    toInst = fromInst;
+    if (STR_CASE_CMP(fromInst.plname,"SILK") != 0)
+    {
+        return;  // Non-SILK codecs need no translation.
+    }
+    if (fromInst.plfreq == 12000)
+    {
+        switch (fromInst.pacsize)
+        {
+            case 240:
+                toInst.pacsize = 320;
+                break;
+            case 480:
+                toInst.pacsize = 640;
+                break;
+            case 720:
+                toInst.pacsize = 960;
+                break;
+        }
+    }
+    else if (fromInst.plfreq == 24000)
+    {
+        switch (fromInst.pacsize)
+        {
+            case 480:
+                toInst.pacsize = 640;
+                break;
+            case 960:
+                toInst.pacsize = 1280;
+                break;
+            case 1440:
+                toInst.pacsize = 1920;
+                break;
+        }
+    }
+}
+
+#endif // WEBRTC_VOICE_ENGINE_CODEC_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_codec_impl.h b/voice_engine/voe_codec_impl.h
new file mode 100644
index 0000000..eb955ec
--- /dev/null
+++ b/voice_engine/voe_codec_impl.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
+
+#include "voe_codec.h"
+
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+// Implementation of the VoECodec sub-API. Per-channel calls are forwarded to
+// the voe::Channel identified by |channel|; |_shared| supplies engine-wide
+// state (statistics, channel manager, last-error reporting).
+class VoECodecImpl: public VoECodec
+{
+public:
+    // Codec enumeration.
+    virtual int NumOfCodecs();
+
+    virtual int GetCodec(int index, CodecInst& codec);
+
+    // Send/receive codec selection per channel.
+    virtual int SetSendCodec(int channel, const CodecInst& codec);
+
+    virtual int GetSendCodec(int channel, CodecInst& codec);
+
+    virtual int GetRecCodec(int channel, CodecInst& codec);
+
+    // AMR / AMR-WB packetization format configuration.
+    virtual int SetAMREncFormat(int channel,
+                                AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRDecFormat(int channel,
+                                AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRWbEncFormat(int channel,
+                                  AmrMode mode = kRfc3267BwEfficient);
+
+    virtual int SetAMRWbDecFormat(int channel,
+                                  AmrMode mode = kRfc3267BwEfficient);
+
+    // Comfort Noise payload-type remapping (dynamic range 96..127 only).
+    virtual int SetSendCNPayloadType(
+        int channel, int type,
+        PayloadFrequencies frequency = kFreq16000Hz);
+
+    // Receive-side payload-type mapping.
+    virtual int SetRecPayloadType(int channel,
+                                  const CodecInst& codec);
+
+    virtual int GetRecPayloadType(int channel, CodecInst& codec);
+
+    // iSAC-specific rate/size controls (no-ops without WEBRTC_CODEC_ISAC).
+    virtual int SetISACInitTargetRate(int channel,
+                                      int rateBps,
+                                      bool useFixedFrameSize = false);
+
+    virtual int SetISACMaxRate(int channel, int rateBps);
+
+    virtual int SetISACMaxPayloadSize(int channel, int sizeBytes);
+
+    // Voice activity detection / discontinuous transmission.
+    virtual int SetVADStatus(int channel,
+                             bool enable,
+                             VadModes mode = kVadConventional,
+                             bool disableDTX = false);
+
+    virtual int GetVADStatus(int channel,
+                             bool& enabled,
+                             VadModes& mode,
+                             bool& disabledDTX);
+
+protected:
+    // Constructed/destroyed by VoiceEngineImpl only.
+    VoECodecImpl(voe::SharedData* shared);
+    virtual ~VoECodecImpl();
+
+private:
+    // Translate SILK packet sizes between ACM-internal and external forms.
+    void ACMToExternalCodecRepresentation(CodecInst& toInst,
+                                          const CodecInst& fromInst);
+
+    void ExternalToACMCodecRepresentation(CodecInst& toInst,
+                                          const CodecInst& fromInst);
+
+    voe::SharedData* _shared;  // Not owned; engine-wide shared state.
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_CODEC_IMPL_H
diff --git a/voice_engine/voe_dtmf_impl.cc b/voice_engine/voe_dtmf_impl.cc
new file mode 100644
index 0000000..0581737
--- /dev/null
+++ b/voice_engine/voe_dtmf_impl.cc
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_dtmf_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+// Returns the DTMF sub-API for |voiceEngine| and adds a reference to it,
+// or NULL if the API is compiled out or the engine pointer is NULL.
+VoEDtmf* VoEDtmf::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_DTMF_API
+    return NULL;
+#else
+    if (voiceEngine == NULL) {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();  // Caller releases via the sub-API's Release().
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+
+// DTMF feedback tones default to enabled, with direct feedback disabled.
+VoEDtmfImpl::VoEDtmfImpl(voe::SharedData* shared)
+    : _dtmfFeedback(true),
+      _dtmfDirectFeedback(false),
+      _shared(shared) {
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEDtmfImpl::VoEDtmfImpl() - ctor");
+}
+
+VoEDtmfImpl::~VoEDtmfImpl() {
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEDtmfImpl::~VoEDtmfImpl() - dtor");
+}
+
+// Sends a telephone event on |channel|, either out-of-band (RTP
+// telephone-event payload) or in-band (tone mixed into the encoded audio).
+// Depending on the feedback flags set via SetDtmfFeedbackStatus(), a local
+// feedback tone may also be played. Returns 0 on success, -1 on failure.
+int VoEDtmfImpl::SendTelephoneEvent(int channel,
+                                    int eventCode,
+                                    bool outOfBand,
+                                    int lengthMs,
+                                    int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SendTelephoneEvent(channel=%d, eventCode=%d, outOfBand=%d,"
+                 "length=%d, attenuationDb=%d)",
+                 channel, eventCode, (int)outOfBand, lengthMs, attenuationDb);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendTelephoneEvent() failed to locate channel");
+        return -1;
+    }
+    // Events can only be sent while the channel is transmitting.
+    if (!channelPtr->Sending())
+    {
+        _shared->SetLastError(VE_NOT_SENDING, kTraceError,
+            "SendTelephoneEvent() sending is not active");
+        return -1;
+    }
+
+    // Sanity check. Out-of-band allows the full telephone-event code range;
+    // in-band is restricted to the DTMF codes.
+    const int maxEventCode = outOfBand ?
+        static_cast<int>(kMaxTelephoneEventCode) :
+        static_cast<int>(kMaxDtmfEventCode);
+    const bool testFailed = ((eventCode < 0) ||
+        (eventCode > maxEventCode) ||
+        (lengthMs < kMinTelephoneEventDuration) ||
+        (lengthMs > kMaxTelephoneEventDuration) ||
+        (attenuationDb < kMinTelephoneEventAttenuation) ||
+        (attenuationDb > kMaxTelephoneEventAttenuation));
+    if (testFailed)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SendTelephoneEvent() invalid parameter(s)");
+        return -1;
+    }
+
+    // Direct local feedback only applies to DTMF codes (0-15).
+    const bool isDtmf =
+        (eventCode >= 0) && (eventCode <= kMaxDtmfEventCode);
+    const bool playDtmfToneDirect =
+        isDtmf && (_dtmfFeedback && _dtmfDirectFeedback);
+
+    if (playDtmfToneDirect)
+    {
+        // Mute the microphone signal while playing back the tone directly.
+        // This is to reduce the risk of introducing echo from the added output.
+        _shared->transmit_mixer()->UpdateMuteMicrophoneTime(lengthMs);
+
+        // Play out local feedback tone directly (same approach for both inband
+        // and outband).
+        // Reduce the length of the tone with 80ms to reduce risk of echo.
+        // For non-direct feedback, outband and inband cases are handled
+        // differently.
+        _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs - 80,
+                                              attenuationDb);
+    }
+
+    if (outOfBand)
+    {
+        // The RTP/RTCP module will always deliver OnPlayTelephoneEvent when
+        // an event is transmitted. It is up to the VoE to utilize it or not.
+        // This flag ensures that feedback/playout is enabled; however, the
+        // channel object must still parse out the Dtmf events (0-15) from
+        // all possible events (0-255).
+        const bool playDTFMEvent = (_dtmfFeedback && !_dtmfDirectFeedback);
+
+        return channelPtr->SendTelephoneEventOutband(eventCode,
+                                                     lengthMs,
+                                                     attenuationDb,
+                                                     playDTFMEvent);
+    }
+    else
+    {
+        // For Dtmf tones, we want to ensure that inband tones are played out
+        // in sync with the transmitted audio. This flag is utilized by the
+        // channel object to determine if the queued Dtmf event shall also
+        // be fed to the output mixer in the same step as input audio is
+        // replaced by inband Dtmf tones.
+        const bool playDTFMEvent =
+            (isDtmf && _dtmfFeedback && !_dtmfDirectFeedback);
+
+        return channelPtr->SendTelephoneEventInband(eventCode,
+                                                    lengthMs,
+                                                    attenuationDb,
+                                                    playDTFMEvent);
+    }
+}
+
+// Sets the RTP payload type used when transmitting telephone events on
+// |channel|. Returns 0 on success, -1 on failure.
+int VoEDtmfImpl::SetSendTelephoneEventPayloadType(int channel,
+                                                  unsigned char type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSendTelephoneEventPayloadType(channel=%d, type=%u)",
+                 channel, type);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendTelephoneEventPayloadType() failed to locate channel");
+        return -1;
+    }
+    return target->SetSendTelephoneEventPayloadType(type);
+}
+
+// Retrieves the RTP payload type used for outgoing telephone events on
+// |channel|. Returns 0 on success, -1 on failure.
+int VoEDtmfImpl::GetSendTelephoneEventPayloadType(int channel,
+                                                  unsigned char& type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSendTelephoneEventPayloadType(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendTelephoneEventPayloadType() failed to locate channel");
+        return -1;
+    }
+    return target->GetSendTelephoneEventPayloadType(type);
+}
+
+// Plays a single DTMF tone locally through the output mixer.
+// Requires that audio playout is currently running.
+int VoEDtmfImpl::PlayDtmfTone(int eventCode,
+                              int lengthMs,
+                              int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "PlayDtmfTone(eventCode=%d, lengthMs=%d, attenuationDb=%d)",
+                 eventCode, lengthMs, attenuationDb);
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // The tone is only audible while the audio device is playing out.
+    if (!_shared->audio_device()->Playing()) {
+        _shared->SetLastError(VE_NOT_PLAYING, kTraceError,
+            "PlayDtmfTone() no channel is playing out");
+        return -1;
+    }
+    const bool validCode = (eventCode >= kMinDtmfEventCode) &&
+                           (eventCode <= kMaxDtmfEventCode);
+    const bool validLength = (lengthMs >= kMinTelephoneEventDuration) &&
+                             (lengthMs <= kMaxTelephoneEventDuration);
+    const bool validLevel = (attenuationDb >= kMinTelephoneEventAttenuation) &&
+                            (attenuationDb <= kMaxTelephoneEventAttenuation);
+    if (!validCode || !validLength || !validLevel) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "PlayDtmfTone() invalid tone parameter(s)");
+        return -1;
+    }
+    return _shared->output_mixer()->PlayDtmfTone(eventCode, lengthMs,
+                                                 attenuationDb);
+}
+
+// Starts continuous local playout of a DTMF tone; runs until
+// StopPlayingDtmfTone() is called. Requires active audio playout.
+int VoEDtmfImpl::StartPlayingDtmfTone(int eventCode,
+                                      int attenuationDb)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartPlayingDtmfTone(eventCode=%d, attenuationDb=%d)",
+                 eventCode, attenuationDb);
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!_shared->audio_device()->Playing()) {
+        _shared->SetLastError(VE_NOT_PLAYING, kTraceError,
+            "StartPlayingDtmfTone() no channel is playing out");
+        return -1;
+    }
+    const bool validCode = (eventCode >= kMinDtmfEventCode) &&
+                           (eventCode <= kMaxDtmfEventCode);
+    const bool validLevel = (attenuationDb >= kMinTelephoneEventAttenuation) &&
+                            (attenuationDb <= kMaxTelephoneEventAttenuation);
+    if (!validCode || !validLevel) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "StartPlayingDtmfTone() invalid tone parameter(s)");
+        return -1;
+    }
+    return _shared->output_mixer()->StartPlayingDtmfTone(eventCode,
+                                                         attenuationDb);
+}
+
+// Stops a tone previously started with StartPlayingDtmfTone().
+int VoEDtmfImpl::StopPlayingDtmfTone()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopPlayingDtmfTone()");
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    return _shared->output_mixer()->StopPlayingDtmfTone();
+}
+
+// Registers |observer| for telephone-event detection on |channel| using the
+// given detection method. Returns 0 on success, -1 on failure or when DTMF
+// detection support is compiled out.
+// Fix: the unsupported-path error message previously named the wrong
+// function ("SetTelephoneEventDetectionStatus()").
+int VoEDtmfImpl::RegisterTelephoneEventDetection(
+    int channel,
+    TelephoneEventDetectionMethods detectionMethod,
+    VoETelephoneEventObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RegisterTelephoneEventDetection(channel=%d, detectionMethod=%d,"
+                 "observer=0x%x)", channel, detectionMethod, &observer);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterTelephoneEventDetection() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterTelephoneEventDetection(detectionMethod,
+                                                       observer);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "RegisterTelephoneEventDetection() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+// Removes a previously registered telephone-event observer from |channel|.
+// Returns 0 on success, -1 on failure or when detection is compiled out.
+// Fix: the channel-lookup error message contained a stray space
+// ("...EventDe tection()").
+int VoEDtmfImpl::DeRegisterTelephoneEventDetection(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterTelephoneEventDetection(channel=%d)", channel);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterTelephoneEventDetection() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->DeRegisterTelephoneEventDetection();
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DeRegisterTelephoneEventDetection() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+
+// Reports whether telephone-event detection is active on |channel| and
+// which detection method is in use. Returns 0 on success, -1 on failure
+// or when DTMF detection support is compiled out.
+int VoEDtmfImpl::GetTelephoneEventDetectionStatus(
+    int channel,
+    bool& enabled,
+    TelephoneEventDetectionMethods& detectionMethod)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetTelephoneEventDetectionStatus(channel=%d)", channel);
+#ifdef WEBRTC_DTMF_DETECTION
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetTelephoneEventDetectionStatus() failed to locate channel");
+        return -1;
+    }
+    return target->GetTelephoneEventDetectionStatus(enabled, detectionMethod);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetTelephoneEventDetectionStatus() Dtmf detection is not supported");
+    return -1;
+#endif
+}
+
+// Configures local DTMF feedback: |enable| turns feedback tones on/off and
+// |directFeedback| selects immediate local playout instead of the
+// event-driven path. Always succeeds (returns 0).
+// Fix: corrected the typo "directFeeback" in the trace format string.
+int VoEDtmfImpl::SetDtmfFeedbackStatus(bool enable, bool directFeedback)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetDtmfFeedbackStatus(enable=%d, directFeedback=%d)",
+                 (int)enable, (int)directFeedback);
+
+    // Guard the flags: they are read by SendTelephoneEvent().
+    CriticalSectionScoped sc(_shared->crit_sec());
+
+    _dtmfFeedback = enable;
+    _dtmfDirectFeedback = directFeedback;
+
+    return 0;
+}
+
+// Reads back the current DTMF feedback configuration. Always returns 0.
+int VoEDtmfImpl::GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetDtmfFeedbackStatus()");
+
+    {
+        // Read both flags atomically with respect to SetDtmfFeedbackStatus().
+        CriticalSectionScoped lock(_shared->crit_sec());
+        enabled = _dtmfFeedback;
+        directFeedback = _dtmfDirectFeedback;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetDtmfFeedbackStatus() => enabled=%d, directFeedback=%d",
+                 enabled, directFeedback);
+    return 0;
+}
+
+// Enables or disables playout of received (out-of-band) DTMF tones on the
+// given channel. Returns 0 on success, -1 on failure.
+int VoEDtmfImpl::SetDtmfPlayoutStatus(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetDtmfPlayoutStatus(channel=%d, enable=%d)",
+                 channel, enable);
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetDtmfPlayoutStatus() failed to locate channel");
+        return -1;
+    }
+    return target->SetDtmfPlayoutStatus(enable);
+}
+
+// Reports whether playout of received DTMF tones is enabled on |channel|.
+// Returns 0 on success, -1 on failure.
+int VoEDtmfImpl::GetDtmfPlayoutStatus(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetDtmfPlayoutStatus(channel=%d, enabled=?)", channel);
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetDtmfPlayoutStatus() failed to locate channel");
+        return -1;
+    }
+    enabled = target->DtmfPlayoutStatus();
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetDtmfPlayoutStatus() => enabled=%d", enabled);
+    return 0;
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_dtmf_impl.h b/voice_engine/voe_dtmf_impl.h
new file mode 100644
index 0000000..ad3874b
--- /dev/null
+++ b/voice_engine/voe_dtmf_impl.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
+
+#include "voe_dtmf.h"
+
+#include "shared_data.h"
+
+namespace webrtc
+{
+
+// Implementation of the VoEDtmf sub-API. Per-channel calls are forwarded to
+// the corresponding voe::Channel; the feedback flags below control local
+// tone playout when events are sent.
+class VoEDtmfImpl : public VoEDtmf
+{
+public:
+    // Sends a telephone event, out-of-band (RTP) or in-band (audio).
+    virtual int SendTelephoneEvent(
+        int channel,
+        int eventCode,
+        bool outOfBand = true,
+        int lengthMs = 160,
+        int attenuationDb = 10);
+
+    // RTP payload type used for outgoing telephone events.
+    virtual int SetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char type);
+
+    virtual int GetSendTelephoneEventPayloadType(int channel,
+                                                 unsigned char& type);
+
+    // Local feedback-tone behavior when sending events.
+    virtual int SetDtmfFeedbackStatus(bool enable,
+                                      bool directFeedback = false);
+
+    virtual int GetDtmfFeedbackStatus(bool& enabled, bool& directFeedback);
+
+    // Local tone generation through the output mixer.
+    virtual int PlayDtmfTone(int eventCode,
+                             int lengthMs = 200,
+                             int attenuationDb = 10);
+
+    virtual int StartPlayingDtmfTone(int eventCode,
+                                     int attenuationDb = 10);
+
+    virtual int StopPlayingDtmfTone();
+
+    // Telephone-event detection (requires WEBRTC_DTMF_DETECTION).
+    virtual int RegisterTelephoneEventDetection(
+        int channel,
+        TelephoneEventDetectionMethods detectionMethod,
+        VoETelephoneEventObserver& observer);
+
+    virtual int DeRegisterTelephoneEventDetection(int channel);
+
+    virtual int GetTelephoneEventDetectionStatus(
+        int channel,
+        bool& enabled,
+        TelephoneEventDetectionMethods& detectionMethod);
+
+    // Playout of received (out-of-band) DTMF tones.
+    virtual int SetDtmfPlayoutStatus(int channel, bool enable);
+
+    virtual int GetDtmfPlayoutStatus(int channel, bool& enabled);
+
+protected:
+    // Constructed/destroyed by VoiceEngineImpl only.
+    VoEDtmfImpl(voe::SharedData* shared);
+    virtual ~VoEDtmfImpl();
+
+private:
+    bool _dtmfFeedback;        // Play local feedback tone when sending.
+    bool _dtmfDirectFeedback;  // Play the feedback tone immediately.
+    voe::SharedData* _shared;  // Not owned; engine-wide shared state.
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_DTMF_IMPL_H
diff --git a/voice_engine/voe_encryption_impl.cc b/voice_engine/voe_encryption_impl.cc
new file mode 100644
index 0000000..4ac8ada
--- /dev/null
+++ b/voice_engine/voe_encryption_impl.cc
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_encryption_impl.h"
+
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+// Returns the encryption sub-API for |voiceEngine| and adds a reference,
+// or NULL if the API is compiled out or the engine pointer is NULL.
+VoEEncryption* VoEEncryption::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+    return NULL;
+#else
+    if (voiceEngine == NULL) {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();  // Caller releases via the sub-API's Release().
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+
+// Holds only a pointer to the engine-wide shared state; no other members.
+VoEEncryptionImpl::VoEEncryptionImpl(voe::SharedData* shared)
+    : _shared(shared) {
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEEncryptionImpl::VoEEncryptionImpl() - ctor");
+}
+
+VoEEncryptionImpl::~VoEEncryptionImpl() {
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEEncryptionImpl::~VoEEncryptionImpl() - dtor");
+}
+
+// Enables SRTP protection of outgoing RTP (and optionally RTCP) packets on
+// |channel|. Only available when built with WEBRTC_SRTP; otherwise returns
+// -1 with VE_FUNC_NOT_SUPPORTED.
+int VoEEncryptionImpl::EnableSRTPSend(
+    int channel,
+    CipherTypes cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "EnableSRTPSend(channel=%i, cipherType=%i, cipherKeyLength=%i,"
+                 " authType=%i, authKeyLength=%i, authTagLength=%i, level=%i, "
+                 "key=?, useForRTCP=%d)",
+                 channel, cipherType, cipherKeyLength, authType,
+                 authKeyLength, authTagLength, level, useForRTCP);
+#ifdef WEBRTC_SRTP
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "EnableSRTPSend() failed to locate channel");
+        return -1;
+    }
+    return target->EnableSRTPSend(cipherType, cipherKeyLength, authType,
+                                  authKeyLength, authTagLength, level, key,
+                                  useForRTCP);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "EnableSRTPSend() SRTP is not supported");
+    return -1;
+#endif
+}
+
+// Disables SRTP protection of outgoing packets on |channel|.
+// Only available when built with WEBRTC_SRTP.
+int VoEEncryptionImpl::DisableSRTPSend(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DisableSRTPSend(channel=%i)", channel);
+#ifdef WEBRTC_SRTP
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DisableSRTPSend() failed to locate channel");
+        return -1;
+    }
+    return target->DisableSRTPSend();
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DisableSRTPSend() SRTP is not supported");
+    return -1;
+#endif
+}
+
+// Enables SRTP protection of incoming RTP (and optionally RTCP) packets on
+// |channel|. Only available when built with WEBRTC_SRTP; otherwise returns
+// -1 with VE_FUNC_NOT_SUPPORTED.
+int VoEEncryptionImpl::EnableSRTPReceive(
+    int channel,
+    CipherTypes cipherType,
+    int cipherKeyLength,
+    AuthenticationTypes authType,
+    int authKeyLength,
+    int authTagLength,
+    SecurityLevels level,
+    const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+    bool useForRTCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "EnableSRTPReceive(channel=%i, cipherType=%i, "
+                 "cipherKeyLength=%i, authType=%i, authKeyLength=%i, "
+                 "authTagLength=%i, level=%i, key=?, useForRTCP=%d)",
+                 channel, cipherType, cipherKeyLength, authType,
+                 authKeyLength, authTagLength, level, useForRTCP);
+#ifdef WEBRTC_SRTP
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "EnableSRTPReceive() failed to locate channel");
+        return -1;
+    }
+    return target->EnableSRTPReceive(cipherType, cipherKeyLength, authType,
+                                     authKeyLength, authTagLength, level, key,
+                                     useForRTCP);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "EnableSRTPReceive() SRTP is not supported");
+    return -1;
+#endif
+}
+
+// Disables SRTP protection of incoming packets on |channel|.
+// Only available when built with WEBRTC_SRTP.
+int VoEEncryptionImpl::DisableSRTPReceive(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DisableSRTPReceive(channel=%i)", channel);
+#ifdef WEBRTC_SRTP
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DisableSRTPReceive() failed to locate channel");
+        return -1;
+    }
+    return target->DisableSRTPReceive();
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "DisableSRTPReceive() SRTP is not supported");
+    return -1;
+#endif
+}
+
+// Installs an application-supplied Encryption implementation on |channel|.
+// Returns 0 on success, -1 on failure.
+int VoEEncryptionImpl::RegisterExternalEncryption(int channel,
+                                                  Encryption& encryption)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RegisterExternalEncryption(channel=%d, encryption=0x%x)",
+                 channel, &encryption);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterExternalEncryption() failed to locate channel");
+        return -1;
+    }
+    return target->RegisterExternalEncryption(encryption);
+}
+
+// Removes a previously installed external Encryption implementation from
+// |channel|. Returns 0 on success, -1 on failure.
+int VoEEncryptionImpl::DeRegisterExternalEncryption(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterExternalEncryption(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scopedChannel(_shared->channel_manager(), channel);
+    voe::Channel* const target = scopedChannel.ChannelPtr();
+    if (!target) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterExternalEncryption() failed to locate channel");
+        return -1;
+    }
+    return target->DeRegisterExternalEncryption();
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+
+// EOF
+} // namespace webrtc
diff --git a/voice_engine/voe_encryption_impl.h b/voice_engine/voe_encryption_impl.h
new file mode 100644
index 0000000..76124d4
--- /dev/null
+++ b/voice_engine/voe_encryption_impl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
+
+#include "voe_encryption.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoEEncryption sub-API. Each method validates engine
+// and channel state and then forwards to the matching voe::Channel.
+class VoEEncryptionImpl : public VoEEncryption
+{
+public:
+    // SRTP
+ // Enables SRTP protection of the send stream on |channel|.
+ // |useForRTCP| also applies the protection to RTCP packets.
+ virtual int EnableSRTPSend(
+ int channel,
+ CipherTypes cipherType,
+ int cipherKeyLength,
+ AuthenticationTypes authType,
+ int authKeyLength,
+ int authTagLength,
+ SecurityLevels level,
+ const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+ bool useForRTCP = false);
+
+ // Disables SRTP protection of the send stream on |channel|.
+ virtual int DisableSRTPSend(int channel);
+
+ // Enables SRTP unprotection of the receive stream on |channel|.
+ virtual int EnableSRTPReceive(
+ int channel,
+ CipherTypes cipherType,
+ int cipherKeyLength,
+ AuthenticationTypes authType,
+ int authKeyLength,
+ int authTagLength,
+ SecurityLevels level,
+ const unsigned char key[kVoiceEngineMaxSrtpKeyLength],
+ bool useForRTCP = false);
+
+ // Disables SRTP unprotection of the receive stream on |channel|.
+ virtual int DisableSRTPReceive(int channel);
+
+    // External encryption
+ // Installs a user-implemented Encryption callback on |channel|.
+ virtual int RegisterExternalEncryption(
+ int channel,
+ Encryption& encryption);
+
+ // Removes the external Encryption callback from |channel|.
+ virtual int DeRegisterExternalEncryption(int channel);
+
+protected:
+ // Constructed/destroyed by VoiceEngineImpl only.
+ VoEEncryptionImpl(voe::SharedData* shared);
+ virtual ~VoEEncryptionImpl();
+
+private:
+ // Not owned; shared engine state (statistics, channel manager, etc.).
+ voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // #ifndef WEBRTC_VOICE_ENGINE_VOE_ENCRYPTION_IMPL_H
diff --git a/voice_engine/voe_external_media_impl.cc b/voice_engine/voe_external_media_impl.cc
new file mode 100644
index 0000000..0216023
--- /dev/null
+++ b/voice_engine/voe_external_media_impl.cc
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_external_media_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voice_engine_impl.h"
+#include "voe_errors.h"
+
+namespace webrtc {
+
+VoEExternalMedia* VoEExternalMedia::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+    // Sub-API compiled out: no interface available.
+    return NULL;
+#else
+    if (voiceEngine == NULL)
+    {
+        return NULL;
+    }
+    // The interface is ref-counted; the caller must balance with Release().
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+
+// Constructor: stores the shared engine state; the stored playout delay
+// starts at zero until ExternalPlayoutGetData() updates it.
+VoEExternalMediaImpl::VoEExternalMediaImpl(voe::SharedData* shared)
+    : playout_delay_ms_(0), shared_(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "VoEExternalMediaImpl() - ctor");
+}
+
+// Destructor: nothing owned beyond the trace; |shared_| is not owned here.
+VoEExternalMediaImpl::~VoEExternalMediaImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "~VoEExternalMediaImpl() - dtor");
+}
+
+// Registers |processObject| as an external media-processing callback at the
+// point given by |type|. Per-channel types install it on |channel|; the
+// mixed/preprocessing types install it on the output or transmit mixer and
+// ignore |channel|.
+int VoEExternalMediaImpl::RegisterExternalMediaProcessing(
+    int channel,
+    ProcessingTypes type,
+    VoEMediaProcess& processObject)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "RegisterExternalMediaProcessing(channel=%d, type=%d, "
+                 "processObject=0x%x)", channel, type, &processObject);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+    if (!shared_->statistics().Initialized())
+    {
+        shared_->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    switch (type)
+    {
+        case kPlaybackPerChannel:
+        case kRecordingPerChannel:
+        {
+            // Callback lives on the individual channel.
+            voe::ScopedChannel sc(shared_->channel_manager(), channel);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr == NULL)
+            {
+                shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                    "RegisterExternalMediaProcessing() failed to locate "
+                    "channel");
+                return -1;
+            }
+            return channelPtr->RegisterExternalMediaProcessing(type,
+                                                               processObject);
+        }
+        case kPlaybackAllChannelsMixed:
+        {
+            // Callback applies to the mixed playout signal.
+            return shared_->output_mixer()->RegisterExternalMediaProcessing(
+                processObject);
+        }
+        case kRecordingAllChannelsMixed:
+        case kRecordingPreprocessing:
+        {
+            // Callback applies to the capture side before/after mixing.
+            return shared_->transmit_mixer()->RegisterExternalMediaProcessing(
+                &processObject, type);
+        }
+    }
+    // Unknown processing type.
+    return -1;
+}
+
+// Removes a previously registered media-processing callback at the point
+// given by |type|. |channel| is only used for the per-channel types.
+// Returns 0 on success, -1 on failure (last error is set).
+int VoEExternalMediaImpl::DeRegisterExternalMediaProcessing(
+    int channel,
+    ProcessingTypes type)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "DeRegisterExternalMediaProcessing(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+    if (!shared_->statistics().Initialized())
+    {
+        shared_->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    switch (type)
+    {
+        case kPlaybackPerChannel:
+        case kRecordingPerChannel:
+        {
+            voe::ScopedChannel sc(shared_->channel_manager(), channel);
+            voe::Channel* channelPtr = sc.ChannelPtr();
+            if (channelPtr == NULL)
+            {
+                // Bug fix: the error text previously named
+                // RegisterExternalMediaProcessing().
+                shared_->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                    "DeRegisterExternalMediaProcessing() "
+                    "failed to locate channel");
+                return -1;
+            }
+            return channelPtr->DeRegisterExternalMediaProcessing(type);
+        }
+        case kPlaybackAllChannelsMixed:
+        {
+            return shared_->output_mixer()->
+                DeRegisterExternalMediaProcessing();
+        }
+        case kRecordingAllChannelsMixed:
+        case kRecordingPreprocessing:
+        {
+            return shared_->transmit_mixer()->
+                DeRegisterExternalMediaProcessing(type);
+        }
+    }
+    // Unknown processing type.
+    return -1;
+}
+
+// Enables or disables externally supplied capture audio (see
+// ExternalRecordingInsertData). Fails while the audio device is recording.
+int VoEExternalMediaImpl::SetExternalRecordingStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "SetExternalRecordingStatus(enable=%d)", enable);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (shared_->audio_device()->Recording())
+    {
+        // NOTE(review): VE_ALREADY_SENDING looks odd for a recording-state
+        // conflict - confirm there is no more specific error code.
+        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
+            "SetExternalRecordingStatus() cannot set state while sending");
+        return -1;
+    }
+    shared_->set_ext_recording(enable);
+    return 0;
+#else
+    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetExternalRecordingStatus() external recording is not supported");
+    return -1;
+#endif
+}
+
+// Feeds externally captured 16-bit PCM into the transmit path. The buffer
+// must contain a whole number of 10 ms blocks at |samplingFreqHz|;
+// |current_delay_ms| is the capture-side delay reported by the caller.
+// Returns 0 on success, -1 on failure (last error is set).
+int VoEExternalMediaImpl::ExternalRecordingInsertData(
+    const WebRtc_Word16 speechData10ms[],
+    int lengthSamples,
+    int samplingFreqHz,
+    int current_delay_ms)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "ExternalRecordingInsertData(speechData10ms=0x%x,"
+                 " lengthSamples=%u, samplingFreqHz=%d, current_delay_ms=%d)",
+                 &speechData10ms[0], lengthSamples, samplingFreqHz,
+                 current_delay_ms);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (!shared_->statistics().Initialized())
+    {
+        shared_->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!shared_->ext_recording())
+    {
+        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
+            "ExternalRecordingInsertData() external recording is not enabled");
+        return -1;
+    }
+    if (shared_->NumOfSendingChannels() == 0)
+    {
+        // Bug fix: the error texts below previously named
+        // SetExternalRecordingStatus().
+        // NOTE(review): VE_ALREADY_SENDING looks like the wrong code for
+        // "no channel is sending" - confirm before changing it.
+        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
+            "ExternalRecordingInsertData() no channel is sending");
+        return -1;
+    }
+    // NOTE(review): 44000 (not 44100) matches the rate check in
+    // ExternalPlayoutGetData - presumably intentional; confirm.
+    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
+        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+    {
+        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "ExternalRecordingInsertData() invalid sample rate");
+        return -1;
+    }
+    if ((0 == lengthSamples) ||
+        ((lengthSamples % (samplingFreqHz / 100)) != 0))
+    {
+        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "ExternalRecordingInsertData() invalid buffer size");
+        return -1;
+    }
+    if (current_delay_ms < 0)
+    {
+        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "ExternalRecordingInsertData() invalid delay");
+        return -1;
+    }
+
+    WebRtc_UWord16 blockSize = samplingFreqHz / 100;  // samples per 10 ms
+    WebRtc_UWord32 nBlocks = lengthSamples / blockSize;
+    WebRtc_Word16 totalDelayMS = 0;
+    WebRtc_UWord16 playoutDelayMS = 0;
+
+    // Push the input through the transmit mixer one 10 ms block at a time.
+    for (WebRtc_UWord32 i = 0; i < nBlocks; i++)
+    {
+        if (!shared_->ext_playout())
+        {
+            // Use real playout delay if external playout is not enabled.
+            if (shared_->audio_device()->PlayoutDelay(&playoutDelayMS) != 0) {
+                shared_->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR,
+                    kTraceWarning,
+                    "PlayoutDelay() unable to get the playout delay");
+            }
+            totalDelayMS = current_delay_ms + playoutDelayMS;
+        }
+        else
+        {
+            // Use stored delay value given the last call
+            // to ExternalPlayoutGetData.
+            totalDelayMS = current_delay_ms + playout_delay_ms_;
+            // Compensate for block sizes larger than 10ms
+            totalDelayMS -= (WebRtc_Word16)(i*10);
+            if (totalDelayMS < 0)
+                totalDelayMS = 0;
+        }
+        shared_->transmit_mixer()->PrepareDemux(
+            (const WebRtc_Word8*)(&speechData10ms[i*blockSize]),
+            blockSize,
+            1,
+            samplingFreqHz,
+            totalDelayMS,
+            0,
+            0);
+
+        shared_->transmit_mixer()->DemuxAndMix();
+        shared_->transmit_mixer()->EncodeAndSend();
+    }
+    return 0;
+#else
+    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "ExternalRecordingInsertData() external recording is not supported");
+    return -1;
+#endif
+}
+
+// Enables or disables externally driven playout (see
+// ExternalPlayoutGetData). Fails while the audio device is playing.
+int VoEExternalMediaImpl::SetExternalPlayoutStatus(bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "SetExternalPlayoutStatus(enable=%d)", enable);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (shared_->audio_device()->Playing())
+    {
+        // NOTE(review): VE_ALREADY_SENDING and the "while playing" message
+        // disagree - confirm whether a playout-specific code exists.
+        shared_->SetLastError(VE_ALREADY_SENDING, kTraceError,
+            "SetExternalPlayoutStatus() cannot set state while playing");
+        return -1;
+    }
+    shared_->set_ext_playout(enable);
+    return 0;
+#else
+    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "SetExternalPlayoutStatus() external playout is not supported");
+    return -1;
+#endif
+}
+
+// Pulls one block of mixed playout audio at |samplingFreqHz| into
+// |speechData10ms| for delivery to an external sink; the number of samples
+// written is returned in |lengthSamples|. |current_delay_ms| is stored for
+// later use by ExternalRecordingInsertData().
+int VoEExternalMediaImpl::ExternalPlayoutGetData(
+    WebRtc_Word16 speechData10ms[],
+    int samplingFreqHz,
+    int current_delay_ms,
+    int& lengthSamples)
+{
+    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(shared_->instance_id(), -1),
+                 "ExternalPlayoutGetData(speechData10ms=0x%x, samplingFreqHz=%d"
+                 ", current_delay_ms=%d)", &speechData10ms[0], samplingFreqHz,
+                 current_delay_ms);
+    ANDROID_NOT_SUPPORTED(shared_->statistics());
+    IPHONE_NOT_SUPPORTED(shared_->statistics());
+#ifdef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+    if (!shared_->statistics().Initialized())
+    {
+        shared_->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (!shared_->ext_playout())
+    {
+        shared_->SetLastError(VE_INVALID_OPERATION, kTraceError,
+            "ExternalPlayoutGetData() external playout is not enabled");
+        return -1;
+    }
+    // NOTE(review): 44000 (not 44100) matches the rate check in
+    // ExternalRecordingInsertData - presumably intentional; confirm.
+    if ((16000 != samplingFreqHz) && (32000 != samplingFreqHz) &&
+        (48000 != samplingFreqHz) && (44000 != samplingFreqHz))
+    {
+        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "ExternalPlayoutGetData() invalid sample rate");
+        return -1;
+    }
+    if (current_delay_ms < 0)
+    {
+        shared_->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "ExternalPlayoutGetData() invalid delay)");
+        return -1;
+    }
+
+    AudioFrame audioFrame;
+
+    // Retrieve mixed output at the specified rate
+    shared_->output_mixer()->MixActiveChannels();
+    shared_->output_mixer()->DoOperationsOnCombinedSignal();
+    shared_->output_mixer()->GetMixedAudio(samplingFreqHz, 1, &audioFrame);
+
+    // Deliver audio (PCM) samples to the external sink
+    memcpy(speechData10ms,
+           audioFrame.data_,
+           sizeof(WebRtc_Word16)*(audioFrame.samples_per_channel_));
+    lengthSamples = audioFrame.samples_per_channel_;
+
+    // Store current playout delay (to be used by ExternalRecordingInsertData).
+    playout_delay_ms_ = current_delay_ms;
+
+    return 0;
+#else
+    shared_->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "ExternalPlayoutGetData() external playout is not supported");
+    return -1;
+#endif
+}
+
+#endif // WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_external_media_impl.h b/voice_engine/voe_external_media_impl.h
new file mode 100644
index 0000000..c922392
--- /dev/null
+++ b/voice_engine/voe_external_media_impl.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
+
+#include "voe_external_media.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoEExternalMedia sub-API: external media-processing
+// callbacks plus externally driven recording and playout.
+class VoEExternalMediaImpl : public VoEExternalMedia
+{
+public:
+    // Installs |processObject| at the processing point given by |type|
+    // (per-channel types use |channel|; mixed types ignore it).
+    virtual int RegisterExternalMediaProcessing(
+        int channel,
+        ProcessingTypes type,
+        VoEMediaProcess& processObject);
+
+    // Removes the callback installed at the given processing point.
+    virtual int DeRegisterExternalMediaProcessing(
+        int channel,
+        ProcessingTypes type);
+
+    // Toggles externally supplied capture audio.
+    virtual int SetExternalRecordingStatus(bool enable);
+
+    // Toggles externally driven playout.
+    virtual int SetExternalPlayoutStatus(bool enable);
+
+    // Inserts externally captured PCM into the transmit path.
+    virtual int ExternalRecordingInsertData(
+        const WebRtc_Word16 speechData10ms[],
+        int lengthSamples,
+        int samplingFreqHz,
+        int current_delay_ms);
+
+    // Pulls mixed playout PCM for an external sink.
+    virtual int ExternalPlayoutGetData(WebRtc_Word16 speechData10ms[],
+                                       int samplingFreqHz,
+                                       int current_delay_ms,
+                                       int& lengthSamples);
+
+protected:
+    // Constructed/destroyed by VoiceEngineImpl only.
+    VoEExternalMediaImpl(voe::SharedData* shared);
+    virtual ~VoEExternalMediaImpl();
+
+private:
+    // Delay reported by the last ExternalPlayoutGetData() call; consumed by
+    // ExternalRecordingInsertData() when external playout is enabled.
+    int playout_delay_ms_;
+    // Not owned; shared engine state.
+    voe::SharedData* shared_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_EXTERNAL_MEDIA_IMPL_H
diff --git a/voice_engine/voe_file_impl.cc b/voice_engine/voe_file_impl.cc
new file mode 100644
index 0000000..8f0061f
--- /dev/null
+++ b/voice_engine/voe_file_impl.cc
@@ -0,0 +1,1419 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_file_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "file_wrapper.h"
+#include "media_file.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+VoEFile* VoEFile::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_FILE_API
+    // Sub-API compiled out: no interface available.
+    return NULL;
+#else
+    if (voiceEngine == NULL)
+    {
+        return NULL;
+    }
+    // The interface is ref-counted; the caller must balance with Release().
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+
+// Constructor: stores the (not owned) shared engine state.
+VoEFileImpl::VoEFileImpl(voe::SharedData* shared) : _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEFileImpl::VoEFileImpl() - ctor");
+}
+
+// Destructor: nothing owned beyond the trace.
+VoEFileImpl::~VoEFileImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEFileImpl::~VoEFileImpl() - dtor");
+}
+
+// Starts playout of the file |fileNameUTF8| on |channel|. Looping, format,
+// scaling and the start/stop points are forwarded to the channel unchanged;
+// the trailing NULL is the (unused) codec-format argument.
+int VoEFileImpl::StartPlayingFileLocally(
+    int channel,
+    const char fileNameUTF8[1024],
+    bool loop, FileFormats format,
+    float volumeScaling,
+    int startPointMs,
+    int stopPointMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartPlayingFileLocally(channel=%d, fileNameUTF8[]=%s, "
+                 "loop=%d, format=%d, volumeScaling=%5.3f, startPointMs=%d,"
+                 " stopPointMs=%d)",
+                 channel, fileNameUTF8, loop, format, volumeScaling,
+                 startPointMs, stopPointMs);
+    // The 1024-byte buffer contract must match FileWrapper's limit.
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->StartPlayingFileLocally(fileNameUTF8,
+                                               loop,
+                                               format,
+                                               startPointMs,
+                                               volumeScaling,
+                                               stopPointMs,
+                                               NULL);
+}
+
+// Stream overload: starts playout from |stream| on |channel|. Looping is
+// not available for streams; the trailing NULL is the (unused) codec-format
+// argument.
+int VoEFileImpl::StartPlayingFileLocally(int channel,
+                                         InStream* stream,
+                                         FileFormats format,
+                                         float volumeScaling,
+                                         int startPointMs,
+                                         int stopPointMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartPlayingFileLocally(channel=%d, stream, format=%d, "
+                 "volumeScaling=%5.3f, startPointMs=%d, stopPointMs=%d)",
+                 channel, format, volumeScaling, startPointMs, stopPointMs);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+
+    return channelPtr->StartPlayingFileLocally(stream,
+                                               format,
+                                               startPointMs,
+                                               volumeScaling,
+                                               stopPointMs,
+                                               NULL);
+}
+
+int VoEFileImpl::StopPlayingFileLocally(int channel)
+{
+    // Stops any active file playout on the given channel.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopPlayingFileLocally()");
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (NULL == channel_ptr)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->StopPlayingFileLocally();
+}
+
+// Queries whether a file is currently being played out on |channel|.
+// Returns the channel's playing state (presumably 1/0) or -1 on failure.
+int VoEFileImpl::IsPlayingFileLocally(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "IsPlayingFileLocally(channel=%d)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: the error text previously named StopPlayingFileLocally().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "IsPlayingFileLocally() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->IsPlayingFileLocally();
+}
+
+// Scales the volume of an ongoing local file playout on |channel| by
+// |scale|. Returns 0 on success, -1 on failure (last error is set).
+int VoEFileImpl::ScaleLocalFilePlayout(int channel, float scale)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ScaleLocalFilePlayout(channel=%d, scale=%5.3f)",
+                 channel, scale);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: the error text previously named StopPlayingFileLocally().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "ScaleLocalFilePlayout() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->ScaleLocalFilePlayout(scale);
+}
+
+// Injects the file |fileNameUTF8| into the capture path. channel == -1
+// inserts it before demultiplexing (affects all channels, via the transmit
+// mixer); otherwise it is inserted after demultiplexing on one channel.
+// |mixWithMicrophone| mixes the file with live microphone audio instead of
+// replacing it.
+int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
+                                              const char fileNameUTF8[1024],
+                                              bool loop,
+                                              bool mixWithMicrophone,
+                                              FileFormats format,
+                                              float volumeScaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
+                 "loop=%d, mixWithMicrophone=%d, format=%d, "
+                 "volumeScaling=%5.3f)",
+                 channel, fileNameUTF8, loop, mixWithMicrophone, format,
+                 volumeScaling);
+    // The 1024-byte buffer contract must match FileWrapper's limit.
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Always play the whole file (no start/stop window).
+    const WebRtc_UWord32 startPointMs(0);
+    const WebRtc_UWord32 stopPointMs(0);
+
+    if (channel == -1)
+    {
+        int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
+            fileNameUTF8,
+            loop,
+            format,
+            startPointMs,
+            volumeScaling,
+            stopPointMs,
+            NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayingFileAsMicrophone() failed to start playing file");
+            return(-1);
+        }
+        else
+        {
+            _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
+            return(0);
+        }
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+
+        int res = channelPtr->StartPlayingFileAsMicrophone(fileNameUTF8,
+                                                           loop,
+                                                           format,
+                                                           startPointMs,
+                                                           volumeScaling,
+                                                           stopPointMs,
+                                                           NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayingFileAsMicrophone() failed to start playing file");
+            return -1;
+        }
+        else
+        {
+            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return 0;
+        }
+    }
+}
+
+// Stream overload: injects |stream| into the capture path. channel == -1
+// targets the transmit mixer (all channels); otherwise a single channel.
+int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
+                                              InStream* stream,
+                                              bool mixWithMicrophone,
+                                              FileFormats format,
+                                              float volumeScaling)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartPlayingFileAsMicrophone(channel=%d, stream,"
+                 " mixWithMicrophone=%d, format=%d, volumeScaling=%5.3f)",
+                 channel, mixWithMicrophone, format, volumeScaling);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Always play the whole stream (no start/stop window).
+    const WebRtc_UWord32 startPointMs(0);
+    const WebRtc_UWord32 stopPointMs(0);
+
+    if (channel == -1)
+    {
+        int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
+            stream,
+            format,
+            startPointMs,
+            volumeScaling,
+            stopPointMs,
+            NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayingFileAsMicrophone() failed to start "
+                "playing stream");
+            return(-1);
+        }
+        else
+        {
+            _shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
+            return(0);
+        }
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartPlayingFileAsMicrophone() failed to locate channel");
+            return -1;
+        }
+
+        int res = channelPtr->StartPlayingFileAsMicrophone(
+            stream, format, startPointMs, volumeScaling, stopPointMs, NULL);
+        if (res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartPlayingFileAsMicrophone() failed to start "
+                "playing stream");
+            return -1;
+        }
+        else
+        {
+            channelPtr->SetMixWithMicStatus(mixWithMicrophone);
+            return 0;
+        }
+    }
+}
+
+int VoEFileImpl::StopPlayingFileAsMicrophone(int channel)
+{
+    // Stops injecting a file into the capture path. channel == -1 targets
+    // the transmit mixer (all channels); otherwise one channel.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopPlayingFileAsMicrophone(channel=%d)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        // Stop adding file before demultiplexing <=> affects all channels
+        return _shared->transmit_mixer()->StopPlayingFileAsMicrophone();
+    }
+    // Stop adding file after demultiplexing <=> affects one channel only
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (NULL == channel_ptr)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopPlayingFileAsMicrophone() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->StopPlayingFileAsMicrophone();
+}
+
+int VoEFileImpl::IsPlayingFileAsMicrophone(int channel)
+{
+    // Queries whether a file is being injected into the capture path.
+    // channel == -1 queries the transmit mixer; otherwise one channel.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "IsPlayingFileAsMicrophone(channel=%d)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _shared->transmit_mixer()->IsPlayingFileAsMicrophone();
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (NULL == channel_ptr)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "IsPlayingFileAsMicrophone() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->IsPlayingFileAsMicrophone();
+}
+
+// Scales the volume of a file currently injected into the capture path.
+// channel == -1 scales on the transmit mixer (all channels); otherwise on
+// one channel. Returns 0 on success, -1 on failure (last error is set).
+int VoEFileImpl::ScaleFileAsMicrophonePlayout(int channel, float scale)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ScaleFileAsMicrophonePlayout(channel=%d, scale=%5.3f)",
+                 channel, scale);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _shared->transmit_mixer()->ScaleFileAsMicrophonePlayout(scale);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            // Bug fix: the error text previously named
+            // IsPlayingFileAsMicrophone().
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "ScaleFileAsMicrophonePlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->ScaleFileAsMicrophonePlayout(scale);
+    }
+}
+
+// Starts recording playout audio to |fileNameUTF8|. channel == -1 records
+// the mixed output of all channels; otherwise a single channel's playout.
+// NOTE(review): |maxSizeBytes| is traced but never forwarded - confirm
+// whether a size limit was intended here.
+int VoEFileImpl::StartRecordingPlayout(
+    int channel, const char* fileNameUTF8, CodecInst* compression,
+    int maxSizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingPlayout(channel=%d, fileNameUTF8=%s, "
+                 "compression, maxSizeBytes=%d)",
+                 channel, fileNameUTF8, maxSizeBytes);
+    // The 1024-byte buffer contract must match FileWrapper's limit.
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _shared->output_mixer()->StartRecordingPlayout
+          (fileNameUTF8, compression);
+    }
+    else
+    {
+        // Add file after demultiplexing <=> affects one channel only
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartRecordingPlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StartRecordingPlayout(fileNameUTF8, compression);
+    }
+}
+
+// Stream overload: starts recording playout audio to |stream|.
+// channel == -1 records the mixed output; otherwise one channel.
+int VoEFileImpl::StartRecordingPlayout(
+    int channel, OutStream* stream, CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingPlayout(channel=%d, stream, compression)",
+                 channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _shared->output_mixer()->
+            StartRecordingPlayout(stream, compression);
+    }
+    else
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "StartRecordingPlayout() failed to locate channel");
+            return -1;
+        }
+        return channelPtr->StartRecordingPlayout(stream, compression);
+    }
+}
+
+int VoEFileImpl::StopRecordingPlayout(int channel)
+{
+    // Stops a playout recording started with StartRecordingPlayout().
+    // channel == -1 targets the output mixer; otherwise one channel.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopRecordingPlayout(channel=%d)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        return _shared->output_mixer()->StopRecordingPlayout();
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (NULL == channel_ptr)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopRecordingPlayout() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->StopRecordingPlayout();
+}
+
+// Starts recording microphone audio to |fileNameUTF8| via the transmit
+// mixer, starting the audio device's recording if it is not already
+// running (and external recording is not enabled).
+// NOTE(review): |maxSizeBytes| is traced but never forwarded - confirm
+// whether a size limit was intended here.
+int VoEFileImpl::StartRecordingMicrophone(
+    const char* fileNameUTF8, CodecInst* compression, int maxSizeBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingMicrophone(fileNameUTF8=%s, compression, "
+                 "maxSizeBytes=%d)", fileNameUTF8, maxSizeBytes);
+    // The 1024-byte buffer contract must match FileWrapper's limit.
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // NOTE(review): this overload treats any nonzero return as failure
+    // while the stream overload compares against -1 - confirm the mixer's
+    // return contract and unify.
+    if (_shared->transmit_mixer()->StartRecordingMicrophone(fileNameUTF8,
+                                                            compression))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "StartRecordingMicrophone() failed to start recording");
+        return -1;
+    }
+    if (_shared->audio_device()->Recording())
+    {
+        // Device already capturing; nothing more to start.
+        return 0;
+    }
+    if (!_shared->ext_recording())
+    {
+        // Bring up the capture device so the mixer actually receives audio.
+        if (_shared->audio_device()->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartRecordingMicrophone() failed to initialize recording");
+            return -1;
+        }
+        if (_shared->audio_device()->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartRecordingMicrophone() failed to start recording");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Stream overload: starts recording microphone audio to |stream| via the
+// transmit mixer, starting the audio device's recording if needed (and
+// external recording is not enabled).
+int VoEFileImpl::StartRecordingMicrophone(
+    OutStream* stream, CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRecordingMicrophone(stream, compression)");
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (_shared->transmit_mixer()->StartRecordingMicrophone(stream,
+                                                            compression) == -1)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "StartRecordingMicrophone() failed to start recording");
+        return -1;
+    }
+    if (_shared->audio_device()->Recording())
+    {
+        // Device already capturing; nothing more to start.
+        return 0;
+    }
+    if (!_shared->ext_recording())
+    {
+        // Bring up the capture device so the mixer actually receives audio.
+        if (_shared->audio_device()->InitRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartRecordingMicrophone() failed to initialize recording");
+            return -1;
+        }
+        if (_shared->audio_device()->StartRecording() != 0)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "StartRecordingMicrophone() failed to start recording");
+            return -1;
+        }
+    }
+    return 0;
+}
+
+// Stops a microphone recording: stops the audio device's capture when no
+// channel is sending, then stops the transmit mixer's file recording.
+// Returns -1 if either step fails (both are still attempted).
+int VoEFileImpl::StopRecordingMicrophone()
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopRecordingMicrophone()");
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    int err = 0;
+
+    // TODO(xians): consider removing Start/StopRecording() in
+    // Start/StopRecordingMicrophone() if no channel is recording.
+    if (_shared->NumOfSendingChannels() == 0 &&
+        _shared->audio_device()->Recording())
+    {
+        // Stop audio-device recording if no channel is recording
+        if (_shared->audio_device()->StopRecording() != 0)
+        {
+            _shared->SetLastError(VE_CANNOT_STOP_RECORDING, kTraceError,
+                "StopRecordingMicrophone() failed to stop recording");
+            err = -1;
+        }
+    }
+
+    if (_shared->transmit_mixer()->StopRecordingMicrophone() != 0)
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "StopRecordingMicrophone() failed to stop recording to mixer");
+        err = -1;
+    }
+
+    return err;
+}
+
+// TODO(andrew): a cursory inspection suggests there's a large amount of
+// overlap in these convert functions which could be refactored to a helper.
+// Offline conversion: decodes a 16 kHz mono PCM file in 10 ms chunks via a
+// FilePlayer and re-records each chunk through a FileRecorder configured for
+// linear 16-bit ("L16") WAV output. Returns 0 on success, -1 on setup
+// failure; a failure during conversion is propagated via the last |res|.
+int VoEFileImpl::ConvertPCMToWAV(const char* fileNameInUTF8,
+                                 const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertPCMToWAV(fileNameInUTF8=%s, fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1,
+        kFileFormatPcm16kHzFile));
+
+    int res=playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0, NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatWavFile));
+
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-file; this is an OK way to end.
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToWAV failed during conversion (audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToWAV failed during conversion (write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Stream variant of ConvertPCMToWAV(): reads 16 kHz mono PCM from |streamIn|
+// and writes L16 WAV to |streamOut|. Both handles must be non-NULL; the
+// caller retains ownership of the streams.
+int VoEFileImpl::ConvertPCMToWAV(InStream* streamIn, OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertPCMToWAV(streamIn, streamOut)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+        kFileFormatPcm16kHzFile));
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(-1,
+        kFileFormatWavFile));
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname, "L16", 32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToWAV failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the stream, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-stream; this is an OK way to end.
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength, frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToWAV failed during conversion "
+                "(create audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToWAV failed during conversion (write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Offline conversion: decodes a WAV file in 10 ms chunks and writes the
+// audio to a 16 kHz mono PCM file. Returns 0 on success, -1 on setup
+// failure; a failure during conversion is propagated via the last |res|.
+int VoEFileImpl::ConvertWAVToPCM(const char* fileNameInUTF8,
+                                 const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertWAVToPCM(fileNameInUTF8=%s, fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+        kFileFormatWavFile));
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-file; this is an OK way to end.
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertWAVToPCM failed during conversion (audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertWAVToPCM failed during conversion (write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Stream variant of ConvertWAVToPCM(): reads WAV audio from |streamIn| and
+// writes 16 kHz mono PCM to |streamOut|. Both handles must be non-NULL; the
+// caller retains ownership of the streams.
+int VoEFileImpl::ConvertWAVToPCM(InStream* streamIn, OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertWAVToPCM(streamIn, streamOut)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(-1,
+        kFileFormatWavFile));
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertWAVToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the stream, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-stream; this is an OK way to end.
+            break;
+        }
+
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength, frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertWAVToPCM failed during conversion (audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertWAVToPCM failed during conversion (write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Offline conversion: decodes a 16 kHz mono PCM file in 10 ms chunks and
+// re-encodes it to a compressed file using the codec described by
+// |compression|.
+// NOTE(review): |compression| is dereferenced without a NULL check below —
+// callers must pass a valid codec; confirm whether a guard is needed.
+int VoEFileImpl::ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                        const char* fileNameOutUTF8,
+                                        CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertPCMToCompressed(fileNameInUTF8=%s, fileNameOutUTF8=%s"
+                 ", compression)", fileNameInUTF8, fileNameOutUTF8);
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "  compression: plname=%s, plfreq=%d, pacsize=%d",
+                 compression->plname, compression->plfreq,
+                 compression->pacsize);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1,
+        kFileFormatPcm16kHzFile));
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0, NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create player object");
+        // Clean up and shutdown the file player
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1,
+        kFileFormatCompressedFile));
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8, *compression,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-file; this is an OK way to end.
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToCompressed failed during conversion "
+                "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToCompressed failed during conversion "
+                "(write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Stream variant of ConvertPCMToCompressed(): reads 16 kHz mono PCM from
+// |streamIn| and writes compressed audio to |streamOut| using the codec in
+// |compression|. Stream handles must be non-NULL.
+// NOTE(review): |compression| itself is dereferenced without a NULL check —
+// confirm callers always supply one.
+int VoEFileImpl::ConvertPCMToCompressed(InStream* streamIn,
+                                        OutStream* streamOut,
+                                        CodecInst* compression)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertPCMToCompressed(streamIn, streamOut, compression)");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), "invalid stream handles");
+        return (-1);
+    }
+
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "  compression: plname=%s, plfreq=%d, pacsize=%d",
+                 compression->plname, compression->plfreq,
+                 compression->pacsize);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatPcm16kHzFile));
+
+    int res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatCompressedFile));
+    res = recObj.StartRecordingAudioFile(*streamOut,*compression,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertPCMToCompressed failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the stream, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-stream; this is an OK way to end.
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency, AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToCompressed failed during conversion "
+                "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertPCMToCompressed failed during conversion "
+                "(write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Offline conversion: decodes a compressed audio file in 10 ms chunks and
+// writes the decoded audio to a 16 kHz mono PCM file. Returns 0 on success,
+// -1 on setup failure; mid-conversion errors propagate via the last |res|.
+int VoEFileImpl::ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                        const char* fileNameOutUTF8)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertCompressedToPCM(fileNameInUTF8=%s,"
+                 " fileNameOutUTF8=%s)",
+                 fileNameInUTF8, fileNameOutUTF8);
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatCompressedFile));
+
+    int res = playerObj.StartPlayingFile(fileNameInUTF8,false,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+
+    res = recObj.StartRecordingAudioFile(fileNameOutUTF8,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the file, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-file; this is an OK way to end.
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertCompressedToPCM failed during conversion "
+                "(create audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertCompressedToPCM failed during conversion "
+                "(write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+// Stream variant of ConvertCompressedToPCM(): reads compressed audio from
+// |streamIn| and writes 16 kHz mono PCM to |streamOut|. Both handles must be
+// non-NULL; the caller retains ownership of the streams.
+int VoEFileImpl::ConvertCompressedToPCM(InStream* streamIn,
+                                        OutStream* streamOut)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "ConvertCompressedToPCM(file, file);");
+
+    if ((streamIn == NULL) || (streamOut == NULL))
+    {
+        WEBRTC_TRACE(kTraceError, kTraceVoice,
+            VoEId(_shared->instance_id(), -1), "invalid stream handles");
+        return (-1);
+    }
+
+    // Create file player object
+    FilePlayer& playerObj(*FilePlayer::CreateFilePlayer(
+        -1, kFileFormatCompressedFile));
+    int res;
+
+    res = playerObj.StartPlayingFile(*streamIn,0,1.0,0,0,NULL);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create player object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        return -1;
+    }
+
+    // Create file recorder object
+    FileRecorder& recObj(*FileRecorder::CreateFileRecorder(
+        -1, kFileFormatPcm16kHzFile));
+
+    // L16 @ 16 kHz mono, 10 ms packets (160 samples).
+    CodecInst codecInst;
+    strncpy(codecInst.plname,"L16",32);
+    codecInst.channels = 1;
+    codecInst.rate = 256000;
+    codecInst.plfreq = 16000;
+    codecInst.pltype = 94;
+    codecInst.pacsize = 160;
+
+    res = recObj.StartRecordingAudioFile(*streamOut,codecInst,0);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "ConvertCompressedToPCM failed to create recorder object");
+        playerObj.StopPlayingFile();
+        FilePlayer::DestroyFilePlayer(&playerObj);
+        recObj.StopRecording();
+        FileRecorder::DestroyFileRecorder(&recObj);
+        return -1;
+    }
+
+    // Run through the stream, one 10 ms frame at a time.
+    AudioFrame audioFrame;
+    WebRtc_Word16 decodedData[160];
+    int decLength=0;
+    const WebRtc_UWord32 frequency = 16000;
+
+    while(!playerObj.Get10msAudioFromFile(decodedData,decLength,frequency))
+    {
+        if(decLength!=frequency/100)
+        {
+            // A short read marks end-of-stream; this is an OK way to end.
+            break;
+        }
+        res=audioFrame.UpdateFrame(-1, 0, decodedData,
+                                   (WebRtc_UWord16)decLength,
+                                   frequency,
+                                   AudioFrame::kNormalSpeech,
+                                   AudioFrame::kVadActive);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertCompressedToPCM failed during conversion "
+                "(audio frame)");
+            break;
+        }
+
+        res=recObj.RecordAudioToFile(audioFrame);
+        if(res)
+        {
+            WEBRTC_TRACE(kTraceError, kTraceVoice,
+                VoEId(_shared->instance_id(), -1),
+                "ConvertCompressedToPCM failed during conversion "
+                "(write frame)");
+        }
+    }
+
+    // Tear down player and recorder in all cases; |res| carries any error.
+    playerObj.StopPlayingFile();
+    recObj.StopRecording();
+    FilePlayer::DestroyFilePlayer(&playerObj);
+    FileRecorder::DestroyFileRecorder(&recObj);
+
+    return res;
+}
+
+
+// Measures the duration of an audio file via a temporary MediaFile module.
+// On success writes the duration (ms) to |durationMs| and returns 0;
+// returns -1 and sets VE_BAD_FILE on failure.
+int VoEFileImpl::GetFileDuration(const char* fileNameUTF8,
+                                 int& durationMs,
+                                 FileFormats format)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetFileDuration(fileNameUTF8=%s, format=%d)",
+                 fileNameUTF8, format);
+
+    // Create a dummy file module for this
+    MediaFile * fileModule=MediaFile::CreateMediaFile(-1);
+
+    // Temp container of the right format
+    WebRtc_UWord32 duration;
+    int res=fileModule->FileDurationMs(fileNameUTF8,duration,format);
+    if (res)
+    {
+        _shared->SetLastError(VE_BAD_FILE, kTraceError,
+            "GetFileDuration() failed measure file duration");
+        // Bug fix: destroy the module on the error path too; the early
+        // return previously leaked the MediaFile instance.
+        MediaFile::DestroyMediaFile(fileModule);
+        return -1;
+    }
+    durationMs = duration;
+    MediaFile::DestroyMediaFile(fileModule);
+    fileModule = NULL;
+
+    return(res);
+}
+
+// Reports the local file-playout position (ms) for |channel| through
+// |positionMs|. Returns -1 if the channel cannot be resolved, otherwise the
+// channel's own result code.
+int VoEFileImpl::GetPlaybackPosition(int channel, int& positionMs)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPlaybackPosition(channel=%d)", channel);
+
+    // Resolve the channel; the scoped wrapper releases it on scope exit.
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* target = sc.ChannelPtr();
+    if (target == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPlaybackPosition() failed to locate channel");
+        return -1;
+    }
+    // Delegate to the channel; it writes the position into |positionMs|.
+    return target->GetLocalPlayoutPosition(positionMs);
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_FILE_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_file_impl.h b/voice_engine/voe_file_impl.h
new file mode 100644
index 0000000..dcb5642
--- /dev/null
+++ b/voice_engine/voe_file_impl.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+
+#include "voe_file.h"
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoEFile sub-API: file playout into a channel, file
+// playout as microphone input, recording of playout/microphone signals to
+// file, offline format conversion, and file-position queries.
+class VoEFileImpl : public VoEFile
+{
+public:
+    // Playout file locally (mixed into the given channel's playout).
+
+    virtual int StartPlayingFileLocally(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0,
+        int stopPointMs = 0);
+
+    virtual int StartPlayingFileLocally(
+        int channel,
+        InStream* stream,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0,
+        int startPointMs = 0, int stopPointMs = 0);
+
+    virtual int StopPlayingFileLocally(int channel);
+
+    virtual int IsPlayingFileLocally(int channel);
+
+    virtual int ScaleLocalFilePlayout(int channel, float scale);
+
+    // Use file as microphone input (optionally mixed with the live mic).
+
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        const char fileNameUTF8[1024],
+        bool loop = false,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0);
+
+    virtual int StartPlayingFileAsMicrophone(
+        int channel,
+        InStream* stream,
+        bool mixWithMicrophone = false,
+        FileFormats format = kFileFormatPcm16kHzFile,
+        float volumeScaling = 1.0);
+
+    virtual int StopPlayingFileAsMicrophone(int channel);
+
+    virtual int IsPlayingFileAsMicrophone(int channel);
+
+    virtual int ScaleFileAsMicrophonePlayout(int channel, float scale);
+
+    // Record speaker signal to file
+
+    virtual int StartRecordingPlayout(int channel,
+                                      const char* fileNameUTF8,
+                                      CodecInst* compression = NULL,
+                                      int maxSizeBytes = -1);
+
+    virtual int StartRecordingPlayout(int channel,
+                                      OutStream* stream,
+                                      CodecInst* compression = NULL);
+
+    virtual int StopRecordingPlayout(int channel);
+
+    // Record microphone signal to file
+
+    virtual int StartRecordingMicrophone(const char* fileNameUTF8,
+                                         CodecInst* compression = NULL,
+                                         int maxSizeBytes = -1);
+
+    virtual int StartRecordingMicrophone(OutStream* stream,
+                                         CodecInst* compression = NULL);
+
+    virtual int StopRecordingMicrophone();
+
+    // Offline conversion between file formats (PCM, WAV, compressed).
+
+    virtual int ConvertPCMToWAV(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8);
+
+    virtual int ConvertPCMToWAV(InStream* streamIn,
+                                OutStream* streamOut);
+
+    virtual int ConvertWAVToPCM(const char* fileNameInUTF8,
+                                const char* fileNameOutUTF8);
+
+    virtual int ConvertWAVToPCM(InStream* streamIn,
+                                OutStream* streamOut);
+
+    virtual int ConvertPCMToCompressed(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8,
+                                       CodecInst* compression);
+
+    virtual int ConvertPCMToCompressed(InStream* streamIn,
+                                       OutStream* streamOut,
+                                       CodecInst* compression);
+
+    virtual int ConvertCompressedToPCM(const char* fileNameInUTF8,
+                                       const char* fileNameOutUTF8);
+
+    virtual int ConvertCompressedToPCM(InStream* streamIn,
+                                       OutStream* streamOut);
+
+    // Misc file functions
+
+    virtual int GetFileDuration(
+        const char* fileNameUTF8,
+        int& durationMs,
+        FileFormats format = kFileFormatPcm16kHzFile);
+
+    virtual int GetPlaybackPosition(int channel, int& positionMs);
+
+protected:
+    // Constructed with the engine's shared state; presumably created and
+    // destroyed only by VoiceEngineImpl — TODO confirm.
+    VoEFileImpl(voe::SharedData* shared);
+    virtual ~VoEFileImpl();
+
+private:
+    // Not owned; shared engine state (statistics, ADM, mixers, channels).
+    voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_FILE_IMPL_H
+
diff --git a/voice_engine/voe_hardware_impl.cc b/voice_engine/voe_hardware_impl.cc
new file mode 100644
index 0000000..7247a69
--- /dev/null
+++ b/voice_engine/voe_hardware_impl.cc
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_hardware_impl.h"
+
+#include <cassert>
+
+#include "cpu_wrapper.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+// Returns the hardware sub-API of |voiceEngine|, or NULL when the API is
+// compiled out or the engine handle is invalid. Adds a reference that the
+// caller must release.
+VoEHardware* VoEHardware::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_HARDWARE_API
+    return NULL;
+#else
+    if (voiceEngine == NULL)
+    {
+        return NULL;
+    }
+    // The engine implementation aggregates all sub-API interfaces; bump its
+    // reference count before handing it out.
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+
+// Constructor: stores the shared engine state and creates the CPU monitor.
+VoEHardwareImpl::VoEHardwareImpl(voe::SharedData* shared)
+    : _cpu(NULL),
+      _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEHardwareImpl() - ctor");
+
+    // Prime the CPU monitor: the first CpuUsage() call establishes the
+    // measurement baseline.
+    _cpu = CpuWrapper::CreateCpu();
+    if (_cpu != NULL)
+    {
+        _cpu->CpuUsage(); // init cpu usage
+    }
+}
+
+// Destructor: releases the CPU monitor.
+VoEHardwareImpl::~VoEHardwareImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "~VoEHardwareImpl() - dtor");
+
+    // delete on a null pointer is a no-op, so no guard is needed.
+    delete _cpu;
+    _cpu = NULL;
+}
+
+// Selects which platform audio backend the ADM should use. Only allowed
+// before Init(); the choice is stored and applied at the next Init().
+int VoEHardwareImpl::SetAudioDeviceLayer(AudioLayers audioLayer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetAudioDeviceLayer(audioLayer=%d)", audioLayer);
+
+    // Don't allow a change if VoE is initialized
+    if (_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_ALREADY_INITED, kTraceError);
+        return -1;
+    }
+
+    // Translate the public enum to the ADM's audio-layer enum; the platform
+    // default is kept unless a specific backend was requested.
+    AudioDeviceModule::AudioLayer
+        admLayer(AudioDeviceModule::kPlatformDefaultAudio);
+    switch (audioLayer)
+    {
+        case kAudioWindowsCore:
+            admLayer = AudioDeviceModule::kWindowsCoreAudio;
+            break;
+        case kAudioWindowsWave:
+            admLayer = AudioDeviceModule::kWindowsWaveAudio;
+            break;
+        case kAudioLinuxAlsa:
+            admLayer = AudioDeviceModule::kLinuxAlsaAudio;
+            break;
+        case kAudioLinuxPulse:
+            admLayer = AudioDeviceModule::kLinuxPulseAudio;
+            break;
+        case kAudioPlatformDefault:
+            // Keep the default set above.
+            break;
+    }
+
+    // Save the audio device layer for Init()
+    _shared->set_audio_device_layer(admLayer);
+
+    return 0;
+}
+
+// Reports the active audio backend. Queries the ADM when one exists,
+// otherwise returns VoE's stored setting. Returns 0 on success, -1 on
+// failure (ADM query error or an unrecognized layer value).
+int VoEHardwareImpl::GetAudioDeviceLayer(AudioLayers& audioLayer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetAudioDeviceLayer(devices=?)");
+
+    // Can always be called regardless of VoE state
+
+    AudioDeviceModule::AudioLayer
+        activeLayer(AudioDeviceModule::kPlatformDefaultAudio);
+
+    if (_shared->audio_device())
+    {
+        // Get active audio layer from ADM
+        if (_shared->audio_device()->ActiveAudioLayer(&activeLayer) != 0)
+        {
+            _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                "  Audio Device error");
+            return -1;
+        }
+    }
+    else
+    {
+        // Return VoE's internal layer setting
+        activeLayer = _shared->audio_device_layer();
+    }
+
+    // Map to AudioLayers
+    switch (activeLayer)
+    {
+        case AudioDeviceModule::kPlatformDefaultAudio:
+            audioLayer = kAudioPlatformDefault;
+            break;
+        case AudioDeviceModule::kWindowsCoreAudio:
+            audioLayer = kAudioWindowsCore;
+            break;
+        case AudioDeviceModule::kWindowsWaveAudio:
+            audioLayer = kAudioWindowsWave;
+            break;
+        case AudioDeviceModule::kLinuxAlsaAudio:
+            audioLayer = kAudioLinuxAlsa;
+            break;
+        case AudioDeviceModule::kLinuxPulseAudio:
+            audioLayer = kAudioLinuxPulse;
+            break;
+        default:
+            _shared->SetLastError(VE_UNDEFINED_SC_ERR, kTraceError,
+                "  unknown audio layer");
+            // Bug fix: report failure instead of returning success while
+            // leaving |audioLayer| unset.
+            return -1;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "  Output: audioLayer=%d", audioLayer);
+
+    return 0;
+}
+// Reports the number of audio capture devices through |devices|.
+// Requires an initialized engine; unsupported on Android and iPhone.
+int VoEHardwareImpl::GetNumOfRecordingDevices(int& devices)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetNumOfRecordingDevices(devices=?)");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Query the audio device module for the capture-device count.
+    const int numDevices =
+        static_cast<int>(_shared->audio_device()->RecordingDevices());
+    devices = numDevices;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1), "  Output: devices=%d", devices);
+
+    return 0;
+}
+
+// Reports the number of audio playout devices through |devices|.
+// Requires an initialized engine; unsupported on Android and iPhone.
+int VoEHardwareImpl::GetNumOfPlayoutDevices(int& devices)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetNumOfPlayoutDevices(devices=?)");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    // Query the audio device module for the playout-device count.
+    const int numDevices =
+        static_cast<int>(_shared->audio_device()->PlayoutDevices());
+    devices = numDevices;
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "  Output: devices=%d", devices);
+
+    return 0;
+}
+
+// Copies the name (and optionally the GUID) of capture device |index| into
+// the caller-supplied 128-byte buffers. |strGuidUTF8| may be NULL.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetRecordingDeviceName(int index,
+                                            char strNameUTF8[128],
+                                            char strGuidUTF8[128])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRecordingDeviceName(index=%d)", index);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (strNameUTF8 == NULL)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "GetRecordingDeviceName() invalid argument");
+        return -1;
+    }
+
+    // Note that strGuidUTF8 is allowed to be NULL
+
+    // Init len variable to length of supplied vectors
+    const WebRtc_UWord16 strLen = 128;
+
+    // Check if length has been changed in module
+    assert(strLen == kAdmMaxDeviceNameSize);
+    assert(strLen == kAdmMaxGuidSize);
+
+    char name[strLen];
+    char guid[strLen];
+
+    // Get names from module
+    if (_shared->audio_device()->RecordingDeviceName(index, name, guid) != 0)
+    {
+        _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+            "GetRecordingDeviceName() failed to get device name");
+        return -1;
+    }
+
+    // Copy to vectors supplied by user. Bug fix: strncpy() does not
+    // NUL-terminate when the source fills the buffer, so terminate
+    // explicitly.
+    strncpy(strNameUTF8, name, strLen);
+    strNameUTF8[strLen - 1] = '\0';
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "  Output: strNameUTF8=%s", strNameUTF8);
+
+    if (strGuidUTF8 != NULL)
+    {
+        strncpy(strGuidUTF8, guid, strLen);
+        strGuidUTF8[strLen - 1] = '\0';
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "  Output: strGuidUTF8=%s", strGuidUTF8);
+    }
+
+    return 0;
+}
+
+// Copies the name (and optionally the GUID) of playout device |index| into
+// the caller-supplied 128-byte buffers. |strGuidUTF8| may be NULL.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetPlayoutDeviceName(int index,
+                                          char strNameUTF8[128],
+                                          char strGuidUTF8[128])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPlayoutDeviceName(index=%d)", index);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (strNameUTF8 == NULL)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "GetPlayoutDeviceName() invalid argument");
+        return -1;
+    }
+
+    // Note that strGuidUTF8 is allowed to be NULL
+
+    // Init len variable to length of supplied vectors
+    const WebRtc_UWord16 strLen = 128;
+
+    // Check if length has been changed in module
+    assert(strLen == kAdmMaxDeviceNameSize);
+    assert(strLen == kAdmMaxGuidSize);
+
+    char name[strLen];
+    char guid[strLen];
+
+    // Get names from module
+    if (_shared->audio_device()->PlayoutDeviceName(index, name, guid) != 0)
+    {
+        _shared->SetLastError(VE_CANNOT_RETRIEVE_DEVICE_NAME, kTraceError,
+            "GetPlayoutDeviceName() failed to get device name");
+        return -1;
+    }
+
+    // Copy to vectors supplied by user. Bug fix: strncpy() does not
+    // NUL-terminate when the source fills the buffer, so terminate
+    // explicitly.
+    strncpy(strNameUTF8, name, strLen);
+    strNameUTF8[strLen - 1] = '\0';
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+        VoEId(_shared->instance_id(), -1),
+        "  Output: strNameUTF8=%s", strNameUTF8);
+
+    if (strGuidUTF8 != NULL)
+    {
+        strncpy(strGuidUTF8, guid, strLen);
+        strGuidUTF8[strLen - 1] = '\0';
+        WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+            VoEId(_shared->instance_id(), -1),
+            "  Output: strGuidUTF8=%s", strGuidUTF8);
+    }
+
+    return 0;
+}
+
+// Selects the recording (capture) device. Special index values: -1 selects
+// the platform's default communication device, -2 the default device; any
+// other value is passed to the ADM as a device index. If recording is
+// active it is stopped, the device is switched, and recording is restarted
+// (unless external recording is in use). Also (re)initializes the
+// microphone and configures stereo recording when available.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::SetRecordingDevice(int index,
+ StereoChannel recordingChannel)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetRecordingDevice(index=%d, recordingChannel=%d)",
+ index, (int) recordingChannel);
+ CriticalSectionScoped cs(_shared->crit_sec());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+ // TODO(leozwang): Add this api to Android OpenSL ES implementation.
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ bool isRecording(false);
+
+ // Store state about activated recording to be able to restore it after the
+ // recording device has been modified.
+ if (_shared->audio_device()->Recording())
+ {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetRecordingDevice() device is modified while recording"
+ " is active...");
+ isRecording = true;
+ if (_shared->audio_device()->StopRecording() == -1)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+ "SetRecordingDevice() unable to stop recording");
+ return -1;
+ }
+ }
+
+ // We let the module do the index sanity
+
+ // Set recording channel
+ AudioDeviceModule::ChannelType recCh =
+ AudioDeviceModule::kChannelBoth;
+ switch (recordingChannel)
+ {
+ case kStereoLeft:
+ recCh = AudioDeviceModule::kChannelLeft;
+ break;
+ case kStereoRight:
+ recCh = AudioDeviceModule::kChannelRight;
+ break;
+ case kStereoBoth:
+ // default setting kChannelBoth (<=> mono)
+ break;
+ }
+
+ // Channel-selection failure is deliberately non-fatal (warning only).
+ if (_shared->audio_device()->SetRecordingChannel(recCh) != 0) {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning,
+ "SetRecordingChannel() unable to set the recording channel");
+ }
+
+ // Map indices to unsigned since underlying functions need that
+ WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
+
+ WebRtc_Word32 res(0);
+
+ if (index == -1)
+ {
+ res = _shared->audio_device()->SetRecordingDevice(
+ AudioDeviceModule::kDefaultCommunicationDevice);
+ }
+ else if (index == -2)
+ {
+ res = _shared->audio_device()->SetRecordingDevice(
+ AudioDeviceModule::kDefaultDevice);
+ }
+ else
+ {
+ res = _shared->audio_device()->SetRecordingDevice(indexU);
+ }
+
+ if (res != 0)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+ "SetRecordingDevice() unable to set the recording device");
+ return -1;
+ }
+
+ // Init microphone, so user can do volume settings etc
+ if (_shared->audio_device()->InitMicrophone() == -1)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_MIC_VOL, kTraceWarning,
+ "SetRecordingDevice() cannot access microphone");
+ }
+
+ // Set number of channels
+ bool available = false;
+ if (_shared->audio_device()->StereoRecordingIsAvailable(&available) != 0) {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "StereoRecordingIsAvailable() failed to query stereo recording");
+ }
+
+ if (_shared->audio_device()->SetStereoRecording(available) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "SetRecordingDevice() failed to set mono recording mode");
+ }
+
+ // Restore recording if it was enabled already when calling this function.
+ if (isRecording)
+ {
+ if (!_shared->ext_recording())
+ {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetRecordingDevice() recording is now being restored...");
+ if (_shared->audio_device()->InitRecording() != 0)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetRecordingDevice() failed to initialize recording");
+ return -1;
+ }
+ if (_shared->audio_device()->StartRecording() != 0)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetRecordingDevice() failed to start recording");
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+// Selects the playout (render) device. Index -1 selects the default
+// communication device, -2 the default device; other values are passed to
+// the ADM as a device index. Active playout is stopped before the switch
+// and restored afterwards (unless external playout is in use). Also
+// (re)initializes the speaker and configures stereo playout when available.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::SetPlayoutDevice(int index)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetPlayoutDevice(index=%d)", index);
+ CriticalSectionScoped cs(_shared->crit_sec());
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ bool isPlaying(false);
+
+ // Store state about activated playout to be able to restore it after the
+ // playout device has been modified.
+ if (_shared->audio_device()->Playing())
+ {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetPlayoutDevice() device is modified while playout is "
+ "active...");
+ isPlaying = true;
+ if (_shared->audio_device()->StopPlayout() == -1)
+ {
+ _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+ "SetPlayoutDevice() unable to stop playout");
+ return -1;
+ }
+ }
+
+ // We let the module do the index sanity
+
+ // Map indices to unsigned since underlying functions need that
+ WebRtc_UWord16 indexU = static_cast<WebRtc_UWord16> (index);
+
+ WebRtc_Word32 res(0);
+
+ if (index == -1)
+ {
+ res = _shared->audio_device()->SetPlayoutDevice(
+ AudioDeviceModule::kDefaultCommunicationDevice);
+ }
+ else if (index == -2)
+ {
+ res = _shared->audio_device()->SetPlayoutDevice(
+ AudioDeviceModule::kDefaultDevice);
+ }
+ else
+ {
+ res = _shared->audio_device()->SetPlayoutDevice(indexU);
+ }
+
+ if (res != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
+ "SetPlayoutDevice() unable to set the playout device");
+ return -1;
+ }
+
+ // Init speaker, so user can do volume settings etc
+ if (_shared->audio_device()->InitSpeaker() == -1)
+ {
+ _shared->SetLastError(VE_CANNOT_ACCESS_SPEAKER_VOL, kTraceWarning,
+ "SetPlayoutDevice() cannot access speaker");
+ }
+
+ // Set number of channels
+ bool available = false;
+ // NOTE(review): the return value of StereoPlayoutIsAvailable() is
+ // ignored here, unlike the recording path in SetRecordingDevice() which
+ // raises a warning on failure -- confirm whether this asymmetry is
+ // intentional.
+ _shared->audio_device()->StereoPlayoutIsAvailable(&available);
+ if (_shared->audio_device()->SetStereoPlayout(available) != 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceWarning,
+ "SetPlayoutDevice() failed to set stereo playout mode");
+ }
+
+ // Restore playout if it was enabled already when calling this function.
+ if (isPlaying)
+ {
+ if (!_shared->ext_playout())
+ {
+ WEBRTC_TRACE(kTraceInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetPlayoutDevice() playout is now being restored...");
+ if (_shared->audio_device()->InitPlayout() != 0)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetPlayoutDevice() failed to initialize playout");
+ return -1;
+ }
+ if (_shared->audio_device()->StartPlayout() != 0)
+ {
+ WEBRTC_TRACE(kTraceError, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetPlayoutDevice() failed to start playout");
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+// Queries the ADM whether recording is currently available and writes the
+// answer to |isAvailable|. Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetRecordingDeviceStatus(bool& isAvailable)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetRecordingDeviceStatus()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ // We let the module do isRecording sanity
+
+ bool available(false);
+
+ // Check availability
+ if (_shared->audio_device()->RecordingIsAvailable(&available) != 0)
+ {
+ _shared->SetLastError(VE_UNDEFINED_SC_REC_ERR, kTraceError,
+ " Audio Device error");
+ return -1;
+ }
+
+ isAvailable = available;
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ " Output: isAvailable = %d)", (int) isAvailable);
+
+ return 0;
+}
+
+// Queries the ADM whether playout is currently available and writes the
+// answer to |isAvailable|. Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetPlayoutDeviceStatus(bool& isAvailable)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetPlayoutDeviceStatus()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ // We let the module do isPlaying sanity
+
+ bool available(false);
+
+ // Check availability
+ if (_shared->audio_device()->PlayoutIsAvailable(&available) != 0)
+ {
+ _shared->SetLastError(VE_PLAY_UNDEFINED_SC_ERR, kTraceError,
+ " Audio Device error");
+ return -1;
+ }
+
+ isAvailable = available;
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ " Output: isAvailable = %d)", (int) isAvailable);
+
+ return 0;
+}
+
+// Resets the sound device. Only implemented for iOS (WEBRTC_IOS); on every
+// other platform the call fails with VE_FUNC_NOT_SUPPORTED.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::ResetAudioDevice()
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "ResetAudioDevice()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+#if defined(WEBRTC_IOS)
+ if (_shared->audio_device()->ResetAudioDevice() < 0)
+ {
+ _shared->SetLastError(VE_SOUNDCARD_ERROR, kTraceError,
+ " Failed to reset sound device");
+ return -1;
+ }
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ " no support for resetting sound device");
+ return -1;
+#endif
+
+ return 0;
+}
+
+// Generic pass-through audio device control. Not supported on any platform
+// in this build: after the usual init check the call always fails with
+// VE_FUNC_NOT_SUPPORTED. The three parameters are accepted for interface
+// compatibility only and are ignored.
+// Returns -1 always (after init check).
+int VoEHardwareImpl::AudioDeviceControl(unsigned int par1, unsigned int par2,
+ unsigned int par3)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "AudioDeviceControl(%i, %i, %i)", par1, par2, par3);
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ // Fixed error text: previous message (" no support for resetting sound
+ // device") was copy-pasted from ResetAudioDevice() and misleading here.
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ " no support for AudioDeviceControl");
+ return -1;
+}
+
+// Routes audio to/from the loudspeaker. Only implemented on Android
+// (WEBRTC_ANDROID); elsewhere fails with VE_FUNC_NOT_SUPPORTED.
+// Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::SetLoudspeakerStatus(bool enable)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetLoudspeakerStatus(enable=%i)", (int) enable);
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+#if defined(WEBRTC_ANDROID)
+ if (_shared->audio_device()->SetLoudspeakerStatus(enable) < 0)
+ {
+ _shared->SetLastError(VE_IGNORED_FUNCTION, kTraceError,
+ " Failed to set loudspeaker status");
+ return -1;
+ }
+
+ return 0;
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ " no support for setting loudspeaker status");
+ return -1;
+#endif
+}
+
+// Reads the current loudspeaker routing state into |enabled|. Only
+// implemented on Android (WEBRTC_ANDROID); elsewhere fails with
+// VE_FUNC_NOT_SUPPORTED. Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetLoudspeakerStatus(bool& enabled)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetLoudspeakerStatus()");
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+#if defined(WEBRTC_ANDROID)
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ if (_shared->audio_device()->GetLoudspeakerStatus(&enabled) < 0)
+ {
+ _shared->SetLastError(VE_IGNORED_FUNCTION, kTraceError,
+ " Failed to get loudspeaker status");
+ return -1;
+ }
+
+ return 0;
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ " no support for setting loudspeaker status");
+ return -1;
+#endif
+}
+
+// Reads the process CPU load (percent) as reported by the audio device
+// module into |loadPercent|. Returns 0 on success, -1 on failure.
+int VoEHardwareImpl::GetCPULoad(int& loadPercent)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetCPULoad()");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ // Get CPU load from ADM
+ WebRtc_UWord16 load(0);
+ if (_shared->audio_device()->CPULoad(&load) != 0)
+ {
+ _shared->SetLastError(VE_CPU_INFO_ERROR, kTraceError,
+ " error getting system CPU load");
+ return -1;
+ }
+
+ loadPercent = static_cast<int> (load);
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ " Output: loadPercent = %d", loadPercent);
+
+ return 0;
+}
+
+// Reads the system-wide CPU load (percent) via the CpuWrapper member
+// |_cpu| into |loadPercent|. Fails with VE_FUNC_NOT_SUPPORTED when no
+// CpuWrapper exists for this platform. Returns 0 on success, -1 on
+// failure.
+int VoEHardwareImpl::GetSystemCPULoad(int& loadPercent)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetSystemCPULoad(loadPercent=?)");
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ // Check if implemented for this platform
+ if (!_cpu)
+ {
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ " no support for getting system CPU load");
+ return -1;
+ }
+
+ // Get CPU load
+ WebRtc_Word32 load = _cpu->CpuUsage();
+ if (load < 0)
+ {
+ _shared->SetLastError(VE_CPU_INFO_ERROR, kTraceError,
+ " error getting system CPU load");
+ return -1;
+ }
+
+ loadPercent = static_cast<int> (load);
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ " Output: loadPercent = %d", loadPercent);
+
+ return 0;
+}
+
+// Enables/disables the audio device's built-in acoustic echo canceller.
+// Thin pass-through to the ADM; returns its result (-1 if not initialized).
+int VoEHardwareImpl::EnableBuiltInAEC(bool enable)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "%s", __FUNCTION__);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ return _shared->audio_device()->EnableBuiltInAEC(enable);
+}
+
+// Returns whether the audio device's built-in AEC is currently enabled.
+// Returns false (and sets VE_NOT_INITED) when the engine is uninitialized,
+// which is indistinguishable from "AEC disabled" for the caller.
+bool VoEHardwareImpl::BuiltInAECIsEnabled() const
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "%s", __FUNCTION__);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return false;
+ }
+
+ return _shared->audio_device()->BuiltInAECIsEnabled();
+}
+
+#endif // WEBRTC_VOICE_ENGINE_HARDWARE_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_hardware_impl.h b/voice_engine/voe_hardware_impl.h
new file mode 100644
index 0000000..c801228
--- /dev/null
+++ b/voice_engine/voe_hardware_impl.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
+
+#include "voe_hardware.h"
+
+#include "shared_data.h"
+
+namespace webrtc
+{
+class CpuWrapper;
+
+// Implementation of the VoEHardware sub-API: audio device enumeration and
+// selection, loudspeaker routing, CPU-load queries and built-in AEC
+// control. Holds a non-owning pointer to the engine's SharedData and an
+// optional CpuWrapper for system CPU load. Constructed/destroyed only by
+// the engine (protected ctor/dtor).
+class VoEHardwareImpl: public VoEHardware
+{
+public:
+ virtual int GetNumOfRecordingDevices(int& devices);
+
+ virtual int GetNumOfPlayoutDevices(int& devices);
+
+ virtual int GetRecordingDeviceName(int index,
+ char strNameUTF8[128],
+ char strGuidUTF8[128]);
+
+ virtual int GetPlayoutDeviceName(int index,
+ char strNameUTF8[128],
+ char strGuidUTF8[128]);
+
+ virtual int GetRecordingDeviceStatus(bool& isAvailable);
+
+ virtual int GetPlayoutDeviceStatus(bool& isAvailable);
+
+ virtual int SetRecordingDevice(
+ int index,
+ StereoChannel recordingChannel = kStereoBoth);
+
+ virtual int SetPlayoutDevice(int index);
+
+ virtual int SetAudioDeviceLayer(AudioLayers audioLayer);
+
+ virtual int GetAudioDeviceLayer(AudioLayers& audioLayer);
+
+ virtual int GetCPULoad(int& loadPercent);
+
+ virtual int GetSystemCPULoad(int& loadPercent);
+
+ virtual int ResetAudioDevice();
+
+ virtual int AudioDeviceControl(unsigned int par1,
+ unsigned int par2,
+ unsigned int par3);
+
+ virtual int SetLoudspeakerStatus(bool enable);
+
+ virtual int GetLoudspeakerStatus(bool& enabled);
+
+ virtual int EnableBuiltInAEC(bool enable);
+ virtual bool BuiltInAECIsEnabled() const;
+
+protected:
+ VoEHardwareImpl(voe::SharedData* shared);
+ virtual ~VoEHardwareImpl();
+
+private:
+ // System CPU usage probe; may be NULL on unsupported platforms.
+ CpuWrapper* _cpu;
+ // Engine-wide shared state; not owned.
+ voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_HARDWARE_IMPL_H
diff --git a/voice_engine/voe_neteq_stats_impl.cc b/voice_engine/voe_neteq_stats_impl.cc
new file mode 100644
index 0000000..50f2dfb
--- /dev/null
+++ b/voice_engine/voe_neteq_stats_impl.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_neteq_stats_impl.h"
+
+#include "audio_coding_module.h"
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+
+namespace webrtc {
+
+// Returns the VoENetEqStats sub-API for |voiceEngine|, adding a reference
+// that the caller must release. Returns NULL when the API is compiled out
+// or |voiceEngine| is NULL.
+VoENetEqStats* VoENetEqStats::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+ return NULL;
+#else
+ if (NULL == voiceEngine)
+ {
+ return NULL;
+ }
+ VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+ s->AddRef();
+ return s;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+
+// Stores the (non-owned) SharedData pointer and traces construction.
+VoENetEqStatsImpl::VoENetEqStatsImpl(voe::SharedData* shared) : _shared(shared)
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "VoENetEqStatsImpl::VoENetEqStatsImpl() - ctor");
+}
+
+// Traces destruction; _shared is not owned and therefore not freed here.
+VoENetEqStatsImpl::~VoENetEqStatsImpl()
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "VoENetEqStatsImpl::~VoENetEqStatsImpl() - dtor");
+}
+
+// Retrieves NetEQ jitter-buffer statistics for |channel| into |stats| by
+// delegating to the channel object. Returns 0 on success, -1 when the
+// engine is not initialized or the channel cannot be found.
+int VoENetEqStatsImpl::GetNetworkStatistics(int channel,
+ NetworkStatistics& stats)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetNetworkStatistics(channel=%d, stats=?)", channel);
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "GetNetworkStatistics() failed to locate channel");
+ return -1;
+ }
+
+ return channelPtr->GetNetworkStatistics(stats);
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_neteq_stats_impl.h b/voice_engine/voe_neteq_stats_impl.h
new file mode 100644
index 0000000..1b077b3
--- /dev/null
+++ b/voice_engine/voe_neteq_stats_impl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
+
+#include "voe_neteq_stats.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoENetEqStats sub-API: exposes per-channel NetEQ
+// network/jitter-buffer statistics. Constructed/destroyed only by the
+// engine (protected ctor/dtor).
+class VoENetEqStatsImpl : public VoENetEqStats
+{
+public:
+ virtual int GetNetworkStatistics(int channel,
+ NetworkStatistics& stats);
+
+protected:
+ VoENetEqStatsImpl(voe::SharedData* shared);
+ virtual ~VoENetEqStatsImpl();
+
+private:
+ // Engine-wide shared state; not owned.
+ voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_NETEQ_STATS_IMPL_H
diff --git a/voice_engine/voe_network_impl.cc b/voice_engine/voe_network_impl.cc
new file mode 100644
index 0000000..d0b9895
--- /dev/null
+++ b/voice_engine/voe_network_impl.cc
@@ -0,0 +1,872 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_network_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc
+{
+
+// Returns the VoENetwork sub-API for |voiceEngine|, adding a reference
+// that the caller must release. Returns NULL when the API is compiled out
+// or |voiceEngine| is NULL.
+VoENetwork* VoENetwork::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_NETWORK_API
+ return NULL;
+#else
+ if (NULL == voiceEngine)
+ {
+ return NULL;
+ }
+ VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+ s->AddRef();
+ return s;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+
+// Stores the (non-owned) SharedData pointer and traces construction.
+VoENetworkImpl::VoENetworkImpl(voe::SharedData* shared) : _shared(shared)
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "VoENetworkImpl() - ctor");
+}
+
+// Traces destruction; _shared is not owned and therefore not freed here.
+VoENetworkImpl::~VoENetworkImpl()
+{
+ WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "~VoENetworkImpl() - dtor");
+}
+
+// Installs an application-supplied Transport on |channel| so that RTP/RTCP
+// is sent through the application instead of the built-in sockets. The
+// transport object must outlive the registration. Returns 0 on success,
+// -1 on failure.
+int VoENetworkImpl::RegisterExternalTransport(int channel,
+ Transport& transport)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetExternalTransport(channel=%d, transport=0x%x)",
+ channel, &transport);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetExternalTransport() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->RegisterExternalTransport(transport);
+}
+
+// Removes a previously registered external Transport from |channel|.
+// Returns 0 on success, -1 on failure.
+int VoENetworkImpl::DeRegisterExternalTransport(int channel)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "DeRegisterExternalTransport(channel=%d)", channel);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "DeRegisterExternalTransport() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->DeRegisterExternalTransport();
+}
+
+// Injects an incoming RTP packet into |channel|. Only valid when an
+// external transport is registered on the channel. Returns 0 on success,
+// -1 on failure.
+int VoENetworkImpl::ReceivedRTPPacket(int channel,
+ const void* data,
+ unsigned int length)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "ReceivedRTPPacket(channel=%d, length=%u)", channel, length);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ // 12 bytes is the minimal RTP header size. NOTE(review): the 807-byte
+ // upper bound appears to be a historical maximum-packet limit -- confirm
+ // it still matches what the channel layer accepts.
+ if ((length < 12) || (length > 807))
+ {
+ _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
+ "ReceivedRTPPacket() invalid packet length");
+ return -1;
+ }
+ if (NULL == data)
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "ReceivedRTPPacket() invalid data vector");
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "ReceivedRTPPacket() failed to locate channel");
+ return -1;
+ }
+
+ if (!channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+ "ReceivedRTPPacket() external transport is not enabled");
+ return -1;
+ }
+ return channelPtr->ReceivedRTPPacket((const WebRtc_Word8*) data, length);
+}
+
+// Injects an incoming RTCP packet into |channel|. Only valid when an
+// external transport is registered on the channel. Packets shorter than
+// the 4-byte minimum are rejected. Returns 0 on success, -1 on failure.
+int VoENetworkImpl::ReceivedRTCPPacket(int channel, const void* data,
+ unsigned int length)
+{
+ WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "ReceivedRTCPPacket(channel=%d, length=%u)", channel, length);
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (length < 4)
+ {
+ _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
+ "ReceivedRTCPPacket() invalid packet length");
+ return -1;
+ }
+ if (NULL == data)
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "ReceivedRTCPPacket() invalid data vector");
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "ReceivedRTCPPacket() failed to locate channel");
+ return -1;
+ }
+ if (!channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_INVALID_OPERATION, kTraceError,
+ "ReceivedRTCPPacket() external transport is not enabled");
+ return -1;
+ }
+ return channelPtr->ReceivedRTCPPacket((const WebRtc_Word8*) data, length);
+}
+
+// Returns the remote source's RTP/RTCP ports and IP address (written to
+// the 64-byte |ipAddr| buffer) for |channel|. Not available when the
+// channel uses an external transport, or in external-transport builds.
+// Returns 0 on success, -1 on failure.
+int VoENetworkImpl::GetSourceInfo(int channel,
+ int& rtpPort,
+ int& rtcpPort,
+ char ipAddr[64])
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetSourceInfo(channel=%d, rtpPort=?, rtcpPort=?, ipAddr[]=?)",
+ channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (NULL == ipAddr)
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "GetSourceInfo() invalid IP-address buffer");
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "GetSourceInfo() failed to locate channel");
+ return -1;
+ }
+ if (channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+ "GetSourceInfo() external transport is enabled");
+ return -1;
+ }
+ return channelPtr->GetSourceInfo(rtpPort, rtcpPort, ipAddr);
+#else
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+ "GetSourceInfo() VoE is built for external transport");
+ return -1;
+#endif
+}
+
+// Writes the local IP address into the 64-byte |ipAddr| buffer: dotted
+// decimal for IPv4, colon-separated hex groups when |ipv6| is true. A
+// temporary UdpTransport socket module is created so the call also works
+// before any channel exists. Not available in external-transport builds.
+// Returns 0 on success, -1 on failure.
+int VoENetworkImpl::GetLocalIP(char ipAddr[64], bool ipv6)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetLocalIP(ipAddr[]=?, ipv6=%d)", ipv6);
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (NULL == ipAddr)
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "GetLocalIP() invalid IP-address buffer");
+ return -1;
+ }
+
+ // Create a temporary socket module to ensure that this method can be
+ // called also when no channels are created.
+ WebRtc_UWord8 numSockThreads(1);
+ UdpTransport* socketPtr =
+ UdpTransport::Create(
+ -1,
+ numSockThreads);
+ if (NULL == socketPtr)
+ {
+ _shared->SetLastError(VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceError,
+ "GetLocalIP() failed to create socket module");
+ return -1;
+ }
+
+ // Use a buffer big enough for IPv6 addresses and initialize it with zeros.
+ char localIPAddr[256] = {0};
+
+ if (ipv6)
+ {
+ char localIP[16];
+ if (socketPtr->LocalHostAddressIPV6(localIP) != 0)
+ {
+ _shared->SetLastError(VE_INVALID_IP_ADDRESS, kTraceError,
+ "GetLocalIP() failed to retrieve local IP - 1");
+ UdpTransport::Destroy(socketPtr);
+ return -1;
+ }
+ // Convert 128-bit address to character string (a:b:c:d:e:f:g:h).
+ // Each byte is cast to unsigned char: plain char is signed on most
+ // platforms, and the default argument promotion would sign-extend
+ // bytes >= 0x80 so that "%.2x" prints e.g. "ffffff80" instead of
+ // "80", corrupting the formatted address.
+ sprintf(localIPAddr,
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x"
+ "%.2x:%.2x%.2x",
+ static_cast<unsigned char>(localIP[0]),
+ static_cast<unsigned char>(localIP[1]),
+ static_cast<unsigned char>(localIP[2]),
+ static_cast<unsigned char>(localIP[3]),
+ static_cast<unsigned char>(localIP[4]),
+ static_cast<unsigned char>(localIP[5]),
+ static_cast<unsigned char>(localIP[6]),
+ static_cast<unsigned char>(localIP[7]),
+ static_cast<unsigned char>(localIP[8]),
+ static_cast<unsigned char>(localIP[9]),
+ static_cast<unsigned char>(localIP[10]),
+ static_cast<unsigned char>(localIP[11]),
+ static_cast<unsigned char>(localIP[12]),
+ static_cast<unsigned char>(localIP[13]),
+ static_cast<unsigned char>(localIP[14]),
+ static_cast<unsigned char>(localIP[15]));
+ }
+ else
+ {
+ WebRtc_UWord32 localIP(0);
+ // Read local IP (as 32-bit address) from the socket module
+ if (socketPtr->LocalHostAddress(localIP) != 0)
+ {
+ _shared->SetLastError(VE_INVALID_IP_ADDRESS, kTraceError,
+ "GetLocalIP() failed to retrieve local IP - 2");
+ UdpTransport::Destroy(socketPtr);
+ return -1;
+ }
+ // Convert 32-bit address to character string (x.y.z.w)
+ sprintf(localIPAddr, "%d.%d.%d.%d", (int) ((localIP >> 24) & 0x0ff),
+ (int) ((localIP >> 16) & 0x0ff),
+ (int) ((localIP >> 8) & 0x0ff),
+ (int) (localIP & 0x0ff));
+ }
+
+ strcpy(ipAddr, localIPAddr);
+
+ UdpTransport::Destroy(socketPtr);
+
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "GetLocalIP() => ipAddr=%s", ipAddr);
+ return 0;
+#else
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+ "GetLocalIP() VoE is built for external transport");
+ return -1;
+#endif
+}
+
+// Switches |channel| to IPv6 sockets. Not available when the channel uses
+// an external transport, or in external-transport builds. Returns 0 on
+// success, -1 on failure.
+int VoENetworkImpl::EnableIPv6(int channel)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "EnableIPv6(channel=%d)", channel);
+ ANDROID_NOT_SUPPORTED(_shared->statistics());
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "EnableIPv6() failed to locate channel");
+ return -1;
+ }
+ if (channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+ "EnableIPv6() external transport is enabled");
+ return -1;
+ }
+ return channelPtr->EnableIPv6();
+#else
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+ "EnableIPv6() VoE is built for external transport");
+ return -1;
+#endif
+}
+
+// Returns whether |channel| is using IPv6. Returns false both for "IPv6
+// off" and for every error path (uninitialized engine, unknown channel,
+// external transport) -- callers must check the last error to tell the
+// cases apart.
+bool VoENetworkImpl::IPv6IsEnabled(int channel)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "IPv6IsEnabled(channel=%d)", channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return false;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "IPv6IsEnabled() failed to locate channel");
+ return false;
+ }
+ if (channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+ "IPv6IsEnabled() external transport is enabled");
+ return false;
+ }
+ return channelPtr->IPv6IsEnabled();
+#else
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+ "IPv6IsEnabled() VoE is built for external transport");
+ return false;
+#endif
+}
+
+// Installs a source filter on |channel| so that only packets from the
+// given RTP/RTCP ports and (optionally) IP address are accepted. |ipAddr|
+// may be NULL. Ports must be in [0, 65535]. Not available when the channel
+// uses an external transport, or in external-transport builds. Returns 0
+// on success, -1 on failure.
+int VoENetworkImpl::SetSourceFilter(int channel,
+ int rtpPort,
+ int rtcpPort,
+ const char ipAddr[64])
+{
+ // Two trace variants so a NULL ipAddr is never passed to "%s".
+ (ipAddr == NULL) ? WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetSourceFilter(channel=%d, rtpPort=%d,"
+ " rtcpPort=%d)",
+ channel, rtpPort, rtcpPort)
+ : WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "SetSourceFilter(channel=%d, rtpPort=%d,"
+ " rtcpPort=%d, ipAddr=%s)",
+ channel, rtpPort, rtcpPort, ipAddr);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if ((rtpPort < 0) || (rtpPort > 65535))
+ {
+ _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+ "SetSourceFilter() invalid RTP port");
+ return -1;
+ }
+ if ((rtcpPort < 0) || (rtcpPort > 65535))
+ {
+ _shared->SetLastError(VE_INVALID_PORT_NMBR, kTraceError,
+ "SetSourceFilter() invalid RTCP port");
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetSourceFilter() failed to locate channel");
+ return -1;
+ }
+ if (channelPtr->ExternalTransport())
+ {
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+ "SetSourceFilter() external transport is enabled");
+ return -1;
+ }
+ return channelPtr->SetSourceFilter(rtpPort, rtcpPort, ipAddr);
+#else
+ _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+ "SetSourceFilter() VoE is built for external transport");
+ return -1;
+#endif
+}
+
+// Retrieves the currently installed source filter for |channel|.
+// |ipAddr| must point to a caller-owned 64-byte buffer.
+int VoENetworkImpl::GetSourceFilter(int channel,
+                                    int& rtpPort,
+                                    int& rtcpPort,
+                                    char ipAddr[64])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSourceFilter(channel=%d, rtpPort=?, rtcpPort=?, "
+                 "ipAddr[]=?)",
+                 channel);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (ipAddr == NULL) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "GetSourceFilter() invalid IP-address buffer");
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSourceFilter() failed to locate channel");
+        return -1;
+    }
+    if (channel_ptr->ExternalTransport()) {
+        _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSourceFilter() external transport is enabled");
+        return -1;
+    }
+    return channel_ptr->GetSourceFilter(rtpPort, rtcpPort, ipAddr);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSourceFilter() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Sets the Type-Of-Service (DSCP) field, and optionally an 802.1p-style
+// priority, for outgoing RTP/RTCP on |channel|.
+//
+// DSCP must be in [0, 63]. |priority| is accepted on Windows and Linux only
+// (range [-1, 7], -1 = unset); elsewhere it must be -1. On Windows a
+// non-negative priority cannot be combined with |useSetSockopt|; on
+// Linux/Mac the setsockopt() path is the only implementation and is forced.
+// Returns 0 on success, -1 on failure (last error set on _shared).
+int VoENetworkImpl::SetSendTOS(int channel,
+                               int DSCP,
+                               int priority,
+                               bool useSetSockopt)
+{
+    // Bug fix: the API trace previously omitted the |priority| argument.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSendTOS(channel=%d, DSCP=%d, priority=%d,"
+                 " useSetSockopt=%d)",
+                 channel, DSCP, priority, useSetSockopt);
+
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "SetSendTOS() is not supported on this platform");
+    return -1;
+#endif
+
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // DSCP occupies the upper 6 bits of the IP TOS byte.
+    if ((DSCP < 0) || (DSCP > 63))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTOS() Invalid DSCP value");
+        return -1;
+    }
+#if defined(_WIN32) || defined(WEBRTC_LINUX)
+    if ((priority < -1) || (priority > 7))
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTOS() Invalid priority value");
+        return -1;
+    }
+#else
+    if (-1 != priority)
+    {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTOS() priority not supported");
+        return -1;
+    }
+#endif
+#if defined(_WIN32)
+    if ((priority >= 0) && useSetSockopt)
+    {
+        // On Windows, priority and useSetSockopt cannot be combined
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSendTOS() priority and useSetSockopt conflict");
+        return -1;
+    }
+#endif
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendTOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendTOS() external transport is enabled");
+        return -1;
+    }
+#if defined(WEBRTC_LINUX) || defined(WEBRTC_MAC)
+    useSetSockopt = true;
+    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 " force useSetSockopt=true since there is no alternative"
+                 " implementation");
+#endif
+
+    return channelPtr->SetSendTOS(DSCP, priority, useSetSockopt);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSendTOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Retrieves the TOS/DSCP configuration currently applied to |channel|.
+int VoENetworkImpl::GetSendTOS(int channel,
+                               int& DSCP,
+                               int& priority,
+                               bool& useSetSockopt)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSendTOS(channel=%d)", channel);
+
+#if !defined(_WIN32) && !defined(WEBRTC_LINUX) && !defined(WEBRTC_MAC)
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "GetSendTOS() is not supported on this platform");
+    return -1;
+#endif
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendTOS() failed to locate channel");
+        return -1;
+    }
+    if (channel_ptr->ExternalTransport()) {
+        _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendTOS() external transport is enabled");
+        return -1;
+    }
+    return channel_ptr->GetSendTOS(DSCP, priority, useSetSockopt);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSendTOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Enables or disables Windows GQoS for outgoing media on |channel|.
+// |serviceType| and |overrideDSCP| are forwarded unvalidated to the
+// channel (presumably Winsock QOS service types — confirm in channel.h).
+// Only implemented on Windows builds without external transport; all other
+// configurations fail with -1 and a warning in the trace log.
+int VoENetworkImpl::SetSendGQoS(int channel,
+                                bool enable,
+                                int serviceType,
+                                int overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSendGQOS(channel=%d, enable=%d, serviceType=%d,"
+                 " overrideDSCP=%d)",
+                 channel, (int) enable, serviceType, overrideDSCP);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#if !defined(_WIN32)
+    // GQoS is a Winsock-only facility.
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "SetSendGQOS() is not supported on this platform");
+    return -1;
+#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetSendGQOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        // QoS applies to the internal socket transport only.
+        _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "SetSendGQOS() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->SetSendGQoS(enable, serviceType, overrideDSCP);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SetSendGQOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Retrieves the GQoS state previously configured with SetSendGQoS() for
+// |channel|. Only implemented on Windows builds without external
+// transport; all other configurations fail with -1 and a trace warning.
+int VoENetworkImpl::GetSendGQoS(int channel,
+                                bool& enabled,
+                                int& serviceType,
+                                int& overrideDSCP)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSendGQOS(channel=%d)", channel);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+#if !defined(_WIN32)
+    // GQoS is a Winsock-only facility.
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceWarning,
+        "GetSendGQOS() is not supported on this platform");
+    return -1;
+#elif !defined(WEBRTC_EXTERNAL_TRANSPORT)
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSendGQOS() failed to locate channel");
+        return -1;
+    }
+    if (channelPtr->ExternalTransport())
+    {
+        // QoS applies to the internal socket transport only.
+        _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceError,
+            "GetSendGQOS() external transport is enabled");
+        return -1;
+    }
+    return channelPtr->GetSendGQoS(enabled, serviceType, overrideDSCP);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "GetSendGQOS() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+// Enables/disables packet-timeout notification on |channel|. When enabling,
+// |timeoutSeconds| must lie within the engine's allowed range.
+int VoENetworkImpl::SetPacketTimeoutNotification(int channel,
+                                                 bool enable,
+                                                 int timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetPacketTimeoutNotification(channel=%d, enable=%d, "
+                 "timeoutSeconds=%d)",
+                 channel, (int) enable, timeoutSeconds);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    const bool timeout_out_of_range =
+        (timeoutSeconds < kVoiceEngineMinPacketTimeoutSec) ||
+        (timeoutSeconds > kVoiceEngineMaxPacketTimeoutSec);
+    if (enable && timeout_out_of_range) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetPacketTimeoutNotification() invalid timeout size");
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetPacketTimeoutNotification() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetPacketTimeoutNotification(enable, timeoutSeconds);
+}
+
+// Retrieves the packet-timeout notification settings for |channel|.
+int VoENetworkImpl::GetPacketTimeoutNotification(int channel,
+                                                 bool& enabled,
+                                                 int& timeoutSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPacketTimeoutNotification(channel=%d, enabled=?,"
+                 " timeoutSeconds=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPacketTimeoutNotification() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetPacketTimeoutNotification(enabled, timeoutSeconds);
+}
+
+// Attaches |observer| to |channel| for dead-or-alive connection callbacks.
+int VoENetworkImpl::RegisterDeadOrAliveObserver(int channel,
+                                                VoEConnectionObserver&
+                                                observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RegisterDeadOrAliveObserver(channel=%d, observer=0x%x)",
+                 channel, &observer);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterDeadOrAliveObserver() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->RegisterDeadOrAliveObserver(observer);
+}
+
+// Detaches any dead-or-alive observer previously attached to |channel|.
+int VoENetworkImpl::DeRegisterDeadOrAliveObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterDeadOrAliveObserver(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterDeadOrAliveObserver() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->DeRegisterDeadOrAliveObserver();
+}
+
+// Enables/disables periodic dead-or-alive status reporting on |channel|.
+// When enabling, |sampleTimeSeconds| must lie within the allowed range.
+int VoENetworkImpl::SetPeriodicDeadOrAliveStatus(int channel, bool enable,
+                                                 int sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetPeriodicDeadOrAliveStatus(channel=%d, enable=%d,"
+                 " sampleTimeSeconds=%d)",
+                 channel, enable, sampleTimeSeconds);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    const bool sample_time_out_of_range =
+        (sampleTimeSeconds < kVoiceEngineMinSampleTimeSec) ||
+        (sampleTimeSeconds > kVoiceEngineMaxSampleTimeSec);
+    if (enable && sample_time_out_of_range) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetPeriodicDeadOrAliveStatus() invalid sample time");
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetPeriodicDeadOrAliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetPeriodicDeadOrAliveStatus(enable,
+                                                     sampleTimeSeconds);
+}
+
+// Retrieves the periodic dead-or-alive reporting settings for |channel|.
+int VoENetworkImpl::GetPeriodicDeadOrAliveStatus(int channel,
+                                                 bool& enabled,
+                                                 int& sampleTimeSeconds)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPeriodicDeadOrAliveStatus(channel=%d, enabled=?,"
+                 " sampleTimeSeconds=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPeriodicDeadOrAliveStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetPeriodicDeadOrAliveStatus(enabled,
+                                                     sampleTimeSeconds);
+}
+
+// Sends a raw UDP datagram on the channel's RTP (or, when |useRtcpSocket|
+// is true, RTCP) socket. On success the number of bytes sent is reported
+// through |transmittedBytes|.
+int VoENetworkImpl::SendUDPPacket(int channel,
+                                  const void* data,
+                                  unsigned int length,
+                                  int& transmittedBytes,
+                                  bool useRtcpSocket)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SendUDPPacket(channel=%d, data=0x%x, length=%u, useRTCP=%d)",
+                 channel, data, length, useRtcpSocket);
+#ifndef WEBRTC_EXTERNAL_TRANSPORT
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (data == NULL) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SendUDPPacket() invalid data buffer");
+        return -1;
+    }
+    if (length == 0) {
+        _shared->SetLastError(VE_INVALID_PACKET, kTraceError,
+            "SendUDPPacket() invalid packet size");
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendUDPPacket() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SendUDPPacket(data, length, transmittedBytes,
+                                      useRtcpSocket);
+#else
+    _shared->SetLastError(VE_EXTERNAL_TRANSPORT_ENABLED, kTraceWarning,
+        "SendUDPPacket() VoE is built for external transport");
+    return -1;
+#endif
+}
+
+#endif // WEBRTC_VOICE_ENGINE_NETWORK_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_network_impl.h b/voice_engine/voe_network_impl.h
new file mode 100644
index 0000000..b159c81
--- /dev/null
+++ b/voice_engine/voe_network_impl.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
+
+#include "voe_network.h"
+
+#include "shared_data.h"
+
+
+namespace webrtc
+{
+
+// Implementation of the VoENetwork sub-API. Each method validates engine
+// state and arguments, resolves the target channel through the shared
+// ChannelManager, and forwards the call to the corresponding voe::Channel.
+class VoENetworkImpl: public VoENetwork
+{
+public:
+    // External (application-provided) transport control.
+    virtual int RegisterExternalTransport(int channel, Transport& transport);
+
+    virtual int DeRegisterExternalTransport(int channel);
+
+    // Injection of packets received via an external transport.
+    virtual int ReceivedRTPPacket(int channel,
+                                  const void* data,
+                                  unsigned int length);
+
+    virtual int ReceivedRTCPPacket(int channel,
+                                   const void* data,
+                                   unsigned int length);
+
+    // Source address/ports of the last received packet. |ipAddr| buffers
+    // are caller-owned, 64 bytes.
+    virtual int GetSourceInfo(int channel,
+                              int& rtpPort,
+                              int& rtcpPort,
+                              char ipAddr[64]);
+
+    virtual int GetLocalIP(char ipAddr[64], bool ipv6 = false);
+
+    virtual int EnableIPv6(int channel);
+
+    virtual bool IPv6IsEnabled(int channel);
+
+    // Restrict incoming traffic to a given source; NULL/0 ipAddr keeps
+    // port-only filtering.
+    virtual int SetSourceFilter(int channel,
+                                int rtpPort,
+                                int rtcpPort,
+                                const char ipAddr[64] = 0);
+
+    virtual int GetSourceFilter(int channel,
+                                int& rtpPort,
+                                int& rtcpPort,
+                                char ipAddr[64]);
+
+    // QoS: DSCP/TOS marking (and Windows GQoS) for outgoing media.
+    virtual int SetSendTOS(int channel,
+                           int DSCP,
+                           int priority = -1,
+                           bool useSetSockopt = false);
+
+    virtual int GetSendTOS(int channel,
+                           int& DSCP,
+                           int& priority,
+                           bool& useSetSockopt);
+
+    virtual int SetSendGQoS(int channel,
+                            bool enable,
+                            int serviceType,
+                            int overrideDSCP);
+
+    virtual int GetSendGQoS(int channel,
+                            bool& enabled,
+                            int& serviceType,
+                            int& overrideDSCP);
+
+    // Packet-timeout and dead-or-alive connection monitoring.
+    virtual int SetPacketTimeoutNotification(int channel,
+                                             bool enable,
+                                             int timeoutSeconds = 2);
+
+    virtual int GetPacketTimeoutNotification(int channel,
+                                             bool& enabled,
+                                             int& timeoutSeconds);
+
+    virtual int RegisterDeadOrAliveObserver(int channel,
+                                            VoEConnectionObserver& observer);
+
+    virtual int DeRegisterDeadOrAliveObserver(int channel);
+
+    virtual int SetPeriodicDeadOrAliveStatus(int channel,
+                                             bool enable,
+                                             int sampleTimeSeconds = 2);
+
+    virtual int GetPeriodicDeadOrAliveStatus(int channel,
+                                             bool& enabled,
+                                             int& sampleTimeSeconds);
+
+    // Raw UDP send on the channel's RTP or RTCP socket.
+    virtual int SendUDPPacket(int channel,
+                              const void* data,
+                              unsigned int length,
+                              int& transmittedBytes,
+                              bool useRtcpSocket = false);
+
+protected:
+    // Constructed/destroyed by the engine implementation only.
+    VoENetworkImpl(voe::SharedData* shared);
+    virtual ~VoENetworkImpl();
+private:
+    voe::SharedData* _shared; // Engine-wide shared state; not owned here.
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_NETWORK_IMPL_H
diff --git a/voice_engine/voe_rtp_rtcp_impl.cc b/voice_engine/voe_rtp_rtcp_impl.cc
new file mode 100644
index 0000000..d21f722
--- /dev/null
+++ b/voice_engine/voe_rtp_rtcp_impl.cc
@@ -0,0 +1,667 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_rtp_rtcp_impl.h"
+#include "trace.h"
+#include "file_wrapper.h"
+#include "critical_section_wrapper.h"
+#include "voice_engine_impl.h"
+#include "voe_errors.h"
+
+#include "channel.h"
+#include "transmit_mixer.h"
+
+namespace webrtc {
+
+// Returns the VoERTP_RTCP sub-API of |voiceEngine| and bumps the engine's
+// reference count. Returns NULL when the API is compiled out or when
+// |voiceEngine| is NULL.
+VoERTP_RTCP* VoERTP_RTCP::GetInterface(VoiceEngine* voiceEngine)
+{
+#ifndef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+    return NULL;
+#else
+    if (voiceEngine == NULL) {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+
+// Stores the engine-wide shared state pointer (not owned) and logs
+// construction for memory tracing.
+VoERTP_RTCPImpl::VoERTP_RTCPImpl(voe::SharedData* shared) : _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoERTP_RTCPImpl::VoERTP_RTCPImpl() - ctor");
+}
+
+// Logs destruction for memory tracing; _shared is owned elsewhere and is
+// deliberately not freed here.
+VoERTP_RTCPImpl::~VoERTP_RTCPImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoERTP_RTCPImpl::~VoERTP_RTCPImpl() - dtor");
+}
+
+// Attaches |observer| to |channel| for RTP notification callbacks.
+int VoERTP_RTCPImpl::RegisterRTPObserver(int channel, VoERTPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RegisterRTPObserver(channel=%d observer=0x%x)",
+                 channel, &observer);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterRTPObserver() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->RegisterRTPObserver(observer);
+}
+
+// Removes any RTP observer previously attached to |channel|.
+int VoERTP_RTCPImpl::DeRegisterRTPObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterRTPObserver(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRTPObserver() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->DeRegisterRTPObserver();
+}
+
+// Attaches |observer| to |channel| for RTCP notification callbacks.
+// Returns 0 on success, -1 on failure (last error set on _shared).
+int VoERTP_RTCPImpl::RegisterRTCPObserver(int channel, VoERTCPObserver& observer)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RegisterRTCPObserver(channel=%d observer=0x%x)",
+                 channel, &observer);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: the error text previously named RegisterRTPObserver().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RegisterRTCPObserver() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RegisterRTCPObserver(observer);
+}
+
+// Removes any RTCP observer previously attached to |channel|.
+int VoERTP_RTCPImpl::DeRegisterRTCPObserver(int channel)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "DeRegisterRTCPObserver(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "DeRegisterRTCPObserver() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->DeRegisterRTCPObserver();
+}
+
+// Sets the SSRC used for outgoing RTP/RTCP on |channel|.
+// Returns 0 on success, -1 on failure (last error set on _shared).
+int VoERTP_RTCPImpl::SetLocalSSRC(int channel, unsigned int ssrc)
+{
+    // Bug fix: |ssrc| is an unsigned int, so the correct specifier is %u;
+    // %lu pulls a long from the va_list, which is undefined behavior and
+    // mis-prints on LP64 platforms.
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetLocalSSRC(channel=%d, %u)", channel, ssrc);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetLocalSSRC() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->SetLocalSSRC(ssrc);
+}
+
+// Retrieves the SSRC used for outgoing RTP/RTCP on |channel|.
+int VoERTP_RTCPImpl::GetLocalSSRC(int channel, unsigned int& ssrc)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetLocalSSRC(channel=%d, ssrc=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetLocalSSRC() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetLocalSSRC(ssrc);
+}
+
+// Retrieves the SSRC of the remote sender received on |channel|.
+int VoERTP_RTCPImpl::GetRemoteSSRC(int channel, unsigned int& ssrc)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRemoteSSRC(channel=%d, ssrc=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteSSRC() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRemoteSSRC(ssrc);
+}
+
+// Fills |arrCSRC| (caller-owned, 15 entries) with the CSRCs most recently
+// received on |channel|.
+int VoERTP_RTCPImpl::GetRemoteCSRCs(int channel, unsigned int arrCSRC[15])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRemoteCSRCs(channel=%d, arrCSRC=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteCSRCs() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRemoteCSRCs(arrCSRC);
+}
+
+
+// Enables/disables the RTP audio-level header extension on |channel| with
+// local extension identifier |ID| (valid range 1-14, per RFC 5285).
+int VoERTP_RTCPImpl::SetRTPAudioLevelIndicationStatus(int channel,
+                                                      bool enable,
+                                                      unsigned char ID)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetRTPAudioLevelIndicationStatus(channel=%d, enable=%d,"
+                 " ID=%u)", channel, enable, ID);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    // [RFC5285] The 4-bit ID is the local identifier of this element in
+    // the range 1-14 inclusive.
+    if (ID < kVoiceEngineMinRtpExtensionId ||
+        ID > kVoiceEngineMaxRtpExtensionId) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetRTPAudioLevelIndicationStatus() invalid ID parameter");
+        return -1;
+    }
+
+    // Set state and ID for the specified channel.
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTPAudioLevelIndicationStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetRTPAudioLevelIndicationStatus(enable, ID);
+}
+
+// Retrieves the audio-level header-extension state and ID for |channel|.
+int VoERTP_RTCPImpl::GetRTPAudioLevelIndicationStatus(int channel,
+                                                      bool& enabled,
+                                                      unsigned char& ID)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRTPAudioLevelIndicationStatus(channel=%d, enable=?, ID=?)",
+                 channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPAudioLevelIndicationStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRTPAudioLevelIndicationStatus(enabled, ID);
+}
+
+// Enables or disables RTCP on |channel|.
+int VoERTP_RTCPImpl::SetRTCPStatus(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetRTCPStatus(channel=%d, enable=%d)", channel, enable);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTCPStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetRTCPStatus(enable);
+}
+
+// Retrieves whether RTCP is enabled on |channel|.
+int VoERTP_RTCPImpl::GetRTCPStatus(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRTCPStatus(channel=%d)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTCPStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRTCPStatus(enabled);
+}
+
+// Sets the canonical name (CNAME) advertised in RTCP SDES on |channel|.
+// |cName| is a caller-owned, NUL-terminated buffer of at most 256 bytes.
+int VoERTP_RTCPImpl::SetRTCP_CNAME(int channel, const char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetRTCP_CNAME(channel=%d, cName=%s)", channel, cName);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetRTCP_CNAME(cName);
+}
+
+// Retrieves the locally configured RTCP CNAME for |channel| into the
+// caller-owned 256-byte buffer |cName|.
+int VoERTP_RTCPImpl::GetRTCP_CNAME(int channel, char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRTCP_CNAME(channel=%d, cName=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRTCP_CNAME(cName);
+}
+
+// Retrieves the remote party's RTCP CNAME for |channel| into the
+// caller-owned 256-byte buffer |cName|.
+int VoERTP_RTCPImpl::GetRemoteRTCP_CNAME(int channel, char cName[256])
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRemoteRTCP_CNAME(channel=%d, cName=?)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteRTCP_CNAME() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRemoteRTCP_CNAME(cName);
+}
+
+// Retrieves data from the most recent RTCP report on |channel|: NTP and RTP
+// timestamps from the sender info block, a locally derived playout
+// timestamp, and jitter/fraction-lost from report block 1. |jitter| and
+// |fractionLost| are pointers — presumably optional (NULL allowed); confirm
+// against Channel::GetRemoteRTCPData.
+int VoERTP_RTCPImpl::GetRemoteRTCPData(
+    int channel,
+    unsigned int& NTPHigh, // from sender info in SR
+    unsigned int& NTPLow, // from sender info in SR
+    unsigned int& timestamp, // from sender info in SR
+    unsigned int& playoutTimestamp, // derived locally
+    unsigned int* jitter, // from report block 1 in SR/RR
+    unsigned short* fractionLost) // from report block 1 in SR/RR
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRemoteRTCPData(channel=%d,...)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Bug fix: the error text previously named GetRemoteRTCP_CNAME().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRemoteRTCPData() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRemoteRTCPData(NTPHigh,
+                                         NTPLow,
+                                         timestamp,
+                                         playoutTimestamp,
+                                         jitter,
+                                         fractionLost);
+}
+
+// Sends an application-defined (APP) RTCP packet on |channel| with the
+// given subtype, four-byte name, and payload.
+int VoERTP_RTCPImpl::SendApplicationDefinedRTCPPacket(
+    int channel,
+    const unsigned char subType,
+    unsigned int name,
+    const char* data,
+    unsigned short dataLengthInBytes)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SendApplicationDefinedRTCPPacket(channel=%d, subType=%u,"
+                 "name=%u, data=?, dataLengthInBytes=%u)",
+                 channel, subType, name, dataLengthInBytes);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SendApplicationDefinedRTCPPacket() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SendApplicationDefinedRTCPPacket(subType, name, data,
+                                                         dataLengthInBytes);
+}
+
+// Retrieves jitter and discarded-packet statistics for |channel|.
+int VoERTP_RTCPImpl::GetRTPStatistics(int channel,
+                                      unsigned int& averageJitterMs,
+                                      unsigned int& maxJitterMs,
+                                      unsigned int& discardedPackets)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRTPStatistics(channel=%d,....)", channel);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTPStatistics() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetRTPStatistics(averageJitterMs, maxJitterMs,
+                                         discardedPackets);
+}
+
+// Fills |stats| with RTCP call statistics for |channel| (delegates to the
+// channel's CallStatistics overload of GetRTPStatistics).
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoERTP_RTCPImpl::GetRTCPStatistics(int channel, CallStatistics& stats)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRTCPStatistics(channel=%d)", channel);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste bug: the message used to say GetRTPStatistics().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRTCPStatistics() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRTPStatistics(stats);
+}
+
+// Copies the most recently received RTCP sender info for |channel| into
+// |sender_info|.
+int VoERTP_RTCPImpl::GetRemoteRTCPSenderInfo(int channel,
+                                             SenderInfo* sender_info) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRemoteRTCPSenderInfo(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+  voe::Channel* target = scoped_channel.ChannelPtr();
+  if (target == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRemoteRTCPSenderInfo() failed to locate channel");
+    return -1;
+  }
+  return target->GetRemoteRTCPSenderInfo(sender_info);
+}
+
+// Appends the most recently received RTCP report blocks for |channel| to
+// |report_blocks|.
+int VoERTP_RTCPImpl::GetRemoteRTCPReportBlocks(
+    int channel, std::vector<ReportBlock>* report_blocks) {
+  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+               "GetRemoteRTCPReportBlocks(channel=%d)", channel);
+  if (!_shared->statistics().Initialized()) {
+    _shared->SetLastError(VE_NOT_INITED, kTraceError);
+    return -1;
+  }
+  voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+  voe::Channel* target = scoped_channel.ChannelPtr();
+  if (target == NULL) {
+    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+        "GetRemoteRTCPReportBlocks() failed to locate channel");
+    return -1;
+  }
+  return target->GetRemoteRTCPReportBlocks(report_blocks);
+}
+
+int VoERTP_RTCPImpl::SetFECStatus(int channel, bool enable, int redPayloadtype)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetFECStatus(channel=%d, enable=%d, redPayloadtype=%d)",
+ channel, enable, redPayloadtype);
+#ifdef WEBRTC_CODEC_RED
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetFECStatus() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->SetFECStatus(enable, redPayloadtype);
+#else
+ _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+ "SetFECStatus() RED is not supported");
+ return -1;
+#endif
+}
+
+// Reports whether FEC (RED) is enabled on |channel| and which RED payload
+// type is in use. Fails when RED support is compiled out.
+int VoERTP_RTCPImpl::GetFECStatus(int channel,
+                                  bool& enabled,
+                                  int& redPayloadtype) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetFECStatus(channel=%d, enabled=?, redPayloadtype=?)",
+                 channel);
+#ifdef WEBRTC_CODEC_RED
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetFECStatus() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetFECStatus(enabled, redPayloadtype);
+#else
+    _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError,
+        "GetFECStatus() RED is not supported");
+    return -1;
+#endif
+}
+
+// Starts dumping RTP/RTCP traffic for |channel| (incoming or outgoing,
+// per |direction|) to the file |fileNameUTF8| in rtpplay-compatible format.
+int VoERTP_RTCPImpl::StartRTPDump(int channel,
+                                  const char fileNameUTF8[1024],
+                                  RTPDirections direction) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StartRTPDump(channel=%d, fileNameUTF8=%s, direction=%d)",
+                 channel, fileNameUTF8, direction);
+    // The public API fixes the buffer at 1024; keep it in sync with the
+    // file-wrapper limit.
+    assert(1024 == FileWrapper::kMaxFileNameSize);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StartRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->StartRTPDump(fileNameUTF8, direction);
+}
+
+// Stops an ongoing RTP dump for |channel| in the given |direction|.
+int VoERTP_RTCPImpl::StopRTPDump(int channel, RTPDirections direction) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "StopRTPDump(channel=%d, direction=%d)", channel, direction);
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "StopRTPDump() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->StopRTPDump(direction);
+}
+
+// Reports whether an RTP dump is currently active for |channel| in the
+// given |direction|. Returns the channel's answer, or -1 on failure.
+int VoERTP_RTCPImpl::RTPDumpIsActive(int channel, RTPDirections direction)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "RTPDumpIsActive(channel=%d, direction=%d)",
+                 channel, direction);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste bug: the message used to say StopRTPDump().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "RTPDumpIsActive() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->RTPDumpIsActive(direction);
+}
+
+// Inserts (and transmits) an extra RTP packet into the active RTP stream
+// on |channel| with the given payload type, marker bit and payload.
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoERTP_RTCPImpl::InsertExtraRTPPacket(int channel,
+                                          unsigned char payloadType,
+                                          bool markerBit,
+                                          const char* payloadData,
+                                          unsigned short payloadSize)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "InsertExtraRTPPacket(channel=%d, payloadType=%u,"
+                 " markerBit=%u, payloadSize=%u)",
+                 channel, payloadType, markerBit, payloadSize);
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste bug: the message used to say StopRTPDump().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "InsertExtraRTPPacket() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->InsertExtraRTPPacket(payloadType,
+                                            markerBit,
+                                            payloadData,
+                                            payloadSize);
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_rtp_rtcp_impl.h b/voice_engine/voe_rtp_rtcp_impl.h
new file mode 100644
index 0000000..721499c
--- /dev/null
+++ b/voice_engine/voe_rtp_rtcp_impl.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+
+#include "voe_rtp_rtcp.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoERTP_RTCP sub-API. Each method forwards to the
+// voe::Channel identified by its |channel| argument (via the shared channel
+// manager); on failure the methods return -1 and record a last-error code on
+// the shared data. Construction/destruction is managed by VoiceEngineImpl.
+class VoERTP_RTCPImpl : public VoERTP_RTCP
+{
+public:
+    // Registration of observers for RTP and RTCP callbacks
+    virtual int RegisterRTPObserver(int channel, VoERTPObserver& observer);
+
+    virtual int DeRegisterRTPObserver(int channel);
+
+    virtual int RegisterRTCPObserver(int channel, VoERTCPObserver& observer);
+
+    virtual int DeRegisterRTCPObserver(int channel);
+
+    // RTCP
+    virtual int SetRTCPStatus(int channel, bool enable);
+
+    virtual int GetRTCPStatus(int channel, bool& enabled);
+
+    virtual int SetRTCP_CNAME(int channel, const char cName[256]);
+
+    virtual int GetRTCP_CNAME(int channel, char cName[256]);
+
+    virtual int GetRemoteRTCP_CNAME(int channel, char cName[256]);
+
+    // Retrieves NTP/RTP timestamps from the last received SR plus the local
+    // playout timestamp; |jitter| and |fractionLost| are optional outputs.
+    virtual int GetRemoteRTCPData(int channel,
+                                  unsigned int& NTPHigh,
+                                  unsigned int& NTPLow,
+                                  unsigned int& timestamp,
+                                  unsigned int& playoutTimestamp,
+                                  unsigned int* jitter = NULL,
+                                  unsigned short* fractionLost = NULL);
+
+    virtual int SendApplicationDefinedRTCPPacket(
+        int channel,
+        const unsigned char subType,
+        unsigned int name,
+        const char* data,
+        unsigned short dataLengthInBytes);
+
+    // SSRC
+    virtual int SetLocalSSRC(int channel, unsigned int ssrc);
+
+    virtual int GetLocalSSRC(int channel, unsigned int& ssrc);
+
+    virtual int GetRemoteSSRC(int channel, unsigned int& ssrc);
+
+    // RTP Header Extension for Client-to-Mixer Audio Level Indication
+    virtual int SetRTPAudioLevelIndicationStatus(int channel,
+                                                 bool enable,
+                                                 unsigned char ID);
+
+    virtual int GetRTPAudioLevelIndicationStatus(int channel,
+                                                 bool& enabled,
+                                                 unsigned char& ID);
+
+    // CSRC
+    virtual int GetRemoteCSRCs(int channel, unsigned int arrCSRC[15]);
+
+    // Statistics
+    virtual int GetRTPStatistics(int channel,
+                                 unsigned int& averageJitterMs,
+                                 unsigned int& maxJitterMs,
+                                 unsigned int& discardedPackets);
+
+    virtual int GetRTCPStatistics(int channel, CallStatistics& stats);
+
+    virtual int GetRemoteRTCPSenderInfo(int channel, SenderInfo* sender_info);
+
+    virtual int GetRemoteRTCPReportBlocks(
+        int channel, std::vector<ReportBlock>* report_blocks);
+
+    // FEC
+    virtual int SetFECStatus(int channel,
+                             bool enable,
+                             int redPayloadtype = -1);
+
+    virtual int GetFECStatus(int channel, bool& enabled, int& redPayloadtype);
+
+    // Store RTP and RTCP packets and dump to file (compatible with rtpplay)
+    virtual int StartRTPDump(int channel,
+                             const char fileNameUTF8[1024],
+                             RTPDirections direction = kRtpIncoming);
+
+    virtual int StopRTPDump(int channel,
+                            RTPDirections direction = kRtpIncoming);
+
+    virtual int RTPDumpIsActive(int channel,
+                                RTPDirections direction = kRtpIncoming);
+
+    // Insert (and transmits) extra RTP packet into active RTP audio stream
+    virtual int InsertExtraRTPPacket(int channel,
+                                     unsigned char payloadType,
+                                     bool markerBit,
+                                     const char* payloadData,
+                                     unsigned short payloadSize);
+
+protected:
+    // Instantiated only by VoiceEngineImpl; |shared| must outlive this object.
+    VoERTP_RTCPImpl(voe::SharedData* shared);
+    virtual ~VoERTP_RTCPImpl();
+
+private:
+    // Non-owning pointer to engine-wide shared state (channel manager,
+    // statistics, last-error storage).
+    voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_RTP_RTCP_IMPL_H
+
diff --git a/voice_engine/voe_video_sync_impl.cc b/voice_engine/voe_video_sync_impl.cc
new file mode 100644
index 0000000..2a7ff7d
--- /dev/null
+++ b/voice_engine/voe_video_sync_impl.cc
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_video_sync_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "trace.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+// Returns the VoEVideoSync sub-API for |voiceEngine|, incrementing the
+// engine's reference count. Returns NULL when the sub-API is compiled out
+// or |voiceEngine| is NULL.
+VoEVideoSync* VoEVideoSync::GetInterface(VoiceEngine* voiceEngine) {
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+    if (voiceEngine == NULL) {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#else
+    return NULL;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+
+// Constructor: caches the non-owning shared-data pointer and logs creation.
+VoEVideoSyncImpl::VoEVideoSyncImpl(voe::SharedData* shared) : _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEVideoSyncImpl::VoEVideoSyncImpl() - ctor");
+}
+
+// Destructor: only logs destruction; |_shared| is owned elsewhere.
+VoEVideoSyncImpl::~VoEVideoSyncImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEVideoSyncImpl::~VoEVideoSyncImpl() - dtor");
+}
+
+// Retrieves the current playout RTP timestamp for |channel|.
+int VoEVideoSyncImpl::GetPlayoutTimestamp(int channel, unsigned int& timestamp) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPlayoutTimestamp(channel=%d, timestamp=?)", channel);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetPlayoutTimestamp() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetPlayoutTimestamp(timestamp);
+}
+
+// Sets the initial RTP timestamp to be used for outgoing media on |channel|.
+int VoEVideoSyncImpl::SetInitTimestamp(int channel,
+                                       unsigned int timestamp) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetInitTimestamp(channel=%d, timestamp=%lu)",
+                 channel, timestamp);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetInitTimestamp() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetInitTimestamp(timestamp);
+}
+
+// Sets the initial RTP sequence number for outgoing media on |channel|.
+int VoEVideoSyncImpl::SetInitSequenceNumber(int channel,
+                                            short sequenceNumber) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetInitSequenceNumber(channel=%d, sequenceNumber=%hd)",
+                 channel, sequenceNumber);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetInitSequenceNumber() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetInitSequenceNumber(sequenceNumber);
+}
+
+// Sets a lower bound (in ms) on the playout delay for |channel|, used for
+// lip-sync with video.
+int VoEVideoSyncImpl::SetMinimumPlayoutDelay(int channel, int delayMs) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetMinimumPlayoutDelay(channel=%d, delayMs=%d)",
+                 channel, delayMs);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetMinimumPlayoutDelay() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->SetMinimumPlayoutDelay(delayMs);
+}
+
+// Retrieves the current total delay estimate (in ms) for |channel|.
+int VoEVideoSyncImpl::GetDelayEstimate(int channel, int& delayMs) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetDelayEstimate(channel=%d, delayMs=?)", channel);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetDelayEstimate() failed to locate channel");
+        return -1;
+    }
+    return channel_ptr->GetDelayEstimate(delayMs);
+}
+
+// Reads the audio device's playout buffer size (in ms) into |bufferMs|.
+int VoEVideoSyncImpl::GetPlayoutBufferSize(int& bufferMs) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetPlayoutBufferSize(bufferMs=?)");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    AudioDeviceModule::BufferType type(AudioDeviceModule::kFixedBufferSize);
+    WebRtc_UWord16 sizeMS(0);
+    if (_shared->audio_device()->PlayoutBuffer(&type, &sizeMS) != 0) {
+        _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceError,
+            "GetPlayoutBufferSize() failed to read buffer size");
+        return -1;
+    }
+    bufferMs = sizeMS;
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetPlayoutBufferSize() => bufferMs=%d", bufferMs);
+    return 0;
+}
+
+// Retrieves the RTP/RTCP module associated with |channel| so that video
+// can synchronize against the audio stream.
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoEVideoSyncImpl::GetRtpRtcp(int channel, RtpRtcp* &rtpRtcpModule)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetRtpRtcp(channel=%i)", channel);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        // Fixed copy-paste bug: message used to say GetPlayoutTimestamp().
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetRtpRtcp() failed to locate channel");
+        return -1;
+    }
+    return channelPtr->GetRtpRtcp(rtpRtcpModule);
+}
+
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_video_sync_impl.h b/voice_engine/voe_video_sync_impl.h
new file mode 100644
index 0000000..1b75f05
--- /dev/null
+++ b/voice_engine/voe_video_sync_impl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
+
+#include "voe_video_sync.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+// Implementation of the VoEVideoSync sub-API, used to synchronize audio
+// playout with video. Channel-scoped calls forward to the voe::Channel for
+// the given |channel|; failures return -1 and set a last-error code on the
+// shared data. Construction/destruction is managed by VoiceEngineImpl.
+class VoEVideoSyncImpl : public VoEVideoSync
+{
+public:
+    virtual int GetPlayoutBufferSize(int& bufferMs);
+
+    virtual int SetMinimumPlayoutDelay(int channel, int delayMs);
+
+    virtual int GetDelayEstimate(int channel, int& delayMs);
+
+    virtual int SetInitTimestamp(int channel, unsigned int timestamp);
+
+    virtual int SetInitSequenceNumber(int channel, short sequenceNumber);
+
+    virtual int GetPlayoutTimestamp(int channel, unsigned int& timestamp);
+
+    // Exposes the channel's RTP/RTCP module for external synchronization.
+    virtual int GetRtpRtcp(int channel, RtpRtcp* &rtpRtcpModule);
+
+protected:
+    // Instantiated only by VoiceEngineImpl; |shared| must outlive this object.
+    VoEVideoSyncImpl(voe::SharedData* shared);
+    virtual ~VoEVideoSyncImpl();
+
+private:
+    // Non-owning pointer to engine-wide shared state.
+    voe::SharedData* _shared;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_VIDEO_SYNC_IMPL_H
diff --git a/voice_engine/voe_volume_control_impl.cc b/voice_engine/voe_volume_control_impl.cc
new file mode 100644
index 0000000..f0d1652
--- /dev/null
+++ b/voice_engine/voe_volume_control_impl.cc
@@ -0,0 +1,640 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "voe_volume_control_impl.h"
+
+#include "channel.h"
+#include "critical_section_wrapper.h"
+#include "output_mixer.h"
+#include "trace.h"
+#include "transmit_mixer.h"
+#include "voe_errors.h"
+#include "voice_engine_impl.h"
+
+namespace webrtc {
+
+// Returns the VoEVolumeControl sub-API for |voiceEngine|, incrementing the
+// engine's reference count. Returns NULL when the sub-API is compiled out
+// or |voiceEngine| is NULL.
+VoEVolumeControl* VoEVolumeControl::GetInterface(VoiceEngine* voiceEngine) {
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+    if (voiceEngine == NULL) {
+        return NULL;
+    }
+    VoiceEngineImpl* impl = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+    impl->AddRef();
+    return impl;
+#else
+    return NULL;
+#endif
+}
+
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+
+// Constructor: caches the non-owning shared-data pointer and logs creation.
+VoEVolumeControlImpl::VoEVolumeControlImpl(voe::SharedData* shared)
+    : _shared(shared)
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEVolumeControlImpl::VoEVolumeControlImpl() - ctor");
+}
+
+// Destructor: only logs destruction; |_shared| is owned elsewhere.
+VoEVolumeControlImpl::~VoEVolumeControlImpl()
+{
+    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "VoEVolumeControlImpl::~VoEVolumeControlImpl() - dtor");
+}
+
+// Sets the speaker (playout) volume. |volume| is expressed on the VoE scale
+// [0, kMaxVolumeLevel] and mapped onto the audio device's native range.
+int VoEVolumeControlImpl::SetSpeakerVolume(unsigned int volume) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSpeakerVolume(volume=%u)", volume);
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (volume > kMaxVolumeLevel) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetSpeakerVolume() invalid argument");
+        return -1;
+    }
+
+    // Map [0, kMaxVolumeLevel] onto [0, MaxSpeakerVolume].
+    WebRtc_UWord32 maxVol = 0;
+    if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
+        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+            "SetSpeakerVolume() failed to get max volume");
+        return -1;
+    }
+    // Integer arithmetic with rounding; avoids floating point.
+    const WebRtc_UWord32 spkrVol = (WebRtc_UWord32)((volume * maxVol +
+        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
+
+    // Apply the scaled value through the audio device mixer.
+    if (_shared->audio_device()->SetSpeakerVolume(spkrVol) != 0) {
+        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+            "SetSpeakerVolume() failed to set speaker volume");
+        return -1;
+    }
+    return 0;
+}
+
+// Reads the speaker volume from the audio device and reports it in
+// |volume| on the VoE scale [0, kMaxVolumeLevel].
+int VoEVolumeControlImpl::GetSpeakerVolume(unsigned int& volume) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSpeakerVolume()");
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_UWord32 spkrVol = 0;
+    if (_shared->audio_device()->SpeakerVolume(&spkrVol) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetSpeakerVolume() unable to get speaker volume");
+        return -1;
+    }
+
+    // Map [0, MaxSpeakerVolume] onto [0, kMaxVolumeLevel].
+    WebRtc_UWord32 maxVol = 0;
+    if (_shared->audio_device()->MaxSpeakerVolume(&maxVol) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetSpeakerVolume() unable to get max speaker volume");
+        return -1;
+    }
+    // Integer arithmetic with rounding; avoids floating point.
+    volume = (WebRtc_UWord32)((spkrVol * kMaxVolumeLevel +
+        (int)(maxVol / 2)) / (maxVol));
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetSpeakerVolume() => volume=%d", volume);
+    return 0;
+}
+
+// Mutes or unmutes the system speaker via the audio device module.
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoEVolumeControlImpl::SetSystemOutputMute(bool enable)
+{
+    // Fixed copy-paste bug: the API trace used to log "GetSystemOutputMute".
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSystemOutputMute(enabled=%d)", enable);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_shared->audio_device()->SetSpeakerMute(enable) != 0)
+    {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "SpeakerMute() unable to set speaker mute");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Reads the system speaker mute state from the audio device module.
+int VoEVolumeControlImpl::GetSystemOutputMute(bool& enabled) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSystemOutputMute(enabled=?)");
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_shared->audio_device()->SpeakerMute(&enabled) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "SpeakerMute() unable to get speaker mute state");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetSystemOutputMute() => %d", enabled);
+    return 0;
+}
+
+// Sets the microphone volume. |volume| is expressed on the VoE scale
+// [0, kMaxVolumeLevel] and mapped onto the device's native mic range.
+int VoEVolumeControlImpl::SetMicVolume(unsigned int volume) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetMicVolume(volume=%u)", volume);
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (volume > kMaxVolumeLevel) {
+        _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+            "SetMicVolume() invalid argument");
+        return -1;
+    }
+
+    // Map [0, kMaxVolumeLevel] onto [0, MaxMicrophoneVolume].
+    WebRtc_UWord32 maxVol = 0;
+    WebRtc_UWord32 micVol = 0;
+    if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
+        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+            "SetMicVolume() failed to get max volume");
+        return -1;
+    }
+
+    if (volume == kMaxVolumeLevel) {
+        // On Linux running pulse, users are able to set the volume above 100%
+        // through the volume control panel, where the +100% range is digital
+        // scaling. WebRTC does not support setting the volume above 100%, and
+        // simply ignores changing the volume if the user tries to set it to
+        // |kMaxVolumeLevel| while the current volume is higher than |maxVol|.
+        if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
+            _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+                "SetMicVolume() unable to get microphone volume");
+            return -1;
+        }
+        if (micVol >= maxVol) {
+            return 0;
+        }
+    }
+
+    // Integer arithmetic with rounding; avoids floating point.
+    micVol = (WebRtc_UWord32)((volume * maxVol +
+        (int)(kMaxVolumeLevel / 2)) / (kMaxVolumeLevel));
+
+    // Apply the scaled value through the audio device mixer.
+    if (_shared->audio_device()->SetMicrophoneVolume(micVol) != 0) {
+        _shared->SetLastError(VE_MIC_VOL_ERROR, kTraceError,
+            "SetMicVolume() failed to set mic volume");
+        return -1;
+    }
+    return 0;
+}
+
+// Reads the microphone volume from the audio device and reports it in
+// |volume| on the VoE scale [0, kMaxVolumeLevel]; values above the device
+// maximum (possible with pulse digital gain) are clamped to the top.
+int VoEVolumeControlImpl::GetMicVolume(unsigned int& volume) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetMicVolume()");
+    ANDROID_NOT_SUPPORTED(_shared->statistics());
+    IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    WebRtc_UWord32 micVol = 0;
+    if (_shared->audio_device()->MicrophoneVolume(&micVol) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetMicVolume() unable to get microphone volume");
+        return -1;
+    }
+
+    // Map [0, MaxMicrophoneVolume] onto [0, kMaxVolumeLevel].
+    WebRtc_UWord32 maxVol = 0;
+    if (_shared->audio_device()->MaxMicrophoneVolume(&maxVol) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "GetMicVolume() unable to get max microphone volume");
+        return -1;
+    }
+    if (micVol < maxVol) {
+        // Integer arithmetic with rounding; avoids floating point.
+        volume = (WebRtc_UWord32)((micVol * kMaxVolumeLevel +
+            (int)(maxVol / 2)) / (maxVol));
+    } else {
+        // Truncate the value to the kMaxVolumeLevel.
+        volume = kMaxVolumeLevel;
+    }
+
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetMicVolume() => volume=%d", volume);
+    return 0;
+}
+
+// Mutes or unmutes the capture stream. |channel| == -1 mutes before
+// demultiplexing (all channels); otherwise only the given channel is muted.
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoEVolumeControlImpl::SetInputMute(int channel, bool enable)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetInputMute(channel=%d, enable=%d)", channel, enable);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        // Mute before demultiplexing <=> affects all channels
+        return _shared->transmit_mixer()->SetMute(enable);
+    }
+    // Mute after demultiplexing <=> affects one channel only
+    voe::ScopedChannel sc(_shared->channel_manager(), channel);
+    voe::Channel* channelPtr = sc.ChannelPtr();
+    if (channelPtr == NULL)
+    {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "SetInputMute() failed to locate channel");
+        return -1;
+    }
+    // Removed unreachable trailing "return 0;" (both branches return above).
+    return channelPtr->SetMute(enable);
+}
+
+// Reports the capture mute state. |channel| == -1 queries the pre-demux
+// (all-channels) mute on the transmit mixer; otherwise the given channel.
+// Returns 0 on success, -1 on failure (last error set on |_shared|).
+int VoEVolumeControlImpl::GetInputMute(int channel, bool& enabled)
+{
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetInputMute(channel=%d)", channel);
+
+    if (!_shared->statistics().Initialized())
+    {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1)
+    {
+        enabled = _shared->transmit_mixer()->Mute();
+    }
+    else
+    {
+        voe::ScopedChannel sc(_shared->channel_manager(), channel);
+        voe::Channel* channelPtr = sc.ChannelPtr();
+        if (channelPtr == NULL)
+        {
+            // Fixed copy-paste bug: the message used to say SetInputMute().
+            _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+                "GetInputMute() failed to locate channel");
+            return -1;
+        }
+        enabled = channelPtr->Mute();
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetInputMute() => enabled = %d", (int)enabled);
+    return 0;
+}
+
+// Mutes or unmutes the system microphone via the audio device module.
+int VoEVolumeControlImpl::SetSystemInputMute(bool enable) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "SetSystemInputMute(enabled=%d)", enable);
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_shared->audio_device()->SetMicrophoneMute(enable) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "MicrophoneMute() unable to set microphone mute state");
+        return -1;
+    }
+
+    return 0;
+}
+
+// Reads the system microphone mute state from the audio device module.
+int VoEVolumeControlImpl::GetSystemInputMute(bool& enabled) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSystemInputMute(enabled=?)");
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+
+    if (_shared->audio_device()->MicrophoneMute(&enabled) != 0) {
+        _shared->SetLastError(VE_GET_MIC_VOL_ERROR, kTraceError,
+            "MicrophoneMute() unable to get microphone mute state");
+        return -1;
+    }
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetSystemInputMute() => %d", enabled);
+    return 0;
+}
+
+// Reports the current speech level of the captured (input) audio, taken
+// from the transmit mixer.
+int VoEVolumeControlImpl::GetSpeechInputLevel(unsigned int& level) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSpeechInputLevel()");
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    const WebRtc_Word8 current_level = _shared->transmit_mixer()->AudioLevel();
+    level = static_cast<unsigned int>(current_level);
+    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+                 VoEId(_shared->instance_id(), -1),
+                 "GetSpeechInputLevel() => %d", level);
+    return 0;
+}
+
+// Reports the current speech level of the played-out audio. |channel| == -1
+// queries the output mixer (all channels mixed); otherwise the one channel.
+int VoEVolumeControlImpl::GetSpeechOutputLevel(int channel,
+                                               unsigned int& level) {
+    WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+                 "GetSpeechOutputLevel(channel=%d, level=?)", channel);
+
+    if (!_shared->statistics().Initialized()) {
+        _shared->SetLastError(VE_NOT_INITED, kTraceError);
+        return -1;
+    }
+    if (channel == -1) {
+        return _shared->output_mixer()->GetSpeechOutputLevel(
+            (WebRtc_UWord32&)level);
+    }
+    voe::ScopedChannel scoped_channel(_shared->channel_manager(), channel);
+    voe::Channel* channel_ptr = scoped_channel.ChannelPtr();
+    if (channel_ptr == NULL) {
+        _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+            "GetSpeechOutputLevel() failed to locate channel");
+        return -1;
+    }
+    channel_ptr->GetSpeechOutputLevel((WebRtc_UWord32&)level);
+    return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechInputLevelFullRange(unsigned int& level) // Capture-side speech level in full 16-bit range from the transmit mixer.
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetSpeechInputLevelFullRange(level=?)");
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ WebRtc_Word16 currentLevel = _shared->transmit_mixer()-> // Signed 16-bit level; assumed non-negative - TODO confirm range.
+ AudioLevelFullRange();
+ level = static_cast<unsigned int> (currentLevel);
+ WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
+ VoEId(_shared->instance_id(), -1),
+ "GetSpeechInputLevelFullRange() => %d", level);
+ return 0;
+}
+
+int VoEVolumeControlImpl::GetSpeechOutputLevelFullRange(int channel, // Full-range playout level: channel == -1 reads the mixed output, else one channel.
+ unsigned int& level)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetSpeechOutputLevelFullRange(channel=%d, level=?)", channel);
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (channel == -1)
+ {
+ return _shared->output_mixer()->GetSpeechOutputLevelFullRange(
+ (WebRtc_UWord32&)level); // C-style ref cast: assumes unsigned int and WebRtc_UWord32 share representation.
+ }
+ else
+ {
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "GetSpeechOutputLevelFullRange() failed to locate channel");
+ return -1;
+ }
+ channelPtr->GetSpeechOutputLevelFullRange((WebRtc_UWord32&)level); // NOTE(review): return value ignored - confirm intended.
+ }
+ return 0;
+}
+
+int VoEVolumeControlImpl::SetChannelOutputVolumeScaling(int channel, // Applies a per-channel playout gain; valid range is [kMin..kMax]OutputVolumeScaling.
+ float scaling)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetChannelOutputVolumeScaling(channel=%d, scaling=%3.2f)",
+ channel, scaling);
+ IPHONE_NOT_SUPPORTED(_shared->statistics()); // On iOS builds this macro reports VE_FUNC_NOT_SUPPORTED and returns -1.
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ if (scaling < kMinOutputVolumeScaling ||
+ scaling > kMaxOutputVolumeScaling)
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "SetChannelOutputVolumeScaling() invalid parameter");
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetChannelOutputVolumeScaling() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->SetChannelOutputVolumeScaling(scaling); // Delegate; channel sets its own last error on failure - TODO confirm.
+}
+
+int VoEVolumeControlImpl::GetChannelOutputVolumeScaling(int channel, // Reads back the per-channel playout gain set via SetChannelOutputVolumeScaling().
+ float& scaling)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetChannelOutputVolumeScaling(channel=%d, scaling=?)", channel);
+ IPHONE_NOT_SUPPORTED(_shared->statistics()); // On iOS builds this macro reports VE_FUNC_NOT_SUPPORTED and returns -1.
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "GetChannelOutputVolumeScaling() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->GetChannelOutputVolumeScaling(scaling); // 'scaling' is filled by the channel on success.
+}
+
+int VoEVolumeControlImpl::SetOutputVolumePan(int channel, // Sets left/right playout panning; channel == -1 targets the master mix.
+ float left,
+ float right)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "SetOutputVolumePan(channel=%d, left=%2.1f, right=%2.1f)",
+ channel, left, right);
+ ANDROID_NOT_SUPPORTED(_shared->statistics()); // On Android/iOS builds these macros report VE_FUNC_NOT_SUPPORTED and return -1.
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ bool available(false);
+ _shared->audio_device()->StereoPlayoutIsAvailable(&available); // NOTE(review): ADM return value ignored; 'available' stays false on failure.
+ if (!available)
+ {
+ _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
+ "SetOutputVolumePan() stereo playout not supported");
+ return -1;
+ }
+ if ((left < kMinOutputVolumePanning) || // Both gains must lie in [kMin..kMax]OutputVolumePanning.
+ (left > kMaxOutputVolumePanning) ||
+ (right < kMinOutputVolumePanning) ||
+ (right > kMaxOutputVolumePanning))
+ {
+ _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
+ "SetOutputVolumePan() invalid parameter");
+ return -1;
+ }
+
+ if (channel == -1)
+ {
+ // Master balance (affects the signal after output mixing)
+ return _shared->output_mixer()->SetOutputVolumePan(left, right);
+ }
+ else
+ {
+ // Per-channel balance (affects the signal before output mixing)
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "SetOutputVolumePan() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->SetOutputVolumePan(left, right);
+ }
+ return 0; // NOTE(review): unreachable - both branches above return.
+}
+
+int VoEVolumeControlImpl::GetOutputVolumePan(int channel, // Reads left/right playout panning; channel == -1 reads the master mix.
+ float& left,
+ float& right)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
+ "GetOutputVolumePan(channel=%d, left=?, right=?)", channel);
+ ANDROID_NOT_SUPPORTED(_shared->statistics()); // On Android/iOS builds these macros report VE_FUNC_NOT_SUPPORTED and return -1.
+ IPHONE_NOT_SUPPORTED(_shared->statistics());
+
+ if (!_shared->statistics().Initialized())
+ {
+ _shared->SetLastError(VE_NOT_INITED, kTraceError);
+ return -1;
+ }
+
+ bool available(false);
+ _shared->audio_device()->StereoPlayoutIsAvailable(&available); // NOTE(review): ADM return value ignored; 'available' stays false on failure.
+ if (!available)
+ {
+ _shared->SetLastError(VE_FUNC_NO_STEREO, kTraceError,
+ "GetOutputVolumePan() stereo playout not supported");
+ return -1;
+ }
+
+ if (channel == -1)
+ {
+ return _shared->output_mixer()->GetOutputVolumePan(left, right);
+ }
+ else
+ {
+ voe::ScopedChannel sc(_shared->channel_manager(), channel);
+ voe::Channel* channelPtr = sc.ChannelPtr();
+ if (channelPtr == NULL)
+ {
+ _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
+ "GetOutputVolumePan() failed to locate channel");
+ return -1;
+ }
+ return channelPtr->GetOutputVolumePan(left, right);
+ }
+ return 0; // NOTE(review): unreachable - both branches above return.
+}
+
+#endif // #ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+
+} // namespace webrtc
diff --git a/voice_engine/voe_volume_control_impl.h b/voice_engine/voe_volume_control_impl.h
new file mode 100644
index 0000000..9e1cc5a
--- /dev/null
+++ b/voice_engine/voe_volume_control_impl.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+
+#include "voe_volume_control.h"
+
+#include "shared_data.h"
+
+namespace webrtc {
+
+class VoEVolumeControlImpl : public VoEVolumeControl // Implements the VoEVolumeControl sub-API on top of engine-wide SharedData.
+{
+public:
+ virtual int SetSpeakerVolume(unsigned int volume); // System speaker volume, [kMinVolumeLevel..kMaxVolumeLevel].
+
+ virtual int GetSpeakerVolume(unsigned int& volume);
+
+ virtual int SetSystemOutputMute(bool enable); // OS-level playout mute.
+
+ virtual int GetSystemOutputMute(bool& enabled);
+
+ virtual int SetMicVolume(unsigned int volume); // System microphone volume.
+
+ virtual int GetMicVolume(unsigned int& volume);
+
+ virtual int SetInputMute(int channel, bool enable); // Per-channel capture mute.
+
+ virtual int GetInputMute(int channel, bool& enabled);
+
+ virtual int SetSystemInputMute(bool enable); // OS-level microphone mute.
+
+ virtual int GetSystemInputMute(bool& enabled);
+
+ virtual int GetSpeechInputLevel(unsigned int& level); // Capture speech level (low-range).
+
+ virtual int GetSpeechOutputLevel(int channel, unsigned int& level); // channel == -1 => mixed output.
+
+ virtual int GetSpeechInputLevelFullRange(unsigned int& level); // Capture speech level (16-bit range).
+
+ virtual int GetSpeechOutputLevelFullRange(int channel,
+ unsigned int& level);
+
+ virtual int SetChannelOutputVolumeScaling(int channel, float scaling); // Gain in [kMin..kMax]OutputVolumeScaling.
+
+ virtual int GetChannelOutputVolumeScaling(int channel, float& scaling);
+
+ virtual int SetOutputVolumePan(int channel, float left, float right); // Stereo panning; channel == -1 => master mix.
+
+ virtual int GetOutputVolumePan(int channel, float& left, float& right);
+
+
+protected:
+ VoEVolumeControlImpl(voe::SharedData* shared); // Protected: presumably constructed only by the owning engine implementation - confirm.
+ virtual ~VoEVolumeControlImpl();
+
+private:
+ voe::SharedData* _shared; // Non-owning; engine-wide services (trace id, ADM, mixers, statistics).
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOE_VOLUME_CONTROL_IMPL_H
+
diff --git a/voice_engine/voice_engine.gyp b/voice_engine/voice_engine.gyp
new file mode 100644
index 0000000..ebf13ca
--- /dev/null
+++ b/voice_engine/voice_engine.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'includes': [
+ '../build/common.gypi', # Common WebRTC build configuration.
+ 'voice_engine_core.gypi', # Defines the voice_engine_core target (and its unittests).
+ ],
+
+ # Test targets, excluded when building with Chromium.
+ 'conditions': [
+ ['include_tests==1', {
+ 'includes': [
+ 'test/voice_engine_tests.gypi',
+ ],
+ }],
+ ],
+}
diff --git a/voice_engine/voice_engine_core.gypi b/voice_engine/voice_engine_core.gypi
new file mode 100644
index 0000000..0478a71
--- /dev/null
+++ b/voice_engine/voice_engine_core.gypi
@@ -0,0 +1,154 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS. All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'voice_engine_core', # Core VoiceEngine library (all VoE sub-API implementations).
+ 'type': '<(library)', # Static or shared, selected by the build configuration.
+ 'dependencies': [
+ '<(webrtc_root)/common_audio/common_audio.gyp:resampler',
+ '<(webrtc_root)/common_audio/common_audio.gyp:signal_processing',
+ '<(webrtc_root)/modules/modules.gyp:audio_coding_module',
+ '<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
+ '<(webrtc_root)/modules/modules.gyp:audio_device',
+ '<(webrtc_root)/modules/modules.gyp:audio_processing',
+ '<(webrtc_root)/modules/modules.gyp:media_file',
+ '<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
+ '<(webrtc_root)/modules/modules.gyp:udp_transport',
+ '<(webrtc_root)/modules/modules.gyp:webrtc_utility',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'include_dirs': [
+ 'include',
+ '<(webrtc_root)/modules/audio_device',
+ ],
+ 'direct_dependent_settings': { # Dependents get the public headers in include/.
+ 'include_dirs': [
+ 'include',
+ ],
+ },
+ 'sources': [
+ '../common_types.h',
+ '../engine_configurations.h',
+ '../typedefs.h',
+ 'include/voe_audio_processing.h',
+ 'include/voe_base.h',
+ 'include/voe_call_report.h',
+ 'include/voe_codec.h',
+ 'include/voe_dtmf.h',
+ 'include/voe_encryption.h',
+ 'include/voe_errors.h',
+ 'include/voe_external_media.h',
+ 'include/voe_file.h',
+ 'include/voe_hardware.h',
+ 'include/voe_neteq_stats.h',
+ 'include/voe_network.h',
+ 'include/voe_rtp_rtcp.h',
+ 'include/voe_video_sync.h',
+ 'include/voe_volume_control.h',
+ 'channel.cc',
+ 'channel.h',
+ 'channel_manager.cc',
+ 'channel_manager.h',
+ 'channel_manager_base.cc',
+ 'channel_manager_base.h',
+ 'dtmf_inband.cc',
+ 'dtmf_inband.h',
+ 'dtmf_inband_queue.cc',
+ 'dtmf_inband_queue.h',
+ 'level_indicator.cc',
+ 'level_indicator.h',
+ 'monitor_module.cc',
+ 'monitor_module.h',
+ 'output_mixer.cc',
+ 'output_mixer.h',
+ 'output_mixer_internal.cc',
+ 'output_mixer_internal.h',
+ 'shared_data.cc',
+ 'shared_data.h',
+ 'statistics.cc',
+ 'statistics.h',
+ 'transmit_mixer.cc',
+ 'transmit_mixer.h',
+ 'utility.cc',
+ 'utility.h',
+ 'voe_audio_processing_impl.cc',
+ 'voe_audio_processing_impl.h',
+ 'voe_base_impl.cc',
+ 'voe_base_impl.h',
+ 'voe_call_report_impl.cc',
+ 'voe_call_report_impl.h',
+ 'voe_codec_impl.cc',
+ 'voe_codec_impl.h',
+ 'voe_dtmf_impl.cc',
+ 'voe_dtmf_impl.h',
+ 'voe_encryption_impl.cc',
+ 'voe_encryption_impl.h',
+ 'voe_external_media_impl.cc',
+ 'voe_external_media_impl.h',
+ 'voe_file_impl.cc',
+ 'voe_file_impl.h',
+ 'voe_hardware_impl.cc',
+ 'voe_hardware_impl.h',
+ 'voe_neteq_stats_impl.cc',
+ 'voe_neteq_stats_impl.h',
+ 'voe_network_impl.cc',
+ 'voe_network_impl.h',
+ 'voe_rtp_rtcp_impl.cc',
+ 'voe_rtp_rtcp_impl.h',
+ 'voe_video_sync_impl.cc',
+ 'voe_video_sync_impl.h',
+ 'voe_volume_control_impl.cc',
+ 'voe_volume_control_impl.h',
+ 'voice_engine_defines.h',
+ 'voice_engine_impl.cc',
+ 'voice_engine_impl.h',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['WEBRTC_DRIFT_COMPENSATION_SUPPORTED',], # NOTE(review): 'defines' at file scope, not inside a target - likely has no effect; confirm.
+ }],
+ ['include_tests==1', {
+ 'targets': [
+ {
+ 'target_name': 'voice_engine_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'voice_engine_core',
+ '<(DEPTH)/testing/gtest.gyp:gtest',
+ '<(webrtc_root)/test/test.gyp:test_support_main',
+ # The rest are to satisfy the unittests' include chain.
+ # This would be unnecessary if we used qualified includes.
+ '<(webrtc_root)/common_audio/common_audio.gyp:resampler',
+ '<(webrtc_root)/modules/modules.gyp:audio_device',
+ '<(webrtc_root)/modules/modules.gyp:audio_processing',
+ '<(webrtc_root)/modules/modules.gyp:audio_coding_module',
+ '<(webrtc_root)/modules/modules.gyp:audio_conference_mixer',
+ '<(webrtc_root)/modules/modules.gyp:media_file',
+ '<(webrtc_root)/modules/modules.gyp:rtp_rtcp',
+ '<(webrtc_root)/modules/modules.gyp:udp_transport',
+ '<(webrtc_root)/modules/modules.gyp:webrtc_utility',
+ '<(webrtc_root)/system_wrappers/source/system_wrappers.gyp:system_wrappers',
+ ],
+ 'include_dirs': [
+ 'include',
+ ],
+ 'sources': [
+ 'channel_unittest.cc',
+ 'output_mixer_unittest.cc',
+ 'transmit_mixer_unittest.cc',
+ 'voe_audio_processing_unittest.cc',
+ ],
+ },
+ ], # targets
+ }], # include_tests
+ ], # conditions
+}
diff --git a/voice_engine/voice_engine_defines.h b/voice_engine/voice_engine_defines.h
new file mode 100644
index 0000000..13183dd
--- /dev/null
+++ b/voice_engine/voice_engine_defines.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+/*
+ * This file contains common constants for VoiceEngine, as well as
+ * platform specific settings and include files.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
+#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
+
+#include "common_types.h"
+#include "engine_configurations.h"
+
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+
+// VolumeControl
+enum { kMinVolumeLevel = 0 };
+enum { kMaxVolumeLevel = 255 };
+// Min scale factor for per-channel volume scaling
+const float kMinOutputVolumeScaling = 0.0f;
+// Max scale factor for per-channel volume scaling
+const float kMaxOutputVolumeScaling = 10.0f;
+// Min scale factor for output volume panning
+const float kMinOutputVolumePanning = 0.0f;
+// Max scale factor for output volume panning
+const float kMaxOutputVolumePanning = 1.0f;
+
+// DTMF
+enum { kMinDtmfEventCode = 0 }; // DTMF digit "0"
+enum { kMaxDtmfEventCode = 15 }; // DTMF digit "D"
+enum { kMinTelephoneEventCode = 0 }; // RFC4733 (Section 2.3.1)
+enum { kMaxTelephoneEventCode = 255 }; // RFC4733 (Section 2.3.1)
+enum { kMinTelephoneEventDuration = 100 };
+enum { kMaxTelephoneEventDuration = 60000 }; // Actual limit is 2^16
+enum { kMinTelephoneEventAttenuation = 0 }; // 0 dBm0
+enum { kMaxTelephoneEventAttenuation = 36 }; // -36 dBm0
+enum { kMinTelephoneEventSeparationMs = 100 }; // Min delta time between two
+ // telephone events
+enum { kVoiceEngineMaxIpPacketSizeBytes = 1500 }; // assumes Ethernet
+
+enum { kVoiceEngineMaxModuleVersionSize = 960 };
+
+// Base
+enum { kVoiceEngineVersionMaxMessageSize = 1024 };
+
+// Encryption
+// SRTP uses 30 bytes key length
+enum { kVoiceEngineMaxSrtpKeyLength = 30 };
+// SRTP minimum key/tag length for encryption level
+enum { kVoiceEngineMinSrtpEncryptLength = 16 };
+// SRTP maximum key/tag length for encryption level
+enum { kVoiceEngineMaxSrtpEncryptLength = 256 };
+// SRTP maximum key/tag length for authentication level,
+// HMAC SHA1 authentication type
+enum { kVoiceEngineMaxSrtpAuthSha1Length = 20 };
+// SRTP maximum tag length for authentication level,
+// null authentication type
+enum { kVoiceEngineMaxSrtpTagAuthNullLength = 12 };
+// SRTP maximum key length for authentication level,
+// null authentication type
+enum { kVoiceEngineMaxSrtpKeyAuthNullLength = 256 };
+
+// Audio processing
+enum { kVoiceEngineAudioProcessingDeviceSampleRateHz = 48000 };
+
+// Codec
+// Min init target rate for iSAC-wb
+enum { kVoiceEngineMinIsacInitTargetRateBpsWb = 10000 };
+// Max init target rate for iSAC-wb
+enum { kVoiceEngineMaxIsacInitTargetRateBpsWb = 32000 };
+// Min init target rate for iSAC-swb
+enum { kVoiceEngineMinIsacInitTargetRateBpsSwb = 10000 };
+// Max init target rate for iSAC-swb
+enum { kVoiceEngineMaxIsacInitTargetRateBpsSwb = 56000 };
+// Lowest max rate for iSAC-wb
+enum { kVoiceEngineMinIsacMaxRateBpsWb = 32000 };
+// Highest max rate for iSAC-wb
+enum { kVoiceEngineMaxIsacMaxRateBpsWb = 53400 };
+// Lowest max rate for iSAC-swb
+enum { kVoiceEngineMinIsacMaxRateBpsSwb = 32000 };
+// Highest max rate for iSAC-swb
+enum { kVoiceEngineMaxIsacMaxRateBpsSwb = 107000 };
+// Lowest max payload size for iSAC-wb
+enum { kVoiceEngineMinIsacMaxPayloadSizeBytesWb = 120 };
+// Highest max payload size for iSAC-wb
+enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesWb = 400 };
+// Lowest max payload size for iSAC-swb
+enum { kVoiceEngineMinIsacMaxPayloadSizeBytesSwb = 120 };
+// Highest max payload size for iSAC-swb
+enum { kVoiceEngineMaxIsacMaxPayloadSizeBytesSwb = 600 };
+
+// VideoSync
+// Lowest minimum playout delay
+enum { kVoiceEngineMinMinPlayoutDelayMs = 0 };
+// Highest minimum playout delay
+enum { kVoiceEngineMaxMinPlayoutDelayMs = 1000 };
+
+// Network
+// Min packet-timeout time for received RTP packets
+enum { kVoiceEngineMinPacketTimeoutSec = 1 };
+// Max packet-timeout time for received RTP packets
+enum { kVoiceEngineMaxPacketTimeoutSec = 150 };
+// Min sample time for dead-or-alive detection
+enum { kVoiceEngineMinSampleTimeSec = 1 };
+// Max sample time for dead-or-alive detection
+enum { kVoiceEngineMaxSampleTimeSec = 150 };
+
+// RTP/RTCP
+// Min 4-bit ID for RTP extension (see section 4.2 in RFC 5285)
+enum { kVoiceEngineMinRtpExtensionId = 1 };
+// Max 4-bit ID for RTP extension
+enum { kVoiceEngineMaxRtpExtensionId = 14 };
+
+} // namespace webrtc
+
+// TODO(andrew): we shouldn't be using the precompiler for this.
+// Use enums or bools as appropriate.
+#define WEBRTC_AUDIO_PROCESSING_OFF false
+
+#define WEBRTC_VOICE_ENGINE_HP_DEFAULT_STATE true
+ // AudioProcessing HP is ON
+#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing NS off
+#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE true
+ // AudioProcessing AGC on
+#define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing EC off
+#define WEBRTC_VOICE_ENGINE_VAD_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing off
+#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing RX AGC off
+#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing RX NS off
+#define WEBRTC_VOICE_ENGINE_RX_HP_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ // AudioProcessing RX High Pass Filter off
+
+#define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE NoiseSuppression::kModerate
+ // AudioProcessing NS moderate suppression
+#define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE GainControl::kAdaptiveAnalog
+ // AudioProcessing AGC analog digital combined
+#define WEBRTC_VOICE_ENGINE_RX_AGC_DEFAULT_MODE GainControl::kAdaptiveDigital
+ // AudioProcessing AGC mode
+#define WEBRTC_VOICE_ENGINE_RX_NS_DEFAULT_MODE NoiseSuppression::kModerate
+ // AudioProcessing RX NS mode
+
+// Macros
+// Comparison of two strings without regard to case
+#define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+// Compares characters of two strings without regard to case
+#define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
+
+// ----------------------------------------------------------------------------
+// Build information macros
+// ----------------------------------------------------------------------------
+
+#if defined(_DEBUG)
+#define BUILDMODE "d"
+#elif defined(DEBUG)
+#define BUILDMODE "d"
+#elif defined(NDEBUG)
+#define BUILDMODE "r"
+#else
+#define BUILDMODE "?"
+#endif
+
+#define BUILDTIME __TIME__
+#define BUILDDATE __DATE__
+
+// Example: "Oct 10 2002 12:05:30 r"
+#define BUILDINFO BUILDDATE " " BUILDTIME " " BUILDMODE
+
+// ----------------------------------------------------------------------------
+// Macros
+// ----------------------------------------------------------------------------
+
+#if (defined(_DEBUG) && defined(_WIN32) && (_MSC_VER >= 1400))
+ #include <windows.h>
+ #include <stdio.h>
+ #define DEBUG_PRINT(...) \
+ { \
+ char msg[256]; \
+ sprintf(msg, __VA_ARGS__); \
+ OutputDebugStringA(msg); \
+ }
+#else
+ // No-op fallback for compilers without variadic-macro support (e.g. Visual Studio 2003).
+ #define DEBUG_PRINT(exp) ((void)0)
+#endif // defined(_DEBUG) && defined(_WIN32)
+
+#define CHECK_CHANNEL(channel) if (CheckChannel(channel) == -1) return -1;
+
+// ----------------------------------------------------------------------------
+// Default Trace filter
+// ----------------------------------------------------------------------------
+
+#define WEBRTC_VOICE_ENGINE_DEFAULT_TRACE_FILTER \
+ kTraceStateInfo | kTraceWarning | kTraceError | kTraceCritical | \
+ kTraceApiCall
+
+// ----------------------------------------------------------------------------
+// Inline functions
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+
+inline int VoEId(const int veId, const int chId) // Packs engine id (high 16 bits) and channel id (low 16 bits) into one trace id.
+{
+ if (chId == -1)
+ {
+ const int dummyChannel(99); // Sentinel channel used for engine-wide (non-channel) traces.
+ return (int) ((veId << 16) + dummyChannel);
+ }
+ return (int) ((veId << 16) + chId);
+}
+
+inline int VoEModuleId(const int veId, const int chId) // Module id: engine id in high 16 bits, channel id in low 16 bits (no -1 sentinel).
+{
+ return (int) ((veId << 16) + chId);
+}
+
+// Convert module ID to internal VoE channel ID
+// Convert module ID to internal VoE channel ID
+inline int VoEChannelId(const int moduleId) // Inverse of VoEModuleId(): extracts the low 16 bits.
+{
+ return (int) (moduleId & 0xffff);
+}
+
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Platform settings
+// ----------------------------------------------------------------------------
+
+// *** WINDOWS ***
+
+#if defined(_WIN32)
+
+ #pragma comment( lib, "winmm.lib" )
+
+ #ifndef WEBRTC_EXTERNAL_TRANSPORT
+ #pragma comment( lib, "ws2_32.lib" )
+ #endif
+
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+// Max number of supported channels
+enum { kVoiceEngineMaxNumOfChannels = 32 };
+// Max number of channels which can be played out simultaneously
+enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Defines
+// ----------------------------------------------------------------------------
+
+ #include <windows.h>
+ #include <mmsystem.h> // timeGetTime
+
+ #define GET_TIME_IN_MS() ::timeGetTime()
+ #define SLEEP(x) ::Sleep(x)
+ // Comparison of two strings without regard to case
+ #define STR_CASE_CMP(x,y) ::_stricmp(x,y)
+ // Compares characters of two strings without regard to case
+ #define STR_NCASE_CMP(x,y,n) ::_strnicmp(x,y,n)
+
+// Default device for Windows PC
+ #define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE \
+ AudioDeviceModule::kDefaultCommunicationDevice
+
+#endif // #if defined(_WIN32)
+
+// *** LINUX ***
+
+#ifdef WEBRTC_LINUX
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#ifndef QNX
+ #include <linux/net.h>
+#ifndef ANDROID
+ #include <sys/soundcard.h>
+#endif // ANDROID
+#endif // QNX
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <time.h>
+#include <sys/time.h>
+
+#define DWORD unsigned long int
+#define WINAPI
+#define LPVOID void *
+#define FALSE 0
+#define TRUE 1
+#define UINT unsigned int
+#define UCHAR unsigned char
+#define TCHAR char
+#ifdef QNX
+#define _stricmp stricmp
+#else
+#define _stricmp strcasecmp
+#endif
+#define GetLastError() errno
+#define WSAGetLastError() errno
+#define LPCTSTR const char*
+#define LPCSTR const char*
+#define wsprintf sprintf
+#define TEXT(a) a
+#define _ftprintf fprintf
+#define _tcslen strlen
+#define FAR
+#define __cdecl
+#define LPSOCKADDR struct sockaddr *
+
+namespace // NOTE(review): anonymous namespace in a header gives every TU its own copy - confirm intended.
+{
+ void Sleep(unsigned long x) // Millisecond sleep shim emulating Win32 Sleep().
+ {
+ timespec t;
+ t.tv_sec = x/1000;
+ t.tv_nsec = (x-(x/1000)*1000)*1000000; // Remainder milliseconds converted to nanoseconds.
+ nanosleep(&t,NULL);
+ }
+
+ DWORD timeGetTime() // Wall-clock milliseconds via gettimeofday(); subject to clock adjustments.
+ {
+ struct timeval tv;
+ struct timezone tz;
+ unsigned long val;
+
+ gettimeofday(&tv, &tz);
+ val= tv.tv_sec*1000+ tv.tv_usec/1000;
+ return(val);
+ }
+}
+
+#define SLEEP(x) ::Sleep(x)
+#define GET_TIME_IN_MS timeGetTime
+
+// Default device for Linux and Android
+#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
+
+#ifdef ANDROID
+
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+ // Max number of supported channels
+ enum { kVoiceEngineMaxNumOfChannels = 2 };
+ // Max number of channels which can be played out simultaneously
+ enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Defines
+// ----------------------------------------------------------------------------
+
+ // Always excluded for Android builds
+ #undef WEBRTC_CODEC_ISAC
+ #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+ #undef WEBRTC_CONFERENCING
+ #undef WEBRTC_TYPING_DETECTION
+
+ // Default audio processing states
+ #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
+ #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
+ #undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
+ #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+
+ // Default audio processing modes
+ #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
+ #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
+ #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
+ NoiseSuppression::kModerate
+ #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
+ GainControl::kAdaptiveDigital
+
+ #define ANDROID_NOT_SUPPORTED(stat) \
+ stat.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
+ "API call not supported"); \
+ return -1;
+
+#else // LINUX PC
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+ // Max number of supported channels
+ enum { kVoiceEngineMaxNumOfChannels = 32 };
+ // Max number of channels which can be played out simultaneously
+ enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Defines
+// ----------------------------------------------------------------------------
+
+ #define ANDROID_NOT_SUPPORTED(stat)
+
+#endif // ANDROID - LINUX PC
+
+#else
+#define ANDROID_NOT_SUPPORTED(stat)
+#endif // #ifdef WEBRTC_LINUX
+
+// *** WEBRTC_MAC ***
+// including iPhone
+
+#ifdef WEBRTC_MAC
+
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <sys/time.h>
+#include <time.h>
+#include <AudioUnit/AudioUnit.h>
+#if !defined(WEBRTC_IOS)
+ #include <CoreServices/CoreServices.h>
+ #include <CoreAudio/CoreAudio.h>
+ #include <AudioToolbox/DefaultAudioOutput.h>
+ #include <AudioToolbox/AudioConverter.h>
+ #include <CoreAudio/HostTime.h>
+#endif
+
+#define DWORD unsigned long int
+#define WINAPI
+#define LPVOID void *
+#define FALSE 0
+#define TRUE 1
+#define SOCKADDR_IN struct sockaddr_in
+#define UINT unsigned int
+#define UCHAR unsigned char
+#define TCHAR char
+#define _stricmp strcasecmp
+#define GetLastError() errno
+#define WSAGetLastError() errno
+#define LPCTSTR const char*
+#define wsprintf sprintf
+#define TEXT(a) a
+#define _ftprintf fprintf
+#define _tcslen strlen
+#define FAR
+#define __cdecl
+#define LPSOCKADDR struct sockaddr *
+#define LPCSTR const char*
+#define ULONG unsigned long
+
+namespace // NOTE(review): anonymous namespace in a header gives every TU its own copy - confirm intended.
+{
+ void Sleep(unsigned long x) // Millisecond sleep shim emulating Win32 Sleep().
+ {
+ timespec t;
+ t.tv_sec = x/1000;
+ t.tv_nsec = (x-(x/1000)*1000)*1000000; // Remainder milliseconds converted to nanoseconds.
+ nanosleep(&t,NULL);
+ }
+
+ DWORD WebRtcTimeGetTime() // Wall-clock milliseconds via gettimeofday(); subject to clock adjustments.
+ {
+ struct timeval tv;
+ struct timezone tz;
+ unsigned long val;
+
+ gettimeofday(&tv, &tz);
+ val= tv.tv_sec*1000+ tv.tv_usec/1000;
+ return(val);
+ }
+}
+
+#define SLEEP(x) ::Sleep(x)
+#define GET_TIME_IN_MS WebRtcTimeGetTime
+
+// Default device for Mac and iPhone
+#define WEBRTC_VOICE_ENGINE_DEFAULT_DEVICE 0
+
+// iPhone specific
+#if defined(WEBRTC_IOS)
+
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+ // Max number of supported channels
+ enum { kVoiceEngineMaxNumOfChannels = 2 };
+ // Max number of channels which can be played out simultaneously
+ enum { kVoiceEngineMaxNumOfActiveChannels = 2 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Defines
+// ----------------------------------------------------------------------------
+
+ // Always excluded for iPhone builds
+ #undef WEBRTC_CODEC_ISAC
+ #undef WEBRTC_VOE_EXTERNAL_REC_AND_PLAYOUT
+
+ #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE
+ #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE
+ #undef WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE
+ #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+ #define WEBRTC_VOICE_ENGINE_EC_DEFAULT_STATE WEBRTC_AUDIO_PROCESSING_OFF
+
+ #undef WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE
+ #undef WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE
+ #define WEBRTC_VOICE_ENGINE_NS_DEFAULT_MODE \
+ NoiseSuppression::kModerate
+ #define WEBRTC_VOICE_ENGINE_AGC_DEFAULT_MODE \
+ GainControl::kAdaptiveDigital
+
+ #define IPHONE_NOT_SUPPORTED(stat) \
+ stat.SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, \
+ "API call not supported"); \
+ return -1;
+
+#else // Non-iPhone
+
+// ----------------------------------------------------------------------------
+// Enumerators
+// ----------------------------------------------------------------------------
+
+namespace webrtc
+{
+ // Max number of supported channels
+ enum { kVoiceEngineMaxNumOfChannels = 32 };
+ // Max number of channels which can be played out simultaneously
+ enum { kVoiceEngineMaxNumOfActiveChannels = 16 };
+} // namespace webrtc
+
+// ----------------------------------------------------------------------------
+// Defines
+// ----------------------------------------------------------------------------
+
+ #define IPHONE_NOT_SUPPORTED(stat)
+#endif
+
+#else
+#define IPHONE_NOT_SUPPORTED(stat)
+#endif // #ifdef WEBRTC_MAC
+
+
+
+#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_DEFINES_H
diff --git a/voice_engine/voice_engine_impl.cc b/voice_engine/voice_engine_impl.cc
new file mode 100644
index 0000000..18f841c
--- /dev/null
+++ b/voice_engine/voice_engine_impl.cc
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#if defined(WEBRTC_ANDROID) && !defined(WEBRTC_ANDROID_OPENSLES)
+#include "modules/audio_device/android/audio_device_jni_android.h"
+#endif
+
+#include "voice_engine_impl.h"
+#include "trace.h"
+
+namespace webrtc
+{
+
+// Counter to ensure that we can add a correct ID in all static trace
+// methods. It is not the nicest solution, especially not since we already
+// have a counter in VoEBaseImpl. In other words, there is room for
+// improvement here.
+static WebRtc_Word32 gVoiceEngineInstanceCounter = 0;
+
+extern "C"
+{
+// Exported with C linkage so the engine can also be obtained from a
+// dynamically loaded library (see the _WIN32 branch of VoiceEngine::Create).
+WEBRTC_DLLEXPORT VoiceEngine* GetVoiceEngine();
+
+// Heap-allocates a VoiceEngineImpl, takes the first reference (released in
+// VoiceEngine::Delete) and bumps the global instance counter used for
+// trace IDs.
+// The reinterpret_cast is intentionally paired with the reverse cast in
+// VoiceEngine::Delete(); the pointer is only ever cast back to
+// VoiceEngineImpl* before use.
+// NOTE(review): the NULL check is dead code on conforming compilers --
+// plain operator new either returns a valid pointer or throws.
+VoiceEngine* GetVoiceEngine()
+{
+ VoiceEngineImpl* self = new VoiceEngineImpl();
+ VoiceEngine* ve = reinterpret_cast<VoiceEngine*>(self);
+ if (ve != NULL)
+ {
+ self->AddRef(); // First reference. Released in VoiceEngine::Delete.
+ gVoiceEngineInstanceCounter++;
+ }
+ return ve;
+}
+} // extern "C"
+
+// Takes one additional reference on this engine instance and returns the
+// updated reference count. Balanced by VoiceEngineImpl::Release().
+int VoiceEngineImpl::AddRef() {
+ const int updated_count = ++_ref_count;
+ return updated_count;
+}
+
+// This implements the Release() method for all the inherited interfaces.
+// Drops one reference taken via GetVoiceEngine()/AddRef() and self-deletes
+// when the count reaches zero -- the instance must therefore always live
+// on the heap. Returns the reference count after the decrement.
+int VoiceEngineImpl::Release() {
+ int new_ref = --_ref_count;
+ assert(new_ref >= 0);
+ if (new_ref == 0) {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
+ "VoiceEngineImpl self deleting (voiceEngine=0x%p)",
+ this);
+
+ delete this;
+ }
+
+ // new_ref is a local copy, so returning it after "delete this" is safe.
+ return new_ref;
+}
+
+// Creates a voice engine instance.
+// On Windows, a testing hook first looks for VoiceEngineTestingDynamic.dll
+// and, if present, creates the engine through its exported GetVoiceEngine
+// entry point instead of the statically linked implementation. The module
+// handle is intentionally never freed: the returned engine object lives in
+// the DLL, so it must stay loaded for the engine's lifetime.
+// All other platforms (and the fallback path) use the local factory.
+VoiceEngine* VoiceEngine::Create()
+{
+#if (defined _WIN32)
+ // Local handle (renamed from member-style "hmod_"); see lifetime note
+ // above for why it is not released.
+ HMODULE hmod = LoadLibrary(TEXT("VoiceEngineTestingDynamic.dll"));
+
+ if (hmod)
+ {
+ typedef VoiceEngine* (*PfnGetVoiceEngine)(void);
+ // Named cast instead of a C-style cast for the FARPROC conversion.
+ PfnGetVoiceEngine pfn = reinterpret_cast<PfnGetVoiceEngine>(
+ GetProcAddress(hmod, "GetVoiceEngine"));
+ if (pfn)
+ {
+ VoiceEngine* self = pfn();
+ return (self);
+ }
+ }
+#endif
+
+ return GetVoiceEngine();
+}
+
+// Replaces the global trace level filter with |filter| (a bitmask of
+// kTrace* levels). Returns the result of Trace::SetLevelFilter.
+int VoiceEngine::SetTraceFilter(const unsigned int filter)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+ VoEId(gVoiceEngineInstanceCounter, -1),
+ "SetTraceFilter(filter=0x%x)", filter);
+
+ // Remember old filter
+ WebRtc_UWord32 oldFilter = 0;
+ Trace::LevelFilter(oldFilter);
+
+ // Set new filter
+ WebRtc_Word32 ret = Trace::SetLevelFilter(filter);
+
+ // If previous log was ignored, log again after changing filter
+ // (the trace above was dropped when everything was filtered out, so the
+ // API call would otherwise leave no record in the trace output).
+ if (kTraceNone == oldFilter)
+ {
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice, -1,
+ "SetTraceFilter(filter=0x%x)", filter);
+ }
+
+ return (ret);
+}
+
+// Redirects trace output to |fileNameUTF8|, optionally appending a file
+// counter to the name. Returns the result of Trace::SetTraceFile.
+// NOTE(review): the file is switched before the API call is traced --
+// presumably so this trace line lands in the new file; keep the order.
+int VoiceEngine::SetTraceFile(const char* fileNameUTF8,
+ const bool addFileCounter)
+{
+ int ret = Trace::SetTraceFile(fileNameUTF8, addFileCounter);
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+ VoEId(gVoiceEngineInstanceCounter, -1),
+ "SetTraceFile(fileNameUTF8=%s, addFileCounter=%d)",
+ fileNameUTF8, addFileCounter);
+ return (ret);
+}
+
+// Installs |callback| as the sink for trace output. Returns the result of
+// Trace::SetTraceCallback.
+int VoiceEngine::SetTraceCallback(TraceCallback* callback)
+{
+ WEBRTC_TRACE(kTraceApiCall, kTraceVoice,
+ VoEId(gVoiceEngineInstanceCounter, -1),
+ // %p, not %x: %x expects an unsigned int and truncates or
+ // misreads a 64-bit pointer argument. This also matches the
+ // "0x%p" usage in VoiceEngineImpl::Release().
+ "SetTraceCallback(callback=0x%p)", callback);
+ return (Trace::SetTraceCallback(callback));
+}
+
+// Releases the reference taken in GetVoiceEngine() and NULLs the caller's
+// pointer. Returns false only for a NULL input. A true return does not
+// guarantee destruction: outstanding sub-API references keep the instance
+// alive, in which case a warning is traced.
+bool VoiceEngine::Delete(VoiceEngine*& voiceEngine)
+{
+ if (voiceEngine == NULL)
+ return false;
+
+ // Must mirror the reinterpret_cast performed in GetVoiceEngine().
+ VoiceEngineImpl* s = reinterpret_cast<VoiceEngineImpl*>(voiceEngine);
+ // Release the reference that was added in GetVoiceEngine.
+ int ref = s->Release();
+ voiceEngine = NULL;
+
+ if (ref != 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, -1,
+ "VoiceEngine::Delete did not release the very last reference. "
+ "%d references remain.", ref);
+ }
+
+ return true;
+}
+
+// Hands the Java VM, JNI environment and application context to the
+// JNI-based Android audio device. Returns 0 on success. OpenSL ES builds
+// return 0 without touching the arguments (the Java objects are unused
+// there); non-Android platforms return -1.
+int VoiceEngine::SetAndroidObjects(void* javaVM, void* env, void* context)
+{
+#ifdef WEBRTC_ANDROID
+#ifdef WEBRTC_ANDROID_OPENSLES
+ return 0;
+#else
+ return AudioDeviceAndroidJni::SetAndroidAudioDeviceObjects(
+ javaVM, env, context);
+#endif
+#else
+ return -1;
+#endif
+}
+
+} //namespace webrtc
diff --git a/voice_engine/voice_engine_impl.h b/voice_engine/voice_engine_impl.h
new file mode 100644
index 0000000..7db77be
--- /dev/null
+++ b/voice_engine/voice_engine_impl.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
+#define WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H
+
+#include "atomic32.h"
+#include "engine_configurations.h"
+#include "voe_base_impl.h"
+
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+#include "voe_audio_processing_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+#include "voe_call_report_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+#include "voe_codec_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+#include "voe_dtmf_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+#include "voe_encryption_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+#include "voe_external_media_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+#include "voe_file_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+#include "voe_hardware_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+#include "voe_neteq_stats_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+#include "voe_network_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+#include "voe_rtp_rtcp_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+#include "voe_video_sync_impl.h"
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+#include "voe_volume_control_impl.h"
+#endif
+
+namespace webrtc
+{
+
+// Aggregates every enabled voice-engine sub-API into one object via
+// multiple inheritance. Which interfaces are compiled in is selected by
+// the WEBRTC_VOICE_ENGINE_*_API defines (see engine_configurations.h).
+// Lifetime is reference counted: GetVoiceEngine() takes the first
+// reference and Release() self-deletes when the count reaches zero.
+class VoiceEngineImpl : public voe::SharedData, // Must be the first base class
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+ public VoEAudioProcessingImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+ public VoECallReportImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+ public VoECodecImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+ public VoEDtmfImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+ public VoEEncryptionImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+ public VoEExternalMediaImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+ public VoEFileImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+ public VoEHardwareImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+ public VoENetEqStatsImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+ public VoENetworkImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+ public VoERTP_RTCPImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+ public VoEVideoSyncImpl,
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+ public VoEVolumeControlImpl,
+#endif
+ public VoEBaseImpl
+{
+public:
+ // Each sub-API base receives |this| so that they all share the state
+ // carried by the voe::SharedData first base class. The initializer list
+ // must mirror the #ifdef structure of the base-class list above.
+ VoiceEngineImpl() :
+#ifdef WEBRTC_VOICE_ENGINE_AUDIO_PROCESSING_API
+ VoEAudioProcessingImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CALL_REPORT_API
+ VoECallReportImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_CODEC_API
+ VoECodecImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_DTMF_API
+ VoEDtmfImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_ENCRYPTION_API
+ VoEEncryptionImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_EXTERNAL_MEDIA_API
+ VoEExternalMediaImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_FILE_API
+ VoEFileImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_HARDWARE_API
+ VoEHardwareImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETEQ_STATS_API
+ VoENetEqStatsImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_NETWORK_API
+ VoENetworkImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_RTP_RTCP_API
+ VoERTP_RTCPImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VIDEO_SYNC_API
+ VoEVideoSyncImpl(this),
+#endif
+#ifdef WEBRTC_VOICE_ENGINE_VOLUME_CONTROL_API
+ VoEVolumeControlImpl(this),
+#endif
+ VoEBaseImpl(this),
+ _ref_count(0)
+ {
+ }
+ // NOTE(review): destruction is expected to happen only via Release()
+ // reaching zero; the assert guards against deleting a live instance.
+ virtual ~VoiceEngineImpl()
+ {
+ assert(_ref_count.Value() == 0);
+ }
+
+ // Increments the reference count; returns the new count.
+ int AddRef();
+
+ // This implements the Release() method for all the inherited interfaces.
+ // Decrements the count, self-deletes at zero; returns the new count.
+ virtual int Release();
+
+private:
+ Atomic32 _ref_count;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_VOICE_ENGINE_VOICE_ENGINE_IMPL_H