Unify FrameType and VideoFrameType.

Prevents some heap allocation and frame-type conversion caused by
mismatching interfaces. It is also less confusing to have one type for this.

BUG=webrtc:5042
R=magjed@webrtc.org, mflodman@webrtc.org, henrik.lundin@webrtc.org, solenberg@webrtc.org, stefan@webrtc.org

Review URL: https://codereview.webrtc.org/1371043003

Cr-Commit-Position: refs/heads/master@{#10320}
diff --git a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
index 76a675d..d2ec552 100644
--- a/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
+++ b/talk/app/webrtc/java/jni/androidmediaencoder_jni.cc
@@ -86,10 +86,9 @@
   int32_t InitEncode(const webrtc::VideoCodec* codec_settings,
                      int32_t /* number_of_cores */,
                      size_t /* max_payload_size */) override;
-  int32_t Encode(
-      const webrtc::VideoFrame& input_image,
-      const webrtc::CodecSpecificInfo* /* codec_specific_info */,
-      const std::vector<webrtc::VideoFrameType>* frame_types) override;
+  int32_t Encode(const webrtc::VideoFrame& input_image,
+                 const webrtc::CodecSpecificInfo* /* codec_specific_info */,
+                 const std::vector<webrtc::FrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       webrtc::EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -121,7 +120,7 @@
   int32_t InitEncodeOnCodecThread(int width, int height, int kbps, int fps);
   int32_t EncodeOnCodecThread(
       const webrtc::VideoFrame& input_image,
-      const std::vector<webrtc::VideoFrameType>* frame_types);
+      const std::vector<webrtc::FrameType>* frame_types);
   int32_t RegisterEncodeCompleteCallbackOnCodecThread(
       webrtc::EncodedImageCallback* callback);
   int32_t ReleaseOnCodecThread();
@@ -338,7 +337,7 @@
 int32_t MediaCodecVideoEncoder::Encode(
     const webrtc::VideoFrame& frame,
     const webrtc::CodecSpecificInfo* /* codec_specific_info */,
-    const std::vector<webrtc::VideoFrameType>* frame_types) {
+    const std::vector<webrtc::FrameType>* frame_types) {
   return codec_thread_->Invoke<int32_t>(Bind(
       &MediaCodecVideoEncoder::EncodeOnCodecThread, this, frame, frame_types));
 }
@@ -501,7 +500,7 @@
 
 int32_t MediaCodecVideoEncoder::EncodeOnCodecThread(
     const webrtc::VideoFrame& frame,
-    const std::vector<webrtc::VideoFrameType>* frame_types) {
+    const std::vector<webrtc::FrameType>* frame_types) {
   CheckOnCodecThread();
   JNIEnv* jni = AttachCurrentThreadIfNeeded();
   ScopedLocalRefFrame local_ref_frame(jni);
diff --git a/talk/media/webrtc/fakewebrtcvideoengine.h b/talk/media/webrtc/fakewebrtcvideoengine.h
index d205839..8e4c7c8 100644
--- a/talk/media/webrtc/fakewebrtcvideoengine.h
+++ b/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -155,10 +155,9 @@
     return codec_settings_;
   }
 
-  virtual int32_t Encode(
-      const webrtc::VideoFrame& inputImage,
-      const webrtc::CodecSpecificInfo* codecSpecificInfo,
-      const std::vector<webrtc::VideoFrameType>* frame_types) {
+  virtual int32_t Encode(const webrtc::VideoFrame& inputImage,
+                         const webrtc::CodecSpecificInfo* codecSpecificInfo,
+                         const std::vector<webrtc::FrameType>* frame_types) {
     rtc::CritScope lock(&crit_);
     ++num_frames_encoded_;
     return WEBRTC_VIDEO_CODEC_OK;
diff --git a/webrtc/common_types.h b/webrtc/common_types.h
index c11c4d7..6b624bf 100644
--- a/webrtc/common_types.h
+++ b/webrtc/common_types.h
@@ -156,15 +156,22 @@
     kRecordingPreprocessing
 };
 
-enum FrameType
-{
-    kFrameEmpty            = 0,
-    kAudioFrameSpeech      = 1,
-    kAudioFrameCN          = 2,
-    kVideoFrameKey         = 3,    // independent frame
-    kVideoFrameDelta       = 4,    // depends on the previus frame
+enum FrameType {
+  kEmptyFrame = 0,
+  kAudioFrameSpeech = 1,
+  kAudioFrameCN = 2,
+  kVideoFrameKey = 3,
+  kVideoFrameDelta = 4,
+  // TODO(pbos): Remove below aliases (non-kVideo prefixed) as soon as no
+  // VideoEncoder implementation in Chromium uses them.
+  kKeyFrame = kVideoFrameKey,
+  kDeltaFrame = kVideoFrameDelta,
 };
 
+// TODO(pbos): Remove VideoFrameType when VideoEncoder implementations no longer
+// depend on it.
+using VideoFrameType = FrameType;
+
 // Statistics for an RTCP channel
 struct RtcpStatistics {
   RtcpStatistics()
diff --git a/webrtc/frame_callback.h b/webrtc/frame_callback.h
index 3098ba7..b7f2210 100644
--- a/webrtc/frame_callback.h
+++ b/webrtc/frame_callback.h
@@ -21,7 +21,7 @@
 
 struct EncodedFrame {
  public:
-  EncodedFrame() : data_(NULL), length_(0), frame_type_(kFrameEmpty) {}
+  EncodedFrame() : data_(NULL), length_(0), frame_type_(kEmptyFrame) {}
   EncodedFrame(const uint8_t* data, size_t length, FrameType frame_type)
     : data_(data), length_(length), frame_type_(frame_type) {}
 
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
index 9b5c17b..c6cd6de 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest.cc
@@ -46,7 +46,7 @@
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCoding::Config config;
     config.transport = this;
     acm_.reset(new AudioCodingImpl(config));
@@ -121,7 +121,7 @@
                    const uint8_t* payload_data,
                    size_t payload_len_bytes,
                    const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;
 
     rtp_header_.header.payloadType = payload_type;
diff --git a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
index 0142275..12ea300 100644
--- a/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/acm_receiver_unittest_oldapi.cc
@@ -46,7 +46,7 @@
       : timestamp_(0),
         packet_sent_(false),
         last_packet_send_timestamp_(timestamp_),
-        last_frame_type_(kFrameEmpty) {
+        last_frame_type_(kEmptyFrame) {
     AudioCodingModule::Config config;
     acm_.reset(new AudioCodingModuleImpl(config));
     receiver_.reset(new AcmReceiver(config));
@@ -120,7 +120,7 @@
                const uint8_t* payload_data,
                size_t payload_len_bytes,
                const RTPFragmentationHeader* fragmentation) override {
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;
 
     rtp_header_.header.payloadType = payload_type;
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
index a652278..879af49 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_impl.cc
@@ -171,7 +171,7 @@
   ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
   FrameType frame_type;
   if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
-    frame_type = kFrameEmpty;
+    frame_type = kEmptyFrame;
     encoded_info.payload_type = previous_pltype;
   } else {
     RTC_DCHECK_GT(encode_buffer_.size(), 0u);
diff --git a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
index 01c8bb8..e36d4e6 100644
--- a/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
+++ b/webrtc/modules/audio_coding/main/acm2/audio_coding_module_unittest_oldapi.cc
@@ -92,7 +92,7 @@
  public:
   PacketizationCallbackStubOldApi()
       : num_calls_(0),
-        last_frame_type_(kFrameEmpty),
+        last_frame_type_(kEmptyFrame),
         last_payload_type_(-1),
         last_timestamp_(0),
         crit_sect_(CriticalSectionWrapper::CreateCriticalSection()) {}
@@ -416,18 +416,18 @@
       int ix;
       FrameType type;
     } expectation[] = {{2, kAudioFrameCN},
-                       {5, kFrameEmpty},
-                       {8, kFrameEmpty},
+                       {5, kEmptyFrame},
+                       {8, kEmptyFrame},
                        {11, kAudioFrameCN},
-                       {14, kFrameEmpty},
-                       {17, kFrameEmpty},
+                       {14, kEmptyFrame},
+                       {17, kEmptyFrame},
                        {20, kAudioFrameCN},
-                       {23, kFrameEmpty},
-                       {26, kFrameEmpty},
-                       {29, kFrameEmpty},
+                       {23, kEmptyFrame},
+                       {26, kEmptyFrame},
+                       {29, kEmptyFrame},
                        {32, kAudioFrameCN},
-                       {35, kFrameEmpty},
-                       {38, kFrameEmpty}};
+                       {35, kEmptyFrame},
+                       {38, kEmptyFrame}};
     for (int i = 0; i < kLoops; ++i) {
       int num_calls_before = packet_cb_.num_calls();
       EXPECT_EQ(i / blocks_per_packet, num_calls_before);
@@ -447,7 +447,7 @@
 
 // Checks that the transport callback is invoked once per frame period of the
 // underlying speech encoder, even when comfort noise is produced.
-// Also checks that the frame type is kAudioFrameCN or kFrameEmpty.
+// Also checks that the frame type is kAudioFrameCN or kEmptyFrame.
 // This test and the next check the same thing, but differ in the order of
 // speech codec and CNG registration.
 TEST_F(AudioCodingModuleTestWithComfortNoiseOldApi,
diff --git a/webrtc/modules/audio_coding/main/test/Channel.cc b/webrtc/modules/audio_coding/main/test/Channel.cc
index 779718d..1b0a610 100644
--- a/webrtc/modules/audio_coding/main/test/Channel.cc
+++ b/webrtc/modules/audio_coding/main/test/Channel.cc
@@ -42,7 +42,7 @@
   } else {
     rtpInfo.type.Audio.isCNG = false;
   }
-  if (frameType == kFrameEmpty) {
+  if (frameType == kEmptyFrame) {
     // When frame is empty, we should not transmit it. The frame size of the
     // next non-empty frame will be based on the previous frame size.
     _useLastFrameSize = _lastFrameSizeSample > 0;
diff --git a/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc b/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
index 21d97f1..85c2579 100644
--- a/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
+++ b/webrtc/modules/audio_coding/main/test/TestAllCodecs.cc
@@ -74,7 +74,7 @@
   } else {
     rtp_info.type.Audio.isCNG = false;
   }
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame.
     return 0;
   }
diff --git a/webrtc/modules/audio_coding/main/test/TestStereo.cc b/webrtc/modules/audio_coding/main/test/TestStereo.cc
index 32ecadf..b0786be 100644
--- a/webrtc/modules/audio_coding/main/test/TestStereo.cc
+++ b/webrtc/modules/audio_coding/main/test/TestStereo.cc
@@ -58,7 +58,7 @@
   rtp_info.header.sequenceNumber = seq_no_++;
   rtp_info.header.payloadType = payload_type;
   rtp_info.header.timestamp = timestamp;
-  if (frame_type == kFrameEmpty) {
+  if (frame_type == kEmptyFrame) {
     // Skip this frame
     return 0;
   }
diff --git a/webrtc/modules/audio_coding/main/test/TestVADDTX.cc b/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
index 0e42b9f..bd0335a 100644
--- a/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
+++ b/webrtc/modules/audio_coding/main/test/TestVADDTX.cc
@@ -44,7 +44,7 @@
 
 void ActivityMonitor::PrintStatistics() {
   printf("\n");
-  printf("kFrameEmpty       %u\n", counter_[kFrameEmpty]);
+  printf("kEmptyFrame       %u\n", counter_[kEmptyFrame]);
   printf("kAudioFrameSpeech %u\n", counter_[kAudioFrameSpeech]);
   printf("kAudioFrameCN     %u\n", counter_[kAudioFrameCN]);
   printf("kVideoFrameKey    %u\n", counter_[kVideoFrameKey]);
@@ -248,7 +248,7 @@
       32000, 1, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/testfile32kHz", "pcm"),
       32000, 1, out_filename, true, expects);
 
@@ -256,13 +256,13 @@
   out_filename = webrtc::test::OutputPath() + "testOpusDtx_outFile_stereo.pcm";
   RegisterCodec(kOpusStereo);
   EXPECT_EQ(0, acm_send_->DisableOpusDtx());
-  expects[kFrameEmpty] = 0;
+  expects[kEmptyFrame] = 0;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, false, expects);
 
   EXPECT_EQ(0, acm_send_->EnableOpusDtx());
 
-  expects[kFrameEmpty] = 1;
+  expects[kEmptyFrame] = 1;
   Run(webrtc::test::ResourcePath("audio_coding/teststereo32kHz", "pcm"),
       32000, 2, out_filename, true, expects);
 #endif
diff --git a/webrtc/modules/audio_coding/main/test/TestVADDTX.h b/webrtc/modules/audio_coding/main/test/TestVADDTX.h
index 8ef4228..d34b99f 100644
--- a/webrtc/modules/audio_coding/main/test/TestVADDTX.h
+++ b/webrtc/modules/audio_coding/main/test/TestVADDTX.h
@@ -29,7 +29,7 @@
   void ResetStatistics();
   void GetStatistics(uint32_t* stats);
  private:
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
@@ -60,7 +60,7 @@
   // 0  : there have been no packets of type |x|,
   // 1  : there have been packets of type |x|,
   // with |x| indicates the following packet types
-  // 0 - kFrameEmpty
+  // 0 - kEmptyFrame
   // 1 - kAudioFrameSpeech
   // 2 - kAudioFrameCN
   // 3 - kVideoFrameKey (not used by audio)
diff --git a/webrtc/modules/audio_coding/main/test/utility.cc b/webrtc/modules/audio_coding/main/test/utility.cc
index 949ca617b..86e49f1 100644
--- a/webrtc/modules/audio_coding/main/test/utility.cc
+++ b/webrtc/modules/audio_coding/main/test/utility.cc
@@ -288,7 +288,7 @@
 }
 
 void VADCallback::PrintFrameTypes() {
-  printf("kFrameEmpty......... %d\n", _numFrameTypes[kFrameEmpty]);
+  printf("kEmptyFrame......... %d\n", _numFrameTypes[kEmptyFrame]);
   printf("kAudioFrameSpeech... %d\n", _numFrameTypes[kAudioFrameSpeech]);
   printf("kAudioFrameCN....... %d\n", _numFrameTypes[kAudioFrameCN]);
   printf("kVideoFrameKey...... %d\n", _numFrameTypes[kVideoFrameKey]);
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
index 3ad5686..1a14b55 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_format_h264_unittest.cc
@@ -83,7 +83,7 @@
   fragmentation.fragmentationOffset[0] = 0;
   fragmentation.fragmentationLength[0] = frame_size;
   rtc::scoped_ptr<RtpPacketizer> packetizer(RtpPacketizer::Create(
-      kRtpVideoH264, max_payload_size, NULL, kFrameEmpty));
+      kRtpVideoH264, max_payload_size, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame.get(), frame_size, &fragmentation);
 
   rtc::scoped_ptr<uint8_t[]> packet(new uint8_t[max_payload_size]);
@@ -157,7 +157,7 @@
   fragmentation.fragmentationOffset[0] = 0;
   fragmentation.fragmentationLength[0] = sizeof(frame);
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, sizeof(frame), &fragmentation);
   uint8_t packet[kMaxPayloadSize] = {0};
   size_t length = 0;
@@ -186,7 +186,7 @@
   frame[fragmentation.fragmentationOffset[1]] = 0x01;
 
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
 
   uint8_t packet[kMaxPayloadSize] = {0};
@@ -223,7 +223,7 @@
   fragmentation.fragmentationLength[2] =
       kNalHeaderSize + kFrameSize - kPayloadOffset;
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
 
   uint8_t packet[kMaxPayloadSize] = {0};
@@ -258,7 +258,7 @@
   fragmentation.fragmentationLength[2] =
       kNalHeaderSize + kFrameSize - kPayloadOffset;
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
 
   uint8_t packet[kMaxPayloadSize] = {0};
@@ -306,7 +306,7 @@
     }
   }
   rtc::scoped_ptr<RtpPacketizer> packetizer(
-      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kFrameEmpty));
+      RtpPacketizer::Create(kRtpVideoH264, kMaxPayloadSize, NULL, kEmptyFrame));
   packetizer->SetPayloadData(frame, kFrameSize, &fragmentation);
 
   // First expecting two FU-A packets.
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
index 0a26f9e..1a56c63 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender.cc
@@ -35,7 +35,8 @@
 
 const char* FrameTypeToString(FrameType frame_type) {
   switch (frame_type) {
-    case kFrameEmpty: return "empty";
+    case kEmptyFrame:
+      return "empty";
     case kAudioFrameSpeech: return "audio_speech";
     case kAudioFrameCN: return "audio_cn";
     case kVideoFrameKey: return "video_key";
@@ -509,7 +510,7 @@
     TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp,
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN ||
-           frame_type == kFrameEmpty);
+           frame_type == kEmptyFrame);
 
     ret_val = audio_->SendAudio(frame_type, payload_type, capture_timestamp,
                                 payload_data, payload_size, fragmentation);
@@ -518,7 +519,7 @@
                             "Send", "type", FrameTypeToString(frame_type));
     assert(frame_type != kAudioFrameSpeech && frame_type != kAudioFrameCN);
 
-    if (frame_type == kFrameEmpty)
+    if (frame_type == kEmptyFrame)
       return 0;
 
     ret_val =
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index 3f55db4..30842bb 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -208,8 +208,8 @@
   // A source MAY send events and coded audio packets for the same time
   // but we don't support it
   if (_dtmfEventIsOn) {
-    if (frameType == kFrameEmpty) {
-      // kFrameEmpty is used to drive the DTMF when in CN mode
+    if (frameType == kEmptyFrame) {
+      // kEmptyFrame is used to drive the DTMF when in CN mode
       // it can be triggered more frequently than we want to send the
       // DTMF packets.
       if (packet_size_samples > (captureTimeStamp - _dtmfTimestampLastSent)) {
@@ -259,7 +259,7 @@
     return 0;
   }
   if (payloadSize == 0 || payloadData == NULL) {
-    if (frameType == kFrameEmpty) {
+    if (frameType == kEmptyFrame) {
       // we don't send empty audio RTP packets
       // no error since we use it to drive DTMF when we use VAD
       return 0;
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index 6d30263..e4ace67 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -1266,7 +1266,7 @@
 // audio channel.
 // This test checks the marker bit for the first packet and the consequent
 // packets of the same telephone event. Since it is specifically for DTMF
-// events, ignoring audio packets and sending kFrameEmpty instead of those.
+// events, ignoring audio packets and sending kEmptyFrame instead of those.
 TEST_F(RtpSenderAudioTest, CheckMarkerBitForTelephoneEvents) {
   char payload_name[RTP_PAYLOAD_NAME_SIZE] = "telephone-event";
   uint8_t payload_type = 126;
@@ -1284,13 +1284,13 @@
   // During start, it takes the starting timestamp as last sent timestamp.
   // The duration is calculated as the difference of current and last sent
   // timestamp. So for first call it will skip since the duration is zero.
-  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
                                              capture_time_ms, 0, nullptr, 0,
                                              nullptr));
   // DTMF Sample Length is (Frequency/1000) * Duration.
   // So in this case, it is (8000/1000) * 500 = 4000.
   // Sending it as two packets.
-  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
                                              capture_time_ms + 2000, 0, nullptr,
                                              0, nullptr));
   rtc::scoped_ptr<webrtc::RtpHeaderParser> rtp_parser(
@@ -1303,7 +1303,7 @@
   // Marker Bit should be set to 1 for first packet.
   EXPECT_TRUE(rtp_header.markerBit);
 
-  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kFrameEmpty, payload_type,
+  ASSERT_EQ(0, rtp_sender_->SendOutgoingData(kEmptyFrame, payload_type,
                                              capture_time_ms + 4000, 0, nullptr,
                                              0, nullptr));
   ASSERT_TRUE(rtp_parser->Parse(transport_.last_sent_packet_,
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
index 69e52a5..22ace50 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.cc
@@ -242,7 +242,7 @@
 int H264VideoToolboxEncoder::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (input_image.IsZeroSize()) {
     // It's possible to get zero sizes as a signal to produce keyframes (this
     // happens for internal sources). But this shouldn't happen in
diff --git a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
index 28cd63e..f4fb86f 100644
--- a/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
+++ b/webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h
@@ -38,7 +38,7 @@
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 
diff --git a/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h b/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
index 35f7b3e..e54e78d 100644
--- a/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
+++ b/webrtc/modules/video_coding/codecs/i420/main/interface/i420.h
@@ -50,7 +50,7 @@
 //                                <0 - Error
   int Encode(const VideoFrame& inputImage,
              const CodecSpecificInfo* /*codecSpecificInfo*/,
-             const std::vector<VideoFrameType>* /*frame_types*/) override;
+             const std::vector<FrameType>* /*frame_types*/) override;
 
 // Register an encode complete callback object.
 //
diff --git a/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc b/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
index 5ac785a..065a2d7 100644
--- a/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
+++ b/webrtc/modules/video_coding/codecs/i420/main/source/i420.cc
@@ -74,7 +74,7 @@
 
 int I420Encoder::Encode(const VideoFrame& inputImage,
                         const CodecSpecificInfo* /*codecSpecificInfo*/,
-                        const std::vector<VideoFrameType>* /*frame_types*/) {
+                        const std::vector<FrameType>* /*frame_types*/) {
   if (!_inited) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
diff --git a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
index 5710446..5243d9a 100644
--- a/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
+++ b/webrtc/modules/video_coding/codecs/interface/mock/mock_video_codec_interface.h
@@ -35,7 +35,7 @@
   MOCK_METHOD3(Encode,
                int32_t(const VideoFrame& inputImage,
                        const CodecSpecificInfo* codecSpecificInfo,
-                       const std::vector<VideoFrameType>* frame_types));
+                       const std::vector<FrameType>* frame_types));
   MOCK_METHOD1(RegisterEncodeCompleteCallback,
                int32_t(EncodedImageCallback* callback));
   MOCK_METHOD0(Release, int32_t());
diff --git a/webrtc/modules/video_coding/codecs/test/stats.h b/webrtc/modules/video_coding/codecs/test/stats.h
index 8dc8f15..83ba108b 100644
--- a/webrtc/modules/video_coding/codecs/test/stats.h
+++ b/webrtc/modules/video_coding/codecs/test/stats.h
@@ -39,7 +39,7 @@
 
   // Copied from EncodedImage
   size_t encoded_frame_length_in_bytes;
-  webrtc::VideoFrameType frame_type;
+  webrtc::FrameType frame_type;
 };
 
 // Handles statistics from a single video processing run.
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
index 888adb8..3bb6b7a 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.cc
@@ -162,7 +162,7 @@
   return encoded_frame_size_;
 }
 
-VideoFrameType VideoProcessorImpl::EncodedFrameType() {
+FrameType VideoProcessorImpl::EncodedFrameType() {
   return encoded_frame_type_;
 }
 
@@ -199,7 +199,7 @@
     source_frame_.set_timestamp(frame_number);
 
     // Decide if we're going to force a keyframe:
-    std::vector<VideoFrameType> frame_types(1, kDeltaFrame);
+    std::vector<FrameType> frame_types(1, kDeltaFrame);
     if (config_.keyframe_interval > 0 &&
         frame_number % config_.keyframe_interval == 0) {
       frame_types[0] = kKeyFrame;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor.h b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
index 8c9cb12..4b6aed1 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor.h
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor.h
@@ -147,7 +147,7 @@
   virtual size_t EncodedFrameSize() = 0;
 
   // Return the encoded frame type (key or delta).
-  virtual VideoFrameType EncodedFrameType() = 0;
+  virtual FrameType EncodedFrameType() = 0;
 
   // Return the number of dropped frames.
   virtual int NumberDroppedFrames() = 0;
@@ -183,7 +183,7 @@
   // Return the size of the encoded frame in bytes.
   size_t EncodedFrameSize() override;
   // Return the encoded frame type (key or delta).
-  VideoFrameType EncodedFrameType() override;
+  FrameType EncodedFrameType() override;
   // Return the number of dropped frames.
   int NumberDroppedFrames() override;
   // Return the number of spatial resizes.
@@ -212,7 +212,7 @@
   // If Init() has executed successfully.
   bool initialized_;
   size_t encoded_frame_size_;
-  VideoFrameType encoded_frame_type_;
+  FrameType encoded_frame_type_;
   int prev_time_stamp_;
   int num_dropped_frames_;
   int num_spatial_resizes_;
diff --git a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 9f8ff49..ec9d756 100644
--- a/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/webrtc/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -268,7 +268,7 @@
   }
 
   // For every encoded frame, update the rate control metrics.
-  void UpdateRateControlMetrics(int frame_num, VideoFrameType frame_type) {
+  void UpdateRateControlMetrics(int frame_num, FrameType frame_type) {
     float encoded_size_kbits = processor_->EncodedFrameSize() * 8.0f / 1000.0f;
     // Update layer data.
     // Update rate mismatch relative to per-frame bandwidth for delta frames.
@@ -450,7 +450,7 @@
     ResetRateControlMetrics(
         rate_profile.frame_index_rate_update[update_index + 1]);
     int frame_number = 0;
-    VideoFrameType frame_type = kDeltaFrame;
+    FrameType frame_type = kDeltaFrame;
     while (processor_->ProcessFrame(frame_number) &&
         frame_number < num_frames) {
       // Get the layer index for the frame |frame_number|.
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
index 9ac2efa..4439267 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.cc
@@ -233,7 +233,7 @@
 int SimulcastEncoderAdapter::Encode(
     const VideoFrame& input_image,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (!Initialized()) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -267,7 +267,7 @@
     if (!streaminfos_[stream_idx].send_stream)
       continue;
 
-    std::vector<VideoFrameType> stream_frame_types;
+    std::vector<FrameType> stream_frame_types;
     if (send_key_frame) {
       stream_frame_types.push_back(kKeyFrame);
       streaminfos_[stream_idx].key_frame_request = false;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
index c00d8fb..afec024 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter.h
@@ -42,7 +42,7 @@
                  size_t max_payload_size) override;
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
   int SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
   int SetRates(uint32_t new_bitrate_kbit, uint32_t new_framerate) override;
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
index ed6541c..218b5e2 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_encoder_adapter_unittest.cc
@@ -117,7 +117,7 @@
 
   int32_t Encode(const VideoFrame& inputImage,
                  const CodecSpecificInfo* codecSpecificInfo,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     return 0;
   }
 
diff --git a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
index 672fa3a..a3d9e5a 100644
--- a/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
+++ b/webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h
@@ -338,7 +338,7 @@
     decoder_->Release();
   }
 
-  void ExpectStreams(VideoFrameType frame_type, int expected_video_streams) {
+  void ExpectStreams(FrameType frame_type, int expected_video_streams) {
     ASSERT_GE(expected_video_streams, 0);
     ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
     if (expected_video_streams >= 1) {
@@ -389,8 +389,7 @@
   // a key frame was only requested for some of them.
   void TestKeyFrameRequestsOnAllStreams() {
     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -424,8 +423,7 @@
   void TestPaddingAllStreams() {
     // We should always encode the base layer.
     encoder_->SetRates(kMinBitrates[0] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -437,8 +435,7 @@
   void TestPaddingTwoStreams() {
     // We have just enough to get only the first stream and padding for two.
     encoder_->SetRates(kMinBitrates[0], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -451,8 +448,7 @@
     // We are just below limit of sending second stream, so we should get
     // the first stream maxed out (at |maxBitrate|), and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -464,8 +460,7 @@
   void TestPaddingOneStream() {
     // We have just enough to send two streams, so padding for one stream.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -479,8 +474,7 @@
     // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] - 1, 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -493,8 +487,7 @@
     // We have just enough to send all streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -507,8 +500,7 @@
     // We should get three media streams.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
                        kMaxBitrates[2], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     ExpectStreams(kKeyFrame, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
 
@@ -589,8 +581,7 @@
 
     // Encode one frame and verify.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-    std::vector<VideoFrameType> frame_types(kNumberOfSimulcastStreams,
-                                            kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
     EXPECT_CALL(encoder_callback_, Encoded(
         AllOf(Field(&EncodedImage::_frameType, kKeyFrame),
               Field(&EncodedImage::_encodedWidth, width),
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
index 2885bb8..d6f36f2 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.cc
@@ -706,7 +706,7 @@
 
 int VP8EncoderImpl::Encode(const VideoFrame& frame,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<VideoFrameType>* frame_types) {
+                           const std::vector<FrameType>* frame_types) {
   TRACE_EVENT1("webrtc", "VP8::Encode", "timestamp", frame.timestamp());
 
   if (!inited_)
diff --git a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
index 5ff1485..ba14ed5 100644
--- a/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp8/vp8_impl.h
@@ -48,7 +48,7 @@
 
   virtual int Encode(const VideoFrame& input_image,
                      const CodecSpecificInfo* codec_specific_info,
-                     const std::vector<VideoFrameType>* frame_types);
+                     const std::vector<FrameType>* frame_types);
 
   virtual int RegisterEncodeCompleteCallback(EncodedImageCallback* callback);
 
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
index deb3bca..1a21371 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.cc
@@ -422,7 +422,7 @@
 
 int VP9EncoderImpl::Encode(const VideoFrame& input_image,
                            const CodecSpecificInfo* codec_specific_info,
-                           const std::vector<VideoFrameType>* frame_types) {
+                           const std::vector<FrameType>* frame_types) {
   if (!inited_) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
@@ -432,7 +432,7 @@
   if (encoded_complete_callback_ == NULL) {
     return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
   }
-  VideoFrameType frame_type = kDeltaFrame;
+  FrameType frame_type = kDeltaFrame;
   // We only support one stream at the moment.
   if (frame_types && frame_types->size() > 0) {
     frame_type = (*frame_types)[0];
diff --git a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
index c164a63..f9c1230 100644
--- a/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
+++ b/webrtc/modules/video_coding/codecs/vp9/vp9_impl.h
@@ -35,7 +35,7 @@
 
   int Encode(const VideoFrame& input_image,
              const CodecSpecificInfo* codec_specific_info,
-             const std::vector<VideoFrameType>* frame_types) override;
+             const std::vector<FrameType>* frame_types) override;
 
   int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
 
diff --git a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
index 10f1d6e..feae701 100644
--- a/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/decoding_state_unittest.cc
@@ -181,7 +181,7 @@
   // Now insert empty packet belonging to the same frame.
   packet.timestamp = 1;
   packet.seqNum = 2;
-  packet.frameType = kFrameEmpty;
+  packet.frameType = kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 2);
@@ -196,7 +196,7 @@
   // sequence number.
   packet.timestamp = 0;
   packet.seqNum = 4;
-  packet.frameType = kFrameEmpty;
+  packet.frameType = kEmptyFrame;
   packet.sizeBytes = 0;
   dec_state.UpdateOldPacket(&packet);
   EXPECT_EQ(dec_state.sequence_num(), 3);
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.cc b/webrtc/modules/video_coding/main/source/encoded_frame.cc
index 646dae3..92d2cd0 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.cc
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.cc
@@ -226,38 +226,4 @@
     }
 }
 
-webrtc::FrameType VCMEncodedFrame::ConvertFrameType(VideoFrameType frameType)
-{
-  switch(frameType) {
-    case kKeyFrame:
-      return  kVideoFrameKey;
-    case kDeltaFrame:
-      return kVideoFrameDelta;
-  }
-  // Bogus default return value.
-  return kVideoFrameDelta;
-}
-
-VideoFrameType VCMEncodedFrame::ConvertFrameType(webrtc::FrameType frame_type) {
-  switch (frame_type) {
-    case kVideoFrameKey:
-      return kKeyFrame;
-    case kVideoFrameDelta:
-      return kDeltaFrame;
-    default:
-      assert(false);
-      return kDeltaFrame;
-  }
-}
-
-void VCMEncodedFrame::ConvertFrameTypes(
-    const std::vector<webrtc::FrameType>& frame_types,
-    std::vector<VideoFrameType>* video_frame_types) {
-  assert(video_frame_types);
-  video_frame_types->reserve(frame_types.size());
-  for (size_t i = 0; i < frame_types.size(); ++i) {
-    (*video_frame_types)[i] = ConvertFrameType(frame_types[i]);
-  }
-}
-
-}
+}  // namespace webrtc
diff --git a/webrtc/modules/video_coding/main/source/encoded_frame.h b/webrtc/modules/video_coding/main/source/encoded_frame.h
index 6caaf42..608578c 100644
--- a/webrtc/modules/video_coding/main/source/encoded_frame.h
+++ b/webrtc/modules/video_coding/main/source/encoded_frame.h
@@ -68,7 +68,7 @@
     /**
     *   Get frame type
     */
-    webrtc::FrameType FrameType() const {return ConvertFrameType(_frameType);}
+    webrtc::FrameType FrameType() const { return _frameType; }
     /**
     *   Get frame rotation
     */
@@ -95,12 +95,6 @@
 
     const RTPFragmentationHeader* FragmentationHeader() const;
 
-    static webrtc::FrameType ConvertFrameType(VideoFrameType frameType);
-    static VideoFrameType ConvertFrameType(webrtc::FrameType frameType);
-    static void ConvertFrameTypes(
-        const std::vector<webrtc::FrameType>& frame_types,
-        std::vector<VideoFrameType>* video_frame_types);
-
 protected:
     /**
     * Verifies that current allocated buffer size is larger than or equal to the input size.
diff --git a/webrtc/modules/video_coding/main/source/frame_buffer.cc b/webrtc/modules/video_coding/main/source/frame_buffer.cc
index 82a755a..192febe 100644
--- a/webrtc/modules/video_coding/main/source/frame_buffer.cc
+++ b/webrtc/modules/video_coding/main/source/frame_buffer.cc
@@ -98,7 +98,7 @@
         // We only take the ntp timestamp of the first packet of a frame.
         ntp_time_ms_ = packet.ntp_time_ms_;
         _codec = packet.codec;
-        if (packet.frameType != kFrameEmpty) {
+        if (packet.frameType != kEmptyFrame) {
             // first media packet
             SetState(kStateIncomplete);
         }
@@ -280,7 +280,7 @@
 #endif
     // Transfer frame information to EncodedFrame and create any codec
     // specific information.
-    _frameType = ConvertFrameType(_sessionInfo.FrameType());
+    _frameType = _sessionInfo.FrameType();
     _completeFrame = _sessionInfo.complete();
     _missingFrame = !continuous;
 }
diff --git a/webrtc/modules/video_coding/main/source/generic_encoder.cc b/webrtc/modules/video_coding/main/source/generic_encoder.cc
index 31c3f17..fe3d5cb 100644
--- a/webrtc/modules/video_coding/main/source/generic_encoder.cc
+++ b/webrtc/modules/video_coding/main/source/generic_encoder.cc
@@ -140,9 +140,8 @@
 int32_t VCMGenericEncoder::Encode(const VideoFrame& inputFrame,
                                   const CodecSpecificInfo* codecSpecificInfo,
                                   const std::vector<FrameType>& frameTypes) {
-  std::vector<VideoFrameType> video_frame_types(frameTypes.size(),
-                                                kDeltaFrame);
-  VCMEncodedFrame::ConvertFrameTypes(frameTypes, &video_frame_types);
+  for (FrameType frame_type : frameTypes)
+    RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);
 
   rotation_ = inputFrame.rotation();
 
@@ -153,12 +152,11 @@
     vcm_encoded_frame_callback_->SetRotation(rotation_);
   }
 
-  int32_t result =
-      encoder_->Encode(inputFrame, codecSpecificInfo, &video_frame_types);
+  int32_t result = encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
   if (is_screenshare_ &&
       result == WEBRTC_VIDEO_CODEC_TARGET_BITRATE_OVERSHOOT) {
     // Target bitrate exceeded, encoder state has been reset - try again.
-    return encoder_->Encode(inputFrame, codecSpecificInfo, &video_frame_types);
+    return encoder_->Encode(inputFrame, codecSpecificInfo, &frameTypes);
   }
 
   return result;
@@ -223,10 +221,7 @@
 int32_t VCMGenericEncoder::RequestFrame(
     const std::vector<FrameType>& frame_types) {
   VideoFrame image;
-  std::vector<VideoFrameType> video_frame_types(frame_types.size(),
-                                                kDeltaFrame);
-  VCMEncodedFrame::ConvertFrameTypes(frame_types, &video_frame_types);
-  return encoder_->Encode(image, NULL, &video_frame_types);
+  return encoder_->Encode(image, NULL, &frame_types);
 }
 
 int32_t
@@ -294,6 +289,8 @@
     const EncodedImage& encodedImage,
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentationHeader) {
+  RTC_DCHECK(encodedImage._frameType == kVideoFrameKey ||
+             encodedImage._frameType == kVideoFrameDelta);
   post_encode_callback_->Encoded(encodedImage, NULL, NULL);
 
   if (_sendCallback == NULL) {
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer.cc b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
index 49c2325..92ffbc9 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer.cc
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer.cc
@@ -643,7 +643,7 @@
 
   // Empty packets may bias the jitter estimate (lacking size component),
   // therefore don't let empty packet trigger the following updates:
-  if (packet.frameType != kFrameEmpty) {
+  if (packet.frameType != kEmptyFrame) {
     if (waiting_for_completion_.timestamp == packet.timestamp) {
       // This can get bad if we have a lot of duplicate packets,
       // we will then count some packet multiple times.
diff --git a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
index 7ba4d68..ab4d8cd 100644
--- a/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/jitter_buffer_unittest.cc
@@ -168,10 +168,9 @@
   }
 
   VCMFrameBufferEnum InsertFrame(FrameType frame_type) {
-    stream_generator_->GenerateFrame(frame_type,
-                                    (frame_type != kFrameEmpty) ? 1 : 0,
-                                    (frame_type == kFrameEmpty) ? 1 : 0,
-                                    clock_->TimeInMilliseconds());
+    stream_generator_->GenerateFrame(
+        frame_type, (frame_type != kEmptyFrame) ? 1 : 0,
+        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
     VCMFrameBufferEnum ret = InsertPacketAndPop(0);
     clock_->AdvanceTimeMilliseconds(kDefaultFramePeriodMs);
     return ret;
@@ -1050,7 +1049,7 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
     packet_->completeNALU = kNaluEnd;
-    packet_->frameType = kFrameEmpty;
+    packet_->frameType = kEmptyFrame;
 
     EXPECT_EQ(jitter_buffer_->InsertPacket(*packet_, &retransmitted),
               kDecodableSession);
@@ -1524,7 +1523,7 @@
     packet_->markerBit = false;
     packet_->seqNum = seq_num_;
     packet_->timestamp = timestamp_;
-    packet_->frameType = kFrameEmpty;
+    packet_->frameType = kEmptyFrame;
 
     EXPECT_EQ(kNoError, jitter_buffer_->InsertPacket(*packet_,
                                                      &retransmitted));
@@ -1895,7 +1894,7 @@
 TEST_F(TestJitterBufferNack, EmptyPackets) {
   // Make sure empty packets doesn't clog the jitter buffer.
   jitter_buffer_->SetNackMode(kNack, media_optimization::kLowRttNackMs, -1);
-  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kFrameEmpty), kNoError);
+  EXPECT_GE(InsertFrames(kMaxNumberOfFrames, kEmptyFrame), kNoError);
   InsertFrame(kVideoFrameKey);
   EXPECT_TRUE(DecodeCompleteFrame());
 }
diff --git a/webrtc/modules/video_coding/main/source/packet.cc b/webrtc/modules/video_coding/main/source/packet.cc
index 88838f3..fd5a6ab 100644
--- a/webrtc/modules/video_coding/main/source/packet.cc
+++ b/webrtc/modules/video_coding/main/source/packet.cc
@@ -16,23 +16,21 @@
 namespace webrtc {
 
 VCMPacket::VCMPacket()
-  :
-    payloadType(0),
-    timestamp(0),
-    ntp_time_ms_(0),
-    seqNum(0),
-    dataPtr(NULL),
-    sizeBytes(0),
-    markerBit(false),
-    frameType(kFrameEmpty),
-    codec(kVideoCodecUnknown),
-    isFirstPacket(false),
-    completeNALU(kNaluUnset),
-    insertStartCode(false),
-    width(0),
-    height(0),
-    codecSpecificHeader() {
-}
+    : payloadType(0),
+      timestamp(0),
+      ntp_time_ms_(0),
+      seqNum(0),
+      dataPtr(NULL),
+      sizeBytes(0),
+      markerBit(false),
+      frameType(kEmptyFrame),
+      codec(kVideoCodecUnknown),
+      isFirstPacket(false),
+      completeNALU(kNaluUnset),
+      insertStartCode(false),
+      width(0),
+      height(0),
+      codecSpecificHeader() {}
 
 VCMPacket::VCMPacket(const uint8_t* ptr,
                      const size_t size,
@@ -88,7 +86,7 @@
   dataPtr = NULL;
   sizeBytes = 0;
   markerBit = false;
-  frameType = kFrameEmpty;
+  frameType = kEmptyFrame;
   codec = kVideoCodecUnknown;
   isFirstPacket = false;
   completeNALU = kNaluUnset;
diff --git a/webrtc/modules/video_coding/main/source/receiver_unittest.cc b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
index eb5e471..707e1f1 100644
--- a/webrtc/modules/video_coding/main/source/receiver_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/receiver_unittest.cc
@@ -63,10 +63,8 @@
   int32_t InsertFrame(FrameType frame_type, bool complete) {
     int num_of_packets = complete ? 1 : 2;
     stream_generator_->GenerateFrame(
-        frame_type,
-        (frame_type != kFrameEmpty) ? num_of_packets : 0,
-        (frame_type == kFrameEmpty) ? 1 : 0,
-        clock_->TimeInMilliseconds());
+        frame_type, (frame_type != kEmptyFrame) ? num_of_packets : 0,
+        (frame_type == kEmptyFrame) ? 1 : 0, clock_->TimeInMilliseconds());
     int32_t ret = InsertPacketAndPop(0);
     if (!complete) {
       // Drop the second packet.
diff --git a/webrtc/modules/video_coding/main/source/session_info.cc b/webrtc/modules/video_coding/main/source/session_info.cc
index bf6bcb3..7a32504 100644
--- a/webrtc/modules/video_coding/main/source/session_info.cc
+++ b/webrtc/modules/video_coding/main/source/session_info.cc
@@ -464,7 +464,7 @@
                                  uint8_t* frame_buffer,
                                  VCMDecodeErrorMode decode_error_mode,
                                  const FrameData& frame_data) {
-  if (packet.frameType == kFrameEmpty) {
+  if (packet.frameType == kEmptyFrame) {
     // Update sequence number of an empty packet.
     // Only media packets are inserted into the packet list.
     InformOfEmptyPacket(packet.seqNum);
@@ -516,7 +516,7 @@
       LOG(LS_WARNING) << "Received packet with a sequence number which is out "
                          "of frame boundaries";
       return -3;
-    } else if (frame_type_ == kFrameEmpty && packet.frameType != kFrameEmpty) {
+    } else if (frame_type_ == kEmptyFrame && packet.frameType != kEmptyFrame) {
       // Update the frame type with the type of the first media packet.
       // TODO(mikhal): Can this trigger?
       frame_type_ = packet.frameType;
diff --git a/webrtc/modules/video_coding/main/source/session_info_unittest.cc b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
index fae55f4..58c352d 100644
--- a/webrtc/modules/video_coding/main/source/session_info_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/session_info_unittest.cc
@@ -175,7 +175,7 @@
   packet_.markerBit = true;
   packet_.seqNum  = 2;
   packet_.sizeBytes = 0;
-  packet_.frameType = kFrameEmpty;
+  packet_.frameType = kEmptyFrame;
   EXPECT_EQ(0,
             session_.InsertPacket(packet_,
                                   frame_buffer_,
@@ -888,7 +888,7 @@
 TEST_F(TestNalUnits, OnlyReceivedEmptyPacket) {
   packet_.isFirstPacket = false;
   packet_.completeNALU = kNaluComplete;
-  packet_.frameType = kFrameEmpty;
+  packet_.frameType = kEmptyFrame;
   packet_.sizeBytes = 0;
   packet_.seqNum = 0;
   packet_.markerBit = false;
diff --git a/webrtc/modules/video_coding/main/source/test/stream_generator.cc b/webrtc/modules/video_coding/main/source/test/stream_generator.cc
index dcf9b68..664764a 100644
--- a/webrtc/modules/video_coding/main/source/test/stream_generator.cc
+++ b/webrtc/modules/video_coding/main/source/test/stream_generator.cc
@@ -46,8 +46,8 @@
     ++sequence_number_;
   }
   for (int i = 0; i < num_empty_packets; ++i) {
-    packets_.push_back(GeneratePacket(
-        sequence_number_, timestamp, 0, false, false, kFrameEmpty));
+    packets_.push_back(GeneratePacket(sequence_number_, timestamp, 0, false,
+                                      false, kEmptyFrame));
     ++sequence_number_;
   }
 }
diff --git a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
index 1f1996e..16eaed3 100644
--- a/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_receiver_unittest.cc
@@ -88,7 +88,7 @@
   const uint8_t payload[kPaddingSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
   header.header.payloadType = kUnusedPayloadType;
@@ -112,7 +112,7 @@
   const uint8_t payload[kFrameSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
   header.header.payloadType = kUnusedPayloadType;
@@ -127,7 +127,7 @@
   clock_.AdvanceTimeMilliseconds(33);
   header.header.timestamp += 3000;
 
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.type.Video.isFirstPacket = false;
   header.header.markerBit = false;
   // Insert padding frames.
@@ -163,7 +163,7 @@
   const uint8_t payload[kFrameSize] = {0};
   WebRtcRTPHeader header;
   memset(&header, 0, sizeof(header));
-  header.frameType = kFrameEmpty;
+  header.frameType = kEmptyFrame;
   header.type.Video.isFirstPacket = false;
   header.header.markerBit = false;
   header.header.paddingLength = kPaddingSize;
@@ -188,7 +188,7 @@
     }
 
     // Insert 2 padding only frames.
-    header.frameType = kFrameEmpty;
+    header.frameType = kEmptyFrame;
     header.type.Video.isFirstPacket = false;
     header.header.markerBit = false;
     for (int j = 0; j < 2; ++j) {
diff --git a/webrtc/modules/video_coding/main/source/video_sender.cc b/webrtc/modules/video_coding/main/source/video_sender.cc
index a5ef22a..8401cb2 100644
--- a/webrtc/modules/video_coding/main/source/video_sender.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender.cc
@@ -313,7 +313,7 @@
   }
   // TODO(holmer): Add support for dropping frames per stream. Currently we
   // only have one frame dropper for all streams.
-  if (_nextFrameTypes[0] == kFrameEmpty) {
+  if (_nextFrameTypes[0] == kEmptyFrame) {
     return VCM_OK;
   }
   if (_mediaOpt.DropFrame()) {
diff --git a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
index 31c7173..13e6b99 100644
--- a/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/main/source/video_sender_unittest.cc
@@ -244,7 +244,7 @@
     }
     assert(stream >= 0);
     assert(stream < kNumberOfStreams);
-    std::vector<VideoFrameType> frame_types(kNumberOfStreams, kDeltaFrame);
+    std::vector<FrameType> frame_types(kNumberOfStreams, kDeltaFrame);
     frame_types[stream] = kKeyFrame;
     EXPECT_CALL(
         encoder_,
diff --git a/webrtc/test/configurable_frame_size_encoder.cc b/webrtc/test/configurable_frame_size_encoder.cc
index ea42e96..2dbda38 100644
--- a/webrtc/test/configurable_frame_size_encoder.cc
+++ b/webrtc/test/configurable_frame_size_encoder.cc
@@ -41,7 +41,7 @@
 int32_t ConfigurableFrameSizeEncoder::Encode(
     const VideoFrame& inputImage,
     const CodecSpecificInfo* codecSpecificInfo,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   EncodedImage encodedImage(
       buffer_.get(), current_frame_size_, max_frame_size_);
   encodedImage._completeFrame = true;
diff --git a/webrtc/test/configurable_frame_size_encoder.h b/webrtc/test/configurable_frame_size_encoder.h
index ffc095b..79514ef 100644
--- a/webrtc/test/configurable_frame_size_encoder.h
+++ b/webrtc/test/configurable_frame_size_encoder.h
@@ -30,7 +30,7 @@
 
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
 
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
diff --git a/webrtc/test/fake_encoder.cc b/webrtc/test/fake_encoder.cc
index f85fb9a..db7a587 100644
--- a/webrtc/test/fake_encoder.cc
+++ b/webrtc/test/fake_encoder.cc
@@ -47,7 +47,7 @@
 
 int32_t FakeEncoder::Encode(const VideoFrame& input_image,
                             const CodecSpecificInfo* codec_specific_info,
-                            const std::vector<VideoFrameType>* frame_types) {
+                            const std::vector<FrameType>* frame_types) {
   assert(config_.maxFramerate > 0);
   int64_t time_since_last_encode_ms = 1000 / config_.maxFramerate;
   int64_t time_now_ms = clock_->TimeInMilliseconds();
@@ -189,7 +189,7 @@
 
 int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
                                const CodecSpecificInfo* codec_specific_info,
-                               const std::vector<VideoFrameType>* frame_types) {
+                               const std::vector<FrameType>* frame_types) {
   SleepMs(delay_ms_);
   return FakeEncoder::Encode(input_image, codec_specific_info, frame_types);
 }
diff --git a/webrtc/test/fake_encoder.h b/webrtc/test/fake_encoder.h
index 3dce336..dc3a1ae 100644
--- a/webrtc/test/fake_encoder.h
+++ b/webrtc/test/fake_encoder.h
@@ -33,7 +33,7 @@
                      size_t max_payload_size) override;
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
   int32_t RegisterEncodeCompleteCallback(
       EncodedImageCallback* callback) override;
   int32_t Release() override;
@@ -74,7 +74,7 @@
 
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
 
  private:
   const int delay_ms_;
diff --git a/webrtc/video/encoded_frame_callback_adapter.cc b/webrtc/video/encoded_frame_callback_adapter.cc
index 6726a37..407801f 100644
--- a/webrtc/video/encoded_frame_callback_adapter.cc
+++ b/webrtc/video/encoded_frame_callback_adapter.cc
@@ -27,11 +27,8 @@
     const CodecSpecificInfo* codecSpecificInfo,
     const RTPFragmentationHeader* fragmentation) {
   RTC_DCHECK(observer_ != nullptr);
-  FrameType frame_type =
-        VCMEncodedFrame::ConvertFrameType(encodedImage._frameType);
-  const EncodedFrame frame(encodedImage._buffer,
-                           encodedImage._length,
-                           frame_type);
+  const EncodedFrame frame(encodedImage._buffer, encodedImage._length,
+                           encodedImage._frameType);
   observer_->EncodedFrameCallback(frame);
   return 0;
 }
diff --git a/webrtc/video/end_to_end_tests.cc b/webrtc/video/end_to_end_tests.cc
index 01f132f..d889290 100644
--- a/webrtc/video/end_to_end_tests.cc
+++ b/webrtc/video/end_to_end_tests.cc
@@ -1589,7 +1589,7 @@
    public:
     EncodedFrameTestObserver()
         : length_(0),
-          frame_type_(kFrameEmpty),
+          frame_type_(kEmptyFrame),
           called_(EventWrapper::Create()) {}
     virtual ~EncodedFrameTestObserver() {}
 
@@ -2957,7 +2957,7 @@
 
     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<VideoFrameType>* frame_types) override {
+                   const std::vector<FrameType>* frame_types) override {
       {
         rtc::CritScope lock(&test_crit_);
         if (sender_state_ == kNetworkDown) {
@@ -3080,7 +3080,7 @@
      UnusedEncoder() : FakeEncoder(Clock::GetRealTimeClock()) {}
      int32_t Encode(const VideoFrame& input_image,
                     const CodecSpecificInfo* codec_specific_info,
-                    const std::vector<VideoFrameType>* frame_types) override {
+                    const std::vector<FrameType>* frame_types) override {
       ADD_FAILURE() << "Unexpected frame encode.";
       return test::FakeEncoder::Encode(
           input_image, codec_specific_info, frame_types);
diff --git a/webrtc/video/video_encoder.cc b/webrtc/video/video_encoder.cc
index 305406b..3e6ce63 100644
--- a/webrtc/video/video_encoder.cc
+++ b/webrtc/video/video_encoder.cc
@@ -99,7 +99,7 @@
 int32_t VideoEncoderSoftwareFallbackWrapper::Encode(
     const VideoFrame& frame,
     const CodecSpecificInfo* codec_specific_info,
-    const std::vector<VideoFrameType>* frame_types) {
+    const std::vector<FrameType>* frame_types) {
   if (fallback_encoder_)
     return fallback_encoder_->Encode(frame, codec_specific_info, frame_types);
   return encoder_->Encode(frame, codec_specific_info, frame_types);
diff --git a/webrtc/video/video_encoder_unittest.cc b/webrtc/video/video_encoder_unittest.cc
index be0170c..5f2c37a 100644
--- a/webrtc/video/video_encoder_unittest.cc
+++ b/webrtc/video/video_encoder_unittest.cc
@@ -32,7 +32,7 @@
     }
     int32_t Encode(const VideoFrame& frame,
                    const CodecSpecificInfo* codec_specific_info,
-                   const std::vector<VideoFrameType>* frame_types) override {
+                   const std::vector<FrameType>* frame_types) override {
       ++encode_count_;
       return WEBRTC_VIDEO_CODEC_OK;
     }
@@ -120,7 +120,7 @@
   memset(frame_.buffer(webrtc::kVPlane), 128,
          frame_.allocated_size(webrtc::kVPlane));
 
-  std::vector<VideoFrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kKeyFrame);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
   EXPECT_EQ(0, fake_encoder_.encode_count_);
@@ -163,7 +163,7 @@
   EXPECT_EQ(&callback2, fake_encoder_.encode_complete_callback_);
 
   // Encoding a frame using the fallback should arrive at the new callback.
-  std::vector<VideoFrameType> types(1, kKeyFrame);
+  std::vector<FrameType> types(1, kKeyFrame);
   frame_.set_timestamp(frame_.timestamp() + 1000);
   EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
             fallback_wrapper_.Encode(frame_, nullptr, &types));
diff --git a/webrtc/video/video_send_stream_tests.cc b/webrtc/video/video_send_stream_tests.cc
index e16b670..e30c903 100644
--- a/webrtc/video/video_send_stream_tests.cc
+++ b/webrtc/video/video_send_stream_tests.cc
@@ -1252,7 +1252,7 @@
 
     int32_t Encode(const VideoFrame& inputImage,
                    const CodecSpecificInfo* codecSpecificInfo,
-                   const std::vector<VideoFrameType>* frame_types) override {
+                   const std::vector<FrameType>* frame_types) override {
       EXPECT_TRUE(IsReadyForEncode());
 
       observation_complete_->Set();
@@ -1451,7 +1451,7 @@
 
   int32_t Encode(const VideoFrame& input_image,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override {
+                 const std::vector<FrameType>* frame_types) override {
     // Silently skip the encode, FakeEncoder::Encode doesn't produce VP8.
     return 0;
   }
@@ -1742,7 +1742,7 @@
    private:
     int32_t Encode(const VideoFrame& input_image,
                    const CodecSpecificInfo* codecSpecificInfo,
-                   const std::vector<VideoFrameType>* frame_types) override {
+                   const std::vector<FrameType>* frame_types) override {
       CodecSpecificInfo specifics;
       memset(&specifics, 0, sizeof(specifics));
       specifics.codecType = kVideoCodecGeneric;
diff --git a/webrtc/video_encoder.h b/webrtc/video_encoder.h
index a512c98..0858a71 100644
--- a/webrtc/video_encoder.h
+++ b/webrtc/video_encoder.h
@@ -98,7 +98,7 @@
   //                                  WEBRTC_VIDEO_CODEC_TIMEOUT
   virtual int32_t Encode(const VideoFrame& frame,
                          const CodecSpecificInfo* codec_specific_info,
-                         const std::vector<VideoFrameType>* frame_types) = 0;
+                         const std::vector<FrameType>* frame_types) = 0;
 
   // Inform the encoder of the new packet loss rate and the round-trip time of
   // the network.
@@ -147,7 +147,7 @@
   int32_t Release() override;
   int32_t Encode(const VideoFrame& frame,
                  const CodecSpecificInfo* codec_specific_info,
-                 const std::vector<VideoFrameType>* frame_types) override;
+                 const std::vector<FrameType>* frame_types) override;
   int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
 
   int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
diff --git a/webrtc/video_engine/vie_channel.h b/webrtc/video_engine/vie_channel.h
index 488923d..cd0646b 100644
--- a/webrtc/video_engine/vie_channel.h
+++ b/webrtc/video_engine/vie_channel.h
@@ -251,10 +251,10 @@
                                int min_playout_delay_ms,
                                int render_delay_ms);
 
-  // Implements VideoFrameTypeCallback.
+  // Implements FrameTypeCallback.
   virtual int32_t RequestKeyFrame();
 
-  // Implements VideoFrameTypeCallback.
+  // Implements FrameTypeCallback.
   virtual int32_t SliceLossIndicationRequest(
       const uint64_t picture_id);
 
diff --git a/webrtc/video_engine/vie_encoder.cc b/webrtc/video_engine/vie_encoder.cc
index 45e9612..4d41e0e 100644
--- a/webrtc/video_engine/vie_encoder.cc
+++ b/webrtc/video_engine/vie_encoder.cc
@@ -558,10 +558,11 @@
     stats_proxy_->OnSendEncodedImage(encoded_image, rtp_video_hdr);
 
   return send_payload_router_->RoutePayload(
-      VCMEncodedFrame::ConvertFrameType(encoded_image._frameType), payload_type,
-      encoded_image._timeStamp, encoded_image.capture_time_ms_,
-      encoded_image._buffer, encoded_image._length, &fragmentation_header,
-      rtp_video_hdr) ? 0 : -1;
+             encoded_image._frameType, payload_type, encoded_image._timeStamp,
+             encoded_image.capture_time_ms_, encoded_image._buffer,
+             encoded_image._length, &fragmentation_header, rtp_video_hdr)
+             ? 0
+             : -1;
 }
 
 int32_t ViEEncoder::SendStatistics(const uint32_t bit_rate,
diff --git a/webrtc/video_frame.h b/webrtc/video_frame.h
index 3c0ad0c..1968a69 100644
--- a/webrtc/video_frame.h
+++ b/webrtc/video_frame.h
@@ -12,6 +12,7 @@
 #define WEBRTC_VIDEO_FRAME_H_
 
 #include "webrtc/base/scoped_ref_ptr.h"
+#include "webrtc/common_types.h"
 #include "webrtc/common_video/interface/video_frame_buffer.h"
 #include "webrtc/common_video/rotation.h"
 #include "webrtc/typedefs.h"
@@ -166,11 +167,6 @@
   VideoRotation rotation_;
 };
 
-enum VideoFrameType {
-  kKeyFrame = 0,
-  kDeltaFrame = 1,
-};
-
 // TODO(pbos): Rename EncodedFrame and reformat this class' members.
 class EncodedImage {
  public:
@@ -192,8 +188,7 @@
   // NTP time of the capture time in local timebase in milliseconds.
   int64_t ntp_time_ms_ = 0;
   int64_t capture_time_ms_ = 0;
-  // TODO(pbos): Use webrtc::FrameType directly (and remove VideoFrameType).
-  VideoFrameType _frameType = kDeltaFrame;
+  FrameType _frameType = kDeltaFrame;
   uint8_t* _buffer;
   size_t _length;
   size_t _size;