Fix mb.py presubmit issues.

* Add a config file for python formatting (.style.yapf).
* Change the default indentation from 4 spaces to 2 spaces.
* Run 'git cl format --python' on a few python files.

Bug: webrtc:13413
Change-Id: Ia71135131276c2c499b00032d57ad16ee5200a5c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/238982
Reviewed-by: Mirko Bonadei <mbonadei@webrtc.org>
Reviewed-by: Christoffer Jansson <jansson@google.com>
Reviewed-by: Henrik Andreassson <henrika@webrtc.org>
Commit-Queue: Jeremy Leconte <jleconte@google.com>
Cr-Commit-Position: refs/heads/main@{#35500}
diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 0000000..c34341d
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,4 @@
+[style]
+based_on_style = pep8
+indent_width = 2
+column_limit = 80
\ No newline at end of file
diff --git a/OWNERS b/OWNERS
index ac801fe..4702befd9 100644
--- a/OWNERS
+++ b/OWNERS
@@ -17,3 +17,4 @@
 per-file WATCHLISTS=*
 per-file native-api.md=mbonadei@webrtc.org
 per-file ....lua=titovartem@webrtc.org
+per-file .style.yapf=jleconte@webrtc.org
diff --git a/audio/test/low_bandwidth_audio_test.py b/audio/test/low_bandwidth_audio_test.py
index 9aaf30f..a49a6dd 100755
--- a/audio/test/low_bandwidth_audio_test.py
+++ b/audio/test/low_bandwidth_audio_test.py
@@ -31,108 +31,103 @@
     'To fix this run:\n'
     '  python %s %s\n'
     '\n'
-    'Note that these tools are Google-internal due to licensing, so in order to '
-    'use them you will have to get your own license and manually put them in the '
-    'right location.\n'
+    'Note that these tools are Google-internal due to licensing, so in order '
+    'to use them you will have to get your own license and manually put them '
+    'in the right location.\n'
     'See https://cs.chromium.org/chromium/src/third_party/webrtc/tools_webrtc/'
     'download_tools.py?rcl=bbceb76f540159e2dba0701ac03c514f01624130&l=13')
 
 
 def _LogCommand(command):
-    logging.info('Running %r', command)
-    return command
+  logging.info('Running %r', command)
+  return command
 
 
 def _ParseArgs():
-    parser = argparse.ArgumentParser(
-        description='Run low-bandwidth audio tests.')
-    parser.add_argument('build_dir',
-                        help='Path to the build directory (e.g. out/Release).')
-    parser.add_argument('--remove',
-                        action='store_true',
-                        help='Remove output audio files after testing.')
-    parser.add_argument(
-        '--android',
-        action='store_true',
-        help='Perform the test on a connected Android device instead.')
-    parser.add_argument('--adb-path',
-                        help='Path to adb binary.',
-                        default='adb')
-    parser.add_argument('--num-retries',
-                        default='0',
-                        help='Number of times to retry the test on Android.')
-    parser.add_argument(
-        '--isolated-script-test-perf-output',
-        default=None,
-        help='Path to store perf results in histogram proto format.')
-    parser.add_argument('--extra-test-args',
-                        default=[],
-                        action='append',
-                        help='Extra args to path to the test binary.')
+  parser = argparse.ArgumentParser(description='Run low-bandwidth audio tests.')
+  parser.add_argument('build_dir',
+                      help='Path to the build directory (e.g. out/Release).')
+  parser.add_argument('--remove',
+                      action='store_true',
+                      help='Remove output audio files after testing.')
+  parser.add_argument(
+      '--android',
+      action='store_true',
+      help='Perform the test on a connected Android device instead.')
+  parser.add_argument('--adb-path', help='Path to adb binary.', default='adb')
+  parser.add_argument('--num-retries',
+                      default='0',
+                      help='Number of times to retry the test on Android.')
+  parser.add_argument(
+      '--isolated-script-test-perf-output',
+      default=None,
+      help='Path to store perf results in histogram proto format.')
+  parser.add_argument('--extra-test-args',
+                      default=[],
+                      action='append',
+                      help='Extra args to path to the test binary.')
 
-    # Ignore Chromium-specific flags
-    parser.add_argument('--test-launcher-summary-output',
-                        type=str,
-                        default=None)
-    args = parser.parse_args()
+  # Ignore Chromium-specific flags
+  parser.add_argument('--test-launcher-summary-output', type=str, default=None)
+  args = parser.parse_args()
 
-    return args
+  return args
 
 
 def _GetPlatform():
-    if sys.platform == 'win32':
-        return 'win'
-    elif sys.platform == 'darwin':
-        return 'mac'
-    elif sys.platform.startswith('linux'):
-        return 'linux'
+  if sys.platform == 'win32':
+    return 'win'
+  elif sys.platform == 'darwin':
+    return 'mac'
+  elif sys.platform.startswith('linux'):
+    return 'linux'
+  raise AssertionError('Unknown platform %s' % sys.platform)
 
 
 def _GetExtension():
-    return '.exe' if sys.platform == 'win32' else ''
+  return '.exe' if sys.platform == 'win32' else ''
 
 
 def _GetPathToTools():
-    tools_dir = os.path.join(SRC_DIR, 'tools_webrtc')
-    toolchain_dir = os.path.join(tools_dir, 'audio_quality')
+  tools_dir = os.path.join(SRC_DIR, 'tools_webrtc')
+  toolchain_dir = os.path.join(tools_dir, 'audio_quality')
 
-    platform = _GetPlatform()
-    ext = _GetExtension()
+  platform = _GetPlatform()
+  ext = _GetExtension()
 
-    pesq_path = os.path.join(toolchain_dir, platform, 'pesq' + ext)
-    if not os.path.isfile(pesq_path):
-        pesq_path = None
+  pesq_path = os.path.join(toolchain_dir, platform, 'pesq' + ext)
+  if not os.path.isfile(pesq_path):
+    pesq_path = None
 
-    polqa_path = os.path.join(toolchain_dir, platform, 'PolqaOem64' + ext)
-    if not os.path.isfile(polqa_path):
-        polqa_path = None
+  polqa_path = os.path.join(toolchain_dir, platform, 'PolqaOem64' + ext)
+  if not os.path.isfile(polqa_path):
+    polqa_path = None
 
-    if (platform != 'mac' and not polqa_path) or not pesq_path:
-        logging.error(NO_TOOLS_ERROR_MESSAGE, toolchain_dir,
-                      os.path.join(tools_dir, 'download_tools.py'),
-                      toolchain_dir)
+  if (platform != 'mac' and not polqa_path) or not pesq_path:
+    logging.error(NO_TOOLS_ERROR_MESSAGE, toolchain_dir,
+                  os.path.join(tools_dir, 'download_tools.py'), toolchain_dir)
 
-    return pesq_path, polqa_path
+  return pesq_path, polqa_path
 
 
 def ExtractTestRuns(lines, echo=False):
-    """Extracts information about tests from the output of a test runner.
+  """Extracts information about tests from the output of a test runner.
 
   Produces tuples
   (android_device, test_name, reference_file, degraded_file, cur_perf_results).
   """
-    for line in lines:
-        if echo:
-            sys.stdout.write(line)
+  for line in lines:
+    if echo:
+      sys.stdout.write(line)
 
-        # Output from Android has a prefix with the device name.
-        android_prefix_re = r'(?:I\b.+\brun_tests_on_device\((.+?)\)\s*)?'
-        test_re = r'^' + android_prefix_re + (r'TEST (\w+) ([^ ]+?) ([^\s]+)'
-                                              r' ?([^\s]+)?\s*$')
+    # Output from Android has a prefix with the device name.
+    android_prefix_re = r'(?:I\b.+\brun_tests_on_device\((.+?)\)\s*)?'
+    test_re = r'^' + android_prefix_re + (r'TEST (\w+) ([^ ]+?) ([^\s]+)'
+                                          r' ?([^\s]+)?\s*$')
 
-        match = re.search(test_re, line)
-        if match:
-            yield match.groups()
+    match = re.search(test_re, line)
+    if match:
+      yield match.groups()
 
 
 def _GetFile(file_path,
@@ -140,97 +135,95 @@
              move=False,
              android=False,
              adb_prefix=('adb', )):
-    out_file_name = os.path.basename(file_path)
-    out_file_path = os.path.join(out_dir, out_file_name)
+  out_file_name = os.path.basename(file_path)
+  out_file_path = os.path.join(out_dir, out_file_name)
 
-    if android:
-        # Pull the file from the connected Android device.
-        adb_command = adb_prefix + ('pull', file_path, out_dir)
-        subprocess.check_call(_LogCommand(adb_command))
-        if move:
-            # Remove that file.
-            adb_command = adb_prefix + ('shell', 'rm', file_path)
-            subprocess.check_call(_LogCommand(adb_command))
-    elif os.path.abspath(file_path) != os.path.abspath(out_file_path):
-        if move:
-            shutil.move(file_path, out_file_path)
-        else:
-            shutil.copy(file_path, out_file_path)
+  if android:
+    # Pull the file from the connected Android device.
+    adb_command = adb_prefix + ('pull', file_path, out_dir)
+    subprocess.check_call(_LogCommand(adb_command))
+    if move:
+      # Remove that file.
+      adb_command = adb_prefix + ('shell', 'rm', file_path)
+      subprocess.check_call(_LogCommand(adb_command))
+  elif os.path.abspath(file_path) != os.path.abspath(out_file_path):
+    if move:
+      shutil.move(file_path, out_file_path)
+    else:
+      shutil.copy(file_path, out_file_path)
 
-    return out_file_path
+  return out_file_path
 
 
 def _RunPesq(executable_path,
              reference_file,
              degraded_file,
              sample_rate_hz=16000):
-    directory = os.path.dirname(reference_file)
-    assert os.path.dirname(degraded_file) == directory
+  directory = os.path.dirname(reference_file)
+  assert os.path.dirname(degraded_file) == directory
 
-    # Analyze audio.
-    command = [
-        executable_path,
-        '+%d' % sample_rate_hz,
-        os.path.basename(reference_file),
-        os.path.basename(degraded_file)
-    ]
-    # Need to provide paths in the current directory due to a bug in PESQ:
-    # On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than
-    # 'path/to', PESQ crashes.
-    out = subprocess.check_output(_LogCommand(command),
-                                  cwd=directory,
-                                  stderr=subprocess.STDOUT)
+  # Analyze audio.
+  command = [
+      executable_path,
+      '+%d' % sample_rate_hz,
+      os.path.basename(reference_file),
+      os.path.basename(degraded_file)
+  ]
+  # Need to provide paths in the current directory due to a bug in PESQ:
+  # On Mac, for some 'path/to/file.wav', if 'file.wav' is longer than
+  # 'path/to', PESQ crashes.
+  out = subprocess.check_output(_LogCommand(command),
+                                cwd=directory,
+                                stderr=subprocess.STDOUT)
 
-    # Find the scores in stdout of PESQ.
-    match = re.search(
-        r'Prediction \(Raw MOS, MOS-LQO\):\s+=\s+([\d.]+)\s+([\d.]+)', out)
-    if match:
-        raw_mos, _ = match.groups()
-
-        return {'pesq_mos': (raw_mos, 'unitless')}
-    else:
-        logging.error('PESQ: %s', out.splitlines()[-1])
-        return {}
+  # Find the scores in stdout of PESQ.
+  match = re.search(
+      r'Prediction \(Raw MOS, MOS-LQO\):\s+=\s+([\d.]+)\s+([\d.]+)', out)
+  if match:
+    raw_mos, _ = match.groups()
+    return {'pesq_mos': (raw_mos, 'unitless')}
+  logging.error('PESQ: %s', out.splitlines()[-1])
+  return {}
 
 
 def _RunPolqa(executable_path, reference_file, degraded_file):
-    # Analyze audio.
-    command = [
-        executable_path, '-q', '-LC', 'NB', '-Ref', reference_file, '-Test',
-        degraded_file
-    ]
-    process = subprocess.Popen(_LogCommand(command),
-                               stdout=subprocess.PIPE,
-                               stderr=subprocess.PIPE)
-    out, err = process.communicate()
+  # Analyze audio.
+  command = [
+      executable_path, '-q', '-LC', 'NB', '-Ref', reference_file, '-Test',
+      degraded_file
+  ]
+  process = subprocess.Popen(_LogCommand(command),
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+  out, err = process.communicate()
 
-    # Find the scores in stdout of POLQA.
-    match = re.search(r'\bMOS-LQO:\s+([\d.]+)', out)
+  # Find the scores in stdout of POLQA.
+  match = re.search(r'\bMOS-LQO:\s+([\d.]+)', out)
 
-    if process.returncode != 0 or not match:
-        if process.returncode == 2:
-            logging.warning('%s (2)', err.strip())
-            logging.warning('POLQA license error, skipping test.')
-        else:
-            logging.error('%s (%d)', err.strip(), process.returncode)
-        return {}
+  if process.returncode != 0 or not match:
+    if process.returncode == 2:
+      logging.warning('%s (2)', err.strip())
+      logging.warning('POLQA license error, skipping test.')
+    else:
+      logging.error('%s (%d)', err.strip(), process.returncode)
+    return {}
 
-    mos_lqo, = match.groups()
-    return {'polqa_mos_lqo': (mos_lqo, 'unitless')}
+  mos_lqo, = match.groups()
+  return {'polqa_mos_lqo': (mos_lqo, 'unitless')}
 
 
 def _MergeInPerfResultsFromCcTests(histograms, run_perf_results_file):
-    from tracing.value import histogram_set
+  from tracing.value import histogram_set
 
-    cc_histograms = histogram_set.HistogramSet()
-    with open(run_perf_results_file, 'rb') as f:
-        contents = f.read()
-        if not contents:
-            return
+  cc_histograms = histogram_set.HistogramSet()
+  with open(run_perf_results_file, 'rb') as f:
+    contents = f.read()
+    if not contents:
+      return
 
-        cc_histograms.ImportProto(contents)
+    cc_histograms.ImportProto(contents)
 
-    histograms.Merge(cc_histograms)
+  histograms.Merge(cc_histograms)
 
 
 Analyzer = collections.namedtuple(
@@ -238,136 +231,131 @@
 
 
 def _ConfigurePythonPath(args):
-    script_dir = os.path.dirname(os.path.realpath(__file__))
-    checkout_root = os.path.abspath(
-        os.path.join(script_dir, os.pardir, os.pardir))
+  script_dir = os.path.dirname(os.path.realpath(__file__))
+  checkout_root = os.path.abspath(os.path.join(script_dir, os.pardir,
+                                               os.pardir))
 
-    # TODO(https://crbug.com/1029452): Use a copy rule and add these from the out
-    # dir like for the third_party/protobuf code.
-    sys.path.insert(
-        0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
+  # TODO(https://crbug.com/1029452): Use a copy rule and add these from the
+  # out dir like for the third_party/protobuf code.
+  sys.path.insert(
+      0, os.path.join(checkout_root, 'third_party', 'catapult', 'tracing'))
 
-    # The low_bandwidth_audio_perf_test gn rule will build the protobuf stub for
-    # python, so put it in the path for this script before we attempt to import
-    # it.
-    histogram_proto_path = os.path.join(os.path.abspath(args.build_dir),
-                                        'pyproto', 'tracing', 'tracing',
-                                        'proto')
-    sys.path.insert(0, histogram_proto_path)
-    proto_stub_path = os.path.join(os.path.abspath(args.build_dir), 'pyproto')
-    sys.path.insert(0, proto_stub_path)
+  # The low_bandwidth_audio_perf_test gn rule will build the protobuf stub
+  # for python, so put it in the path for this script before we attempt to
+  # import it.
+  histogram_proto_path = os.path.join(os.path.abspath(args.build_dir),
+                                      'pyproto', 'tracing', 'tracing', 'proto')
+  sys.path.insert(0, histogram_proto_path)
+  proto_stub_path = os.path.join(os.path.abspath(args.build_dir), 'pyproto')
+  sys.path.insert(0, proto_stub_path)
 
-    # Fail early in case the proto hasn't been built.
-    try:
-        import histogram_pb2
-    except ImportError as e:
-        logging.exception(e)
-        raise ImportError(
-            'Could not import histogram_pb2. You need to build the '
-            'low_bandwidth_audio_perf_test target before invoking '
-            'this script. Expected to find '
-            'histogram_pb2.py in %s.' % histogram_proto_path)
+  # Fail early in case the proto hasn't been built.
+  try:
+    #pylint: disable=unused-variable
+    import histogram_pb2
+  except ImportError as e:
+    logging.exception(e)
+    raise ImportError('Could not import histogram_pb2. You need to build the '
+                      'low_bandwidth_audio_perf_test target before invoking '
+                      'this script. Expected to find '
+                      'histogram_pb2.py in %s.' % histogram_proto_path)
 
 
 def main():
-    # pylint: disable=W0101
-    logging.basicConfig(level=logging.INFO)
-    logging.info('Invoked with %s', str(sys.argv))
+  # pylint: disable=W0101
+  logging.basicConfig(level=logging.INFO)
+  logging.info('Invoked with %s', str(sys.argv))
 
-    args = _ParseArgs()
+  args = _ParseArgs()
 
-    _ConfigurePythonPath(args)
+  _ConfigurePythonPath(args)
 
-    # Import catapult modules here after configuring the pythonpath.
-    from tracing.value import histogram_set
-    from tracing.value.diagnostics import reserved_infos
-    from tracing.value.diagnostics import generic_set
+  # Import catapult modules here after configuring the pythonpath.
+  from tracing.value import histogram_set
+  from tracing.value.diagnostics import reserved_infos
+  from tracing.value.diagnostics import generic_set
 
-    pesq_path, polqa_path = _GetPathToTools()
-    if pesq_path is None:
-        return 1
+  pesq_path, polqa_path = _GetPathToTools()
+  if pesq_path is None:
+    return 1
 
-    out_dir = os.path.join(args.build_dir, '..')
-    if args.android:
-        test_command = [
-            os.path.join(args.build_dir, 'bin',
-                         'run_low_bandwidth_audio_test'), '-v',
-            '--num-retries', args.num_retries
-        ]
-    else:
-        test_command = [
-            os.path.join(args.build_dir, 'low_bandwidth_audio_test')
-        ]
+  out_dir = os.path.join(args.build_dir, '..')
+  if args.android:
+    test_command = [
+        os.path.join(args.build_dir, 'bin', 'run_low_bandwidth_audio_test'),
+        '-v', '--num-retries', args.num_retries
+    ]
+  else:
+    test_command = [os.path.join(args.build_dir, 'low_bandwidth_audio_test')]
 
-    analyzers = [Analyzer('pesq', _RunPesq, pesq_path, 16000)]
-    # Check if POLQA can run at all, or skip the 48 kHz tests entirely.
-    example_path = os.path.join(SRC_DIR, 'resources', 'voice_engine',
-                                'audio_tiny48.wav')
-    if polqa_path and _RunPolqa(polqa_path, example_path, example_path):
-        analyzers.append(Analyzer('polqa', _RunPolqa, polqa_path, 48000))
+  analyzers = [Analyzer('pesq', _RunPesq, pesq_path, 16000)]
+  # Check if POLQA can run at all, or skip the 48 kHz tests entirely.
+  example_path = os.path.join(SRC_DIR, 'resources', 'voice_engine',
+                              'audio_tiny48.wav')
+  if polqa_path and _RunPolqa(polqa_path, example_path, example_path):
+    analyzers.append(Analyzer('polqa', _RunPolqa, polqa_path, 48000))
 
-    histograms = histogram_set.HistogramSet()
-    for analyzer in analyzers:
-        # Start the test executable that produces audio files.
-        test_process = subprocess.Popen(_LogCommand(test_command + [
-            '--sample_rate_hz=%d' % analyzer.sample_rate_hz,
-            '--test_case_prefix=%s' % analyzer.name,
-        ] + args.extra_test_args),
-                                        stdout=subprocess.PIPE,
-                                        stderr=subprocess.STDOUT)
-        perf_results_file = None
-        try:
-            lines = iter(test_process.stdout.readline, '')
-            for result in ExtractTestRuns(lines, echo=True):
-                (android_device, test_name, reference_file, degraded_file,
-                 perf_results_file) = result
+  histograms = histogram_set.HistogramSet()
+  for analyzer in analyzers:
+    # Start the test executable that produces audio files.
+    test_process = subprocess.Popen(_LogCommand(test_command + [
+        '--sample_rate_hz=%d' % analyzer.sample_rate_hz,
+        '--test_case_prefix=%s' % analyzer.name,
+    ] + args.extra_test_args),
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.STDOUT)
+    perf_results_file = None
+    try:
+      lines = iter(test_process.stdout.readline, '')
+      for result in ExtractTestRuns(lines, echo=True):
+        (android_device, test_name, reference_file, degraded_file,
+         perf_results_file) = result
 
-                adb_prefix = (args.adb_path, )
-                if android_device:
-                    adb_prefix += ('-s', android_device)
+        adb_prefix = (args.adb_path, )
+        if android_device:
+          adb_prefix += ('-s', android_device)
 
-                reference_file = _GetFile(reference_file,
-                                          out_dir,
-                                          android=args.android,
-                                          adb_prefix=adb_prefix)
-                degraded_file = _GetFile(degraded_file,
-                                         out_dir,
-                                         move=True,
-                                         android=args.android,
-                                         adb_prefix=adb_prefix)
+        reference_file = _GetFile(reference_file,
+                                  out_dir,
+                                  android=args.android,
+                                  adb_prefix=adb_prefix)
+        degraded_file = _GetFile(degraded_file,
+                                 out_dir,
+                                 move=True,
+                                 android=args.android,
+                                 adb_prefix=adb_prefix)
 
-                analyzer_results = analyzer.func(analyzer.executable,
-                                                 reference_file, degraded_file)
-                for metric, (value, units) in analyzer_results.items():
-                    hist = histograms.CreateHistogram(metric, units, [value])
-                    user_story = generic_set.GenericSet([test_name])
-                    hist.diagnostics[reserved_infos.STORIES.name] = user_story
+        analyzer_results = analyzer.func(analyzer.executable, reference_file,
+                                         degraded_file)
+        for metric, (value, units) in analyzer_results.items():
+          hist = histograms.CreateHistogram(metric, units, [value])
+          user_story = generic_set.GenericSet([test_name])
+          hist.diagnostics[reserved_infos.STORIES.name] = user_story
 
-                    # Output human readable results.
-                    print 'RESULT %s: %s= %s %s' % (metric, test_name, value,
-                                                    units)
+          # Output human readable results.
+          print 'RESULT %s: %s= %s %s' % (metric, test_name, value, units)
 
-                if args.remove:
-                    os.remove(reference_file)
-                    os.remove(degraded_file)
-        finally:
-            test_process.terminate()
-        if perf_results_file:
-            perf_results_file = _GetFile(perf_results_file,
-                                         out_dir,
-                                         move=True,
-                                         android=args.android,
-                                         adb_prefix=adb_prefix)
-            _MergeInPerfResultsFromCcTests(histograms, perf_results_file)
-            if args.remove:
-                os.remove(perf_results_file)
+        if args.remove:
+          os.remove(reference_file)
+          os.remove(degraded_file)
+    finally:
+      test_process.terminate()
+    if perf_results_file:
+      perf_results_file = _GetFile(perf_results_file,
+                                   out_dir,
+                                   move=True,
+                                   android=args.android,
+                                   adb_prefix=adb_prefix)
+      _MergeInPerfResultsFromCcTests(histograms, perf_results_file)
+      if args.remove:
+        os.remove(perf_results_file)
 
-    if args.isolated_script_test_perf_output:
-        with open(args.isolated_script_test_perf_output, 'wb') as f:
-            f.write(histograms.AsProto().SerializeToString())
+  if args.isolated_script_test_perf_output:
+    with open(args.isolated_script_test_perf_output, 'wb') as f:
+      f.write(histograms.AsProto().SerializeToString())
 
-    return test_process.wait()
+  return test_process.wait()
 
 
 if __name__ == '__main__':
-    sys.exit(main())
+  sys.exit(main())
diff --git a/pylintrc b/pylintrc
index f26c84a..e353d9e 100644
--- a/pylintrc
+++ b/pylintrc
@@ -97,6 +97,9 @@
 # Maximum number of lines in a module
 max-module-lines=1000
 
+# We use two spaces for indents, instead of the usual four spaces or tab.
+indent-string='  '
+
 
 [BASIC]
 
@@ -192,10 +195,6 @@
 
 [CLASSES]
 
-# List of interface methods to ignore, separated by a comma. This is used for
-# instance to not check methods defines in Zope's Interface base class.
-ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
-
 # List of method names used to declare (i.e. assign) instance attributes.
 defining-attr-methods=__init__,__new__,setUp
 
diff --git a/tools_webrtc/mb/mb.py b/tools_webrtc/mb/mb.py
index 09550fa..b936da0 100755
--- a/tools_webrtc/mb/mb.py
+++ b/tools_webrtc/mb/mb.py
@@ -29,11 +29,9 @@
 import tempfile
 import traceback
 try:
-  from urllib2 import urlopen # for Python2
+  from urllib2 import urlopen  # for Python2
 except ImportError:
-  from urllib.request import urlopen # for Python3
-
-from collections import OrderedDict
+  from urllib.request import urlopen  # for Python3
 
 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
 SRC_DIR = os.path.dirname(os.path.dirname(SCRIPT_DIR))
@@ -43,1251 +41,1269 @@
 
 
 def main(args):
-    mbw = MetaBuildWrapper()
-    return mbw.Main(args)
+  mbw = MetaBuildWrapper()
+  return mbw.Main(args)
 
 
 class MetaBuildWrapper(object):
-    def __init__(self):
-        self.src_dir = SRC_DIR
-        self.default_config = os.path.join(SCRIPT_DIR, 'mb_config.pyl')
-        self.default_isolate_map = os.path.join(
-            SCRIPT_DIR, 'gn_isolate_map.pyl')
-        self.executable = sys.executable
-        self.platform = sys.platform
-        self.sep = os.sep
-        self.args = argparse.Namespace()
-        self.configs = {}
-        self.builder_groups = {}
-        self.mixins = {}
-        self.isolate_exe = 'isolate.exe' if self.platform.startswith(
-            'win') else 'isolate'
+  def __init__(self):
+    self.src_dir = SRC_DIR
+    self.default_config = os.path.join(SCRIPT_DIR, 'mb_config.pyl')
+    self.default_isolate_map = os.path.join(SCRIPT_DIR, 'gn_isolate_map.pyl')
+    self.executable = sys.executable
+    self.platform = sys.platform
+    self.sep = os.sep
+    self.args = argparse.Namespace()
+    self.configs = {}
+    self.builder_groups = {}
+    self.mixins = {}
+    self.isolate_exe = 'isolate.exe' if self.platform.startswith(
+        'win') else 'isolate'
 
-    def Main(self, args):
-        self.ParseArgs(args)
-        try:
-            ret = self.args.func()
-            if ret:
-                self.DumpInputFiles()
-            return ret
-        except KeyboardInterrupt:
-            self.Print('interrupted, exiting')
-            return 130
-        except Exception:
-            self.DumpInputFiles()
-            s = traceback.format_exc()
-            for l in s.splitlines():
-                self.Print(l)
-            return 1
+  def Main(self, args):
+    self.ParseArgs(args)
+    try:
+      ret = self.args.func()
+      if ret:
+        self.DumpInputFiles()
+      return ret
+    except KeyboardInterrupt:
+      self.Print('interrupted, exiting')
+      return 130
+    except Exception:
+      self.DumpInputFiles()
+      s = traceback.format_exc()
+      for l in s.splitlines():
+        self.Print(l)
+      return 1
 
-    def ParseArgs(self, argv):
-        def AddCommonOptions(subp):
-            subp.add_argument('-b', '--builder',
-                              help='builder name to look up config from')
-            subp.add_argument('-m', '--builder-group',
-                              help='builder group name to look up config from')
-            subp.add_argument('-c', '--config',
-                              help='configuration to analyze')
-            subp.add_argument('--phase',
-                              help='optional phase name (used when builders '
-                                   'do multiple compiles with different '
-                                   'arguments in a single build)')
-            subp.add_argument('-f', '--config-file', metavar='PATH',
-                              default=self.default_config,
-                              help='path to config file '
-                                   '(default is %(default)s)')
-            subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
-                              default=self.default_isolate_map,
-                              help='path to isolate map file '
-                                   '(default is %(default)s)')
-            subp.add_argument('-r', '--realm', default='webrtc:try',
-                              help='optional LUCI realm to use (for example '
-                                   'when triggering tasks on Swarming)')
-            subp.add_argument('-g', '--goma-dir',
-                              help='path to goma directory')
-            subp.add_argument('--android-version-code',
-                              help='Sets GN arg android_default_version_code')
-            subp.add_argument('--android-version-name',
-                              help='Sets GN arg android_default_version_name')
-            subp.add_argument('-n', '--dryrun', action='store_true',
-                              help='Do a dry run (i.e., do nothing, just '
-                                   'print the commands that will run)')
-            subp.add_argument('-v', '--verbose', action='store_true',
-                              help='verbose logging')
+  def ParseArgs(self, argv):
+    def AddCommonOptions(subp):
+      subp.add_argument('-b',
+                        '--builder',
+                        help='builder name to look up config from')
+      subp.add_argument('-m',
+                        '--builder-group',
+                        help='builder group name to look up config from')
+      subp.add_argument('-c', '--config', help='configuration to analyze')
+      subp.add_argument('--phase',
+                        help='optional phase name (used when builders '
+                        'do multiple compiles with different '
+                        'arguments in a single build)')
+      subp.add_argument('-f',
+                        '--config-file',
+                        metavar='PATH',
+                        default=self.default_config,
+                        help='path to config file '
+                        '(default is %(default)s)')
+      subp.add_argument('-i',
+                        '--isolate-map-file',
+                        metavar='PATH',
+                        default=self.default_isolate_map,
+                        help='path to isolate map file '
+                        '(default is %(default)s)')
+      subp.add_argument('-r',
+                        '--realm',
+                        default='webrtc:try',
+                        help='optional LUCI realm to use (for example '
+                        'when triggering tasks on Swarming)')
+      subp.add_argument('-g', '--goma-dir', help='path to goma directory')
+      subp.add_argument('--android-version-code',
+                        help='Sets GN arg android_default_version_code')
+      subp.add_argument('--android-version-name',
+                        help='Sets GN arg android_default_version_name')
+      subp.add_argument('-n',
+                        '--dryrun',
+                        action='store_true',
+                        help='Do a dry run (i.e., do nothing, just '
+                        'print the commands that will run)')
+      subp.add_argument('-v',
+                        '--verbose',
+                        action='store_true',
+                        help='verbose logging')
 
-        parser = argparse.ArgumentParser(prog='mb')
-        subps = parser.add_subparsers()
+    parser = argparse.ArgumentParser(prog='mb')
+    subps = parser.add_subparsers()
 
-        subp = subps.add_parser('analyze',
-                                help='analyze whether changes to a set of '
-                                     'files will cause a set of binaries '
-                                     'to be rebuilt.')
-        AddCommonOptions(subp)
-        subp.add_argument('path', nargs=1,
-                          help='path build was generated into.')
-        subp.add_argument('input_path', nargs=1,
-                          help='path to a file containing the input '
-                               'arguments as a JSON object.')
-        subp.add_argument('output_path', nargs=1,
-                          help='path to a file containing the output '
-                               'arguments as a JSON object.')
-        subp.add_argument('--json-output',
-                          help='Write errors to json.output')
-        subp.set_defaults(func=self.CmdAnalyze)
+    subp = subps.add_parser('analyze',
+                            help='analyze whether changes to a set of '
+                            'files will cause a set of binaries '
+                            'to be rebuilt.')
+    AddCommonOptions(subp)
+    subp.add_argument('path', nargs=1, help='path build was generated into.')
+    subp.add_argument('input_path',
+                      nargs=1,
+                      help='path to a file containing the input '
+                      'arguments as a JSON object.')
+    subp.add_argument('output_path',
+                      nargs=1,
+                      help='path to a file containing the output '
+                      'arguments as a JSON object.')
+    subp.add_argument('--json-output', help='Write errors to json.output')
+    subp.set_defaults(func=self.CmdAnalyze)
 
-        subp = subps.add_parser('export',
-                                help='print out the expanded configuration for'
-                                     'each builder as a JSON object')
-        subp.add_argument('-f', '--config-file', metavar='PATH',
-                          default=self.default_config,
-                          help='path to config file (default is %(default)s)')
-        subp.add_argument('-g', '--goma-dir',
-                          help='path to goma directory')
-        subp.set_defaults(func=self.CmdExport)
+    subp = subps.add_parser('export',
+                            help='print out the expanded configuration for '
+                            'each builder as a JSON object')
+    subp.add_argument('-f',
+                      '--config-file',
+                      metavar='PATH',
+                      default=self.default_config,
+                      help='path to config file (default is %(default)s)')
+    subp.add_argument('-g', '--goma-dir', help='path to goma directory')
+    subp.set_defaults(func=self.CmdExport)
 
-        subp = subps.add_parser('gen',
-                                help='generate a new set of build files')
-        AddCommonOptions(subp)
-        subp.add_argument('--swarming-targets-file',
-                          help='save runtime dependencies for targets listed '
-                               'in file.')
-        subp.add_argument('--json-output',
-                          help='Write errors to json.output')
-        subp.add_argument('path', nargs=1,
-                          help='path to generate build into')
-        subp.set_defaults(func=self.CmdGen)
+    subp = subps.add_parser('gen', help='generate a new set of build files')
+    AddCommonOptions(subp)
+    subp.add_argument('--swarming-targets-file',
+                      help='save runtime dependencies for targets listed '
+                      'in file.')
+    subp.add_argument('--json-output', help='Write errors to json.output')
+    subp.add_argument('path', nargs=1, help='path to generate build into')
+    subp.set_defaults(func=self.CmdGen)
 
-        subp = subps.add_parser('isolate',
-                                help='generate the .isolate files for a given'
-                                     'binary')
-        AddCommonOptions(subp)
-        subp.add_argument('path', nargs=1,
-                          help='path build was generated into')
-        subp.add_argument('target', nargs=1,
-                          help='ninja target to generate the isolate for')
-        subp.set_defaults(func=self.CmdIsolate)
+    subp = subps.add_parser('isolate',
+                            help='generate the .isolate files for a given '
+                            'binary')
+    AddCommonOptions(subp)
+    subp.add_argument('path', nargs=1, help='path build was generated into')
+    subp.add_argument('target',
+                      nargs=1,
+                      help='ninja target to generate the isolate for')
+    subp.set_defaults(func=self.CmdIsolate)
 
-        subp = subps.add_parser('lookup',
-                                help='look up the command for a given config '
-                                     'or builder')
-        AddCommonOptions(subp)
-        subp.add_argument('--quiet', default=False, action='store_true',
-                          help='Print out just the arguments, do '
-                               'not emulate the output of the gen subcommand.')
-        subp.set_defaults(func=self.CmdLookup)
+    subp = subps.add_parser('lookup',
+                            help='look up the command for a given config '
+                            'or builder')
+    AddCommonOptions(subp)
+    subp.add_argument('--quiet',
+                      default=False,
+                      action='store_true',
+                      help='Print out just the arguments, do '
+                      'not emulate the output of the gen subcommand.')
+    subp.set_defaults(func=self.CmdLookup)
 
-        subp = subps.add_parser(
-            'run',
-            help='build and run the isolated version of a '
-                 'binary',
-            formatter_class=argparse.RawDescriptionHelpFormatter)
-        subp.description = (
-            'Build, isolate, and run the given binary with the command line\n'
-            'listed in the isolate. You may pass extra arguments after the\n'
-            'target; use "--" if the extra arguments need to include switches.'
-            '\n\n'
-            'Examples:\n'
-            '\n'
-            '  % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
-            '    //out/Default content_browsertests\n'
-            '\n'
-            '  % tools/mb/mb.py run out/Default content_browsertests\n'
-            '\n'
-            '  % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
-            '    --test-launcher-retry-limit=0'
-            '\n'
-        )
-        AddCommonOptions(subp)
-        subp.add_argument('-j', '--jobs', dest='jobs', type=int,
-                          help='Number of jobs to pass to ninja')
-        subp.add_argument('--no-build', dest='build', default=True,
-                          action='store_false',
-                          help='Do not build, just isolate and run')
-        subp.add_argument('path', nargs=1,
-                          help=('path to generate build into (or use).'
-                                ' This can be either a regular path or a '
-                                'GN-style source-relative path like '
-                                '//out/Default.'))
-        subp.add_argument('-s', '--swarmed', action='store_true',
-                          help='Run under swarming')
-        subp.add_argument('-d', '--dimension', default=[], action='append',
-                          nargs=2, dest='dimensions', metavar='FOO bar',
-                          help='dimension to filter on')
-        subp.add_argument('target', nargs=1,
-                          help='ninja target to build and run')
-        subp.add_argument('extra_args', nargs='*',
-                          help=('extra args to pass to the isolate to run. '
-                                'Use "--" as the first arg if you need to '
-                                'pass switches'))
-        subp.set_defaults(func=self.CmdRun)
+    subp = subps.add_parser(
+        'run',
+        help='build and run the isolated version of a '
+        'binary',
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+    subp.description = (
+        'Build, isolate, and run the given binary with the command line\n'
+        'listed in the isolate. You may pass extra arguments after the\n'
+        'target; use "--" if the extra arguments need to include switches.'
+        '\n\n'
+        'Examples:\n'
+        '\n'
+        '  % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
+        '    //out/Default content_browsertests\n'
+        '\n'
+        '  % tools/mb/mb.py run out/Default content_browsertests\n'
+        '\n'
+        '  % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
+        '    --test-launcher-retry-limit=0'
+        '\n')
+    AddCommonOptions(subp)
+    subp.add_argument('-j',
+                      '--jobs',
+                      dest='jobs',
+                      type=int,
+                      help='Number of jobs to pass to ninja')
+    subp.add_argument('--no-build',
+                      dest='build',
+                      default=True,
+                      action='store_false',
+                      help='Do not build, just isolate and run')
+    subp.add_argument('path',
+                      nargs=1,
+                      help=('path to generate build into (or use).'
+                            ' This can be either a regular path or a '
+                            'GN-style source-relative path like '
+                            '//out/Default.'))
+    subp.add_argument('-s',
+                      '--swarmed',
+                      action='store_true',
+                      help='Run under swarming')
+    subp.add_argument('-d',
+                      '--dimension',
+                      default=[],
+                      action='append',
+                      nargs=2,
+                      dest='dimensions',
+                      metavar='FOO bar',
+                      help='dimension to filter on')
+    subp.add_argument('target', nargs=1, help='ninja target to build and run')
+    subp.add_argument('extra_args',
+                      nargs='*',
+                      help=('extra args to pass to the isolate to run. '
+                            'Use "--" as the first arg if you need to '
+                            'pass switches'))
+    subp.set_defaults(func=self.CmdRun)
 
-        subp = subps.add_parser('validate',
-                                help='validate the config file')
-        subp.add_argument('-f', '--config-file', metavar='PATH',
-                          default=self.default_config,
-                          help='path to config file (default is %(default)s)')
-        subp.set_defaults(func=self.CmdValidate)
+    subp = subps.add_parser('validate', help='validate the config file')
+    subp.add_argument('-f',
+                      '--config-file',
+                      metavar='PATH',
+                      default=self.default_config,
+                      help='path to config file (default is %(default)s)')
+    subp.set_defaults(func=self.CmdValidate)
 
-        subp = subps.add_parser('help',
-                                help='Get help on a subcommand.')
-        subp.add_argument(nargs='?', action='store', dest='subcommand',
-                          help='The command to get help for.')
-        subp.set_defaults(func=self.CmdHelp)
+    subp = subps.add_parser('help', help='Get help on a subcommand.')
+    subp.add_argument(nargs='?',
+                      action='store',
+                      dest='subcommand',
+                      help='The command to get help for.')
+    subp.set_defaults(func=self.CmdHelp)
 
-        self.args = parser.parse_args(argv)
+    self.args = parser.parse_args(argv)
 
-    def DumpInputFiles(self):
+  def DumpInputFiles(self):
+    def DumpContentsOfFilePassedTo(arg_name, path):
+      if path and self.Exists(path):
+        self.Print("\n# To recreate the file passed to %s:" % arg_name)
+        self.Print("%% cat > %s <<EOF" % path)
+        contents = self.ReadFile(path)
+        self.Print(contents)
+        self.Print("EOF\n%\n")
 
-        def DumpContentsOfFilePassedTo(arg_name, path):
-            if path and self.Exists(path):
-                self.Print("\n# To recreate the file passed to %s:" % arg_name)
-                self.Print("%% cat > %s <<EOF" % path)
-                contents = self.ReadFile(path)
-                self.Print(contents)
-                self.Print("EOF\n%\n")
-
-        if getattr(self.args, 'input_path', None):
-            DumpContentsOfFilePassedTo(
-                'argv[0] (input_path)', self.args.input_path[0])
-        if getattr(self.args, 'swarming_targets_file', None):
-            DumpContentsOfFilePassedTo(
-                '--swarming-targets-file', self.args.swarming_targets_file)
-
-    def CmdAnalyze(self):
-        vals = self.Lookup()
-        return self.RunGNAnalyze(vals)
-
-    def CmdExport(self):
-        self.ReadConfigFile()
-        obj = {}
-        for builder_group, builders in self.builder_groups.items():
-            obj[builder_group] = {}
-            for builder in builders:
-                config = self.builder_groups[builder_group][builder]
-                if not config:
-                    continue
-
-                if isinstance(config, dict):
-                    args = {k: self.FlattenConfig(v)['gn_args']
-                            for k, v in config.items()}
-                elif config.startswith('//'):
-                    args = config
-                else:
-                    args = self.FlattenConfig(config)['gn_args']
-                    if 'error' in args:
-                        continue
-
-                obj[builder_group][builder] = args
-
-        # Dump object and trim trailing whitespace.
-        s = '\n'.join(l.rstrip() for l in
-                      json.dumps(obj, sort_keys=True, indent=2).splitlines())
-        self.Print(s)
-        return 0
-
-    def CmdGen(self):
-        vals = self.Lookup()
-        return self.RunGNGen(vals)
-
-    def CmdHelp(self):
-        if self.args.subcommand:
-            self.ParseArgs([self.args.subcommand, '--help'])
-        else:
-            self.ParseArgs(['--help'])
-
-    def CmdIsolate(self):
-        vals = self.GetConfig()
-        if not vals:
-            return 1
-        return self.RunGNIsolate(vals)
-
-    def CmdLookup(self):
-        vals = self.Lookup()
-        gn_args = self.GNArgs(vals)
-        if self.args.quiet:
-            self.Print(gn_args, end='')
-        else:
-            cmd = self.GNCmd('gen', '_path_')
-            self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
-            env = None
-
-            self.PrintCmd(cmd, env)
-        return 0
-
-    def CmdRun(self):
-        vals = self.GetConfig()
-        if not vals:
-            return 1
-
-        build_dir = self.args.path[0]
-        target = self.args.target[0]
-
-        if self.args.build:
-            ret = self.Build(target)
-            if ret:
-                return ret
-        ret = self.RunGNIsolate(vals)
-        if ret:
-            return ret
-
-        if self.args.swarmed:
-            cmd, _ = self.GetSwarmingCommand(self.args.target[0], vals)
-            return self._RunUnderSwarming(build_dir, target, cmd)
-        return self._RunLocallyIsolated(build_dir, target)
-
-    def _RunUnderSwarming(self, build_dir, target, isolate_cmd):
-        cas_instance = 'chromium-swarm'
-        swarming_server = 'chromium-swarm.appspot.com'
-        # TODO(dpranke): Look up the information for the target in
-        # the //testing/buildbot.json file, if possible, so that we
-        # can determine the isolate target, command line, and additional
-        # swarming parameters, if possible.
-        #
-        # TODO(dpranke): Also, add support for sharding and merging results.
-        dimensions = []
-        for k, v in self.args.dimensions:
-            dimensions += ['-d', '%s=%s' % (k, v)]
-
-        archive_json_path = self.ToSrcRelPath(
-            '%s/%s.archive.json' % (build_dir, target))
-        cmd = [
-            self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
-            'archive',
-            '-i',
-            self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
-            '-cas-instance',
-            cas_instance,
-            '-dump-json',
-            archive_json_path,
-        ]
-
-        # Talking to the isolateserver may fail because we're not logged in.
-        # We trap the command explicitly and rewrite the error output so that
-        # the error message is actually correct for a Chromium check out.
-        self.PrintCmd(cmd, env=None)
-        ret, out, err = self.Run(cmd, force_verbose=False)
-        if ret:
-            self.Print('  -> returned %d' % ret)
-            if out:
-                self.Print(out, end='')
-            if err:
-                self.Print(err, end='', file=sys.stderr)
-
-            return ret
-
-        try:
-            archive_hashes = json.loads(self.ReadFile(archive_json_path))
-        except Exception:
-            self.Print(
-                'Failed to read JSON file "%s"' %
-                archive_json_path, file=sys.stderr)
-            return 1
-        try:
-            cas_digest = archive_hashes[target]
-        except Exception:
-            self.Print(
-                'Cannot find hash for "%s" in "%s", file content: %s' %
-                (target, archive_json_path, archive_hashes),
-                file=sys.stderr)
-            return 1
-
-        try:
-            json_dir = self.TempDir()
-            json_file = self.PathJoin(json_dir, 'task.json')
-
-            cmd = [
-                self.PathJoin('tools', 'luci-go', 'swarming'),
-                'trigger',
-                '-realm',
-                self.args.realm,
-                '-digest',
-                cas_digest,
-                '-server',
-                swarming_server,
-                '-tag=purpose:user-debug-mb',
-                '-relative-cwd',
-                self.ToSrcRelPath(build_dir),
-                '-dump-json',
-                json_file,
-            ] + dimensions + ['--'] + list(isolate_cmd)
-
-            if self.args.extra_args:
-                cmd += ['--'] + self.args.extra_args
-            self.Print('')
-            ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
-            if ret:
-                return ret
-            task_json = self.ReadFile(json_file)
-            task_id = json.loads(task_json)["tasks"][0]['task_id']
-        finally:
-            if json_dir:
-                self.RemoveDirectory(json_dir)
-
-        cmd = [
-            self.PathJoin('tools', 'luci-go', 'swarming'),
-            'collect',
-            '-server',
-            swarming_server,
-            '-task-output-stdout=console',
-            task_id,
-        ]
-        ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
-        return ret
-
-    def _RunLocallyIsolated(self, build_dir, target):
-        cmd = [
-            self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
-            'run',
-            '-i',
-            self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
-        ]
-        if self.args.extra_args:
-            cmd += ['--'] + self.args.extra_args
-        ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
-        return ret
-
-    def CmdValidate(self, print_ok=True):
-        errs = []
-
-        # Read the file to make sure it parses.
-        self.ReadConfigFile()
-
-        # Build a list of all of the configs referenced by builders.
-        all_configs = {}
-        for builder_group in self.builder_groups:
-            for config in self.builder_groups[builder_group].values():
-                if isinstance(config, dict):
-                    for c in config.values():
-                        all_configs[c] = builder_group
-                else:
-                    all_configs[config] = builder_group
-
-        # Check that every referenced args file or config actually exists.
-        for config, loc in all_configs.items():
-            if config.startswith('//'):
-                if not self.Exists(self.ToAbsPath(config)):
-                    errs.append('Unknown args file "%s" referenced from "%s".' %
-                                (config, loc))
-            elif not config in self.configs:
-                errs.append('Unknown config "%s" referenced from "%s".' %
-                            (config, loc))
-
-        # Check that every actual config is actually referenced.
-        for config in self.configs:
-            if not config in all_configs:
-                errs.append('Unused config "%s".' % config)
-
-        # Figure out the whole list of mixins, and check that every mixin
-        # listed by a config or another mixin actually exists.
-        referenced_mixins = set()
-        for config, mixins in self.configs.items():
-            for mixin in mixins:
-                if not mixin in self.mixins:
-                    errs.append('Unknown mixin "%s" referenced by config "%s".'
-                                % (mixin, config))
-                referenced_mixins.add(mixin)
-
-        for mixin in self.mixins:
-            for sub_mixin in self.mixins[mixin].get('mixins', []):
-                if not sub_mixin in self.mixins:
-                    errs.append('Unknown mixin "%s" referenced by mixin "%s".'
-                                % (sub_mixin, mixin))
-                referenced_mixins.add(sub_mixin)
-
-        # Check that every mixin defined is actually referenced somewhere.
-        for mixin in self.mixins:
-            if not mixin in referenced_mixins:
-                errs.append('Unreferenced mixin "%s".' % mixin)
-
-        if errs:
-            raise MBErr(('mb config file %s has problems:' %
-                         self.args.config_file) + '\n  ' + '\n  '.join(errs))
-
-        if print_ok:
-            self.Print('mb config file %s looks ok.' % self.args.config_file)
-        return 0
-
-    def GetConfig(self):
-        build_dir = self.args.path[0]
-
-        vals = self.DefaultVals()
-        if self.args.builder or self.args.builder_group or self.args.config:
-            vals = self.Lookup()
-            # Re-run gn gen in order to ensure the config is consistent with
-            # the build dir.
-            self.RunGNGen(vals)
-            return vals
-
-        toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
-                                       'toolchain.ninja')
-        if not self.Exists(toolchain_path):
-            self.Print('Must either specify a path to an existing GN build '
-                       'dir or pass in a -m/-b pair or a -c flag to specify '
-                       'the configuration')
-            return {}
-
-        vals['gn_args'] = self.GNArgsFromDir(build_dir)
-        return vals
-
-    def GNArgsFromDir(self, build_dir):
-        args_contents = ""
-        gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
-        if self.Exists(gn_args_path):
-            args_contents = self.ReadFile(gn_args_path)
-        gn_args = []
-        for l in args_contents.splitlines():
-            fields = l.split(' ')
-            name = fields[0]
-            val = ' '.join(fields[2:])
-            gn_args.append('%s=%s' % (name, val))
-
-        return ' '.join(gn_args)
-
-    def Lookup(self):
-        self.ReadConfigFile()
-        config = self.ConfigFromArgs()
-        if config.startswith('//'):
-            if not self.Exists(self.ToAbsPath(config)):
-                raise MBErr('args file "%s" not found' % config)
-            vals = self.DefaultVals()
-            vals['args_file'] = config
-        else:
-            if not config in self.configs:
-                raise MBErr('Config "%s" not found in %s' %
-                            (config, self.args.config_file))
-            vals = self.FlattenConfig(config)
-        return vals
-
-    def ReadConfigFile(self):
-        if not self.Exists(self.args.config_file):
-            raise MBErr('config file not found at %s' % self.args.config_file)
-
-        try:
-            contents = ast.literal_eval(self.ReadFile(self.args.config_file))
-        except SyntaxError as e:
-            raise MBErr('Failed to parse config file "%s": %s' %
-                        (self.args.config_file, e))
-
-        self.configs = contents['configs']
-        self.builder_groups = contents['builder_groups']
-        self.mixins = contents['mixins']
-
-    def ReadIsolateMap(self):
-        isolate_map = self.args.isolate_map_file
-        if not self.Exists(isolate_map):
-            raise MBErr('isolate map file not found at %s' % isolate_map)
-        try:
-            return ast.literal_eval(self.ReadFile(isolate_map))
-        except SyntaxError as e:
-            raise MBErr(
-                'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
-
-    def ConfigFromArgs(self):
-        if self.args.config:
-            if self.args.builder_group or self.args.builder:
-                raise MBErr('Can not specific both -c/--config and '
-                            '-m/--builder-group or -b/--builder')
-
-            return self.args.config
-
-        if not self.args.builder_group or not self.args.builder:
-            raise MBErr('Must specify either -c/--config or '
-                        '(-m/--builder-group and -b/--builder)')
+    if getattr(self.args, 'input_path', None):
+      DumpContentsOfFilePassedTo('argv[0] (input_path)',
+                                 self.args.input_path[0])
+    if getattr(self.args, 'swarming_targets_file', None):
+      DumpContentsOfFilePassedTo('--swarming-targets-file',
+                                 self.args.swarming_targets_file)
 
-        if not self.args.builder_group in self.builder_groups:
-            raise MBErr('Master name "%s" not found in "%s"' %
-                        (self.args.builder_group, self.args.config_file))
+  def CmdAnalyze(self):
+    vals = self.Lookup()
+    return self.RunGNAnalyze(vals)
 
-        if (not self.args.builder in
-                self.builder_groups[self.args.builder_group]):
-            raise MBErr(
-                'Builder name "%s"  not found under builder_groups[%s] in "%s"'
-                % (self.args.builder, self.args.builder_group,
-                   self.args.config_file))
+  def CmdExport(self):
+    self.ReadConfigFile()
+    obj = {}
+    for builder_group, builders in self.builder_groups.items():
+      obj[builder_group] = {}
+      for builder in builders:
+        config = self.builder_groups[builder_group][builder]
+        if not config:
+          continue
 
-        config = (
-            self.builder_groups[self.args.builder_group][self.args.builder]
-        )
         if isinstance(config, dict):
-            if self.args.phase is None:
-                raise MBErr('Must specify a build --phase for %s on %s' %
-                            (self.args.builder, self.args.builder_group))
-            phase = str(self.args.phase)
-            if phase not in config:
-                raise MBErr('Phase %s doesn\'t exist for %s on %s' %
-                            (phase, self.args.builder,
-                             self.args.builder_group))
-            return config[phase]
+          args = {
+              k: self.FlattenConfig(v)['gn_args']
+              for k, v in config.items()
+          }
+        elif config.startswith('//'):
+          args = config
+        else:
+          args = self.FlattenConfig(config)['gn_args']
+          if 'error' in args:
+            continue
 
-        if self.args.phase is not None:
-            raise MBErr('Must not specify a build --phase for %s on %s' %
-                        (self.args.builder, self.args.builder_group))
-        return config
+        obj[builder_group][builder] = args
 
-    def FlattenConfig(self, config):
-        mixins = self.configs[config]
-        vals = self.DefaultVals()
+    # Dump object and trim trailing whitespace.
+    s = '\n'.join(
+        l.rstrip()
+        for l in json.dumps(obj, sort_keys=True, indent=2).splitlines())
+    self.Print(s)
+    return 0
 
-        visited = []
-        self.FlattenMixins(mixins, vals, visited)
-        return vals
+  def CmdGen(self):
+    vals = self.Lookup()
+    return self.RunGNGen(vals)
 
-    def DefaultVals(self):
-        return {
-            'args_file': '',
-            'cros_passthrough': False,
-            'gn_args': '',
-        }
+  def CmdHelp(self):
+    if self.args.subcommand:
+      self.ParseArgs([self.args.subcommand, '--help'])
+    else:
+      self.ParseArgs(['--help'])
 
-    def FlattenMixins(self, mixins, vals, visited):
-        for m in mixins:
-            if m not in self.mixins:
-                raise MBErr('Unknown mixin "%s"' % m)
+  def CmdIsolate(self):
+    vals = self.GetConfig()
+    if not vals:
+      return 1
+    return self.RunGNIsolate(vals)
 
-            visited.append(m)
+  def CmdLookup(self):
+    vals = self.Lookup()
+    gn_args = self.GNArgs(vals)
+    if self.args.quiet:
+      self.Print(gn_args, end='')
+    else:
+      cmd = self.GNCmd('gen', '_path_')
+      self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+      env = None
 
-            mixin_vals = self.mixins[m]
+      self.PrintCmd(cmd, env)
+    return 0
 
-            if 'cros_passthrough' in mixin_vals:
-                vals['cros_passthrough'] = mixin_vals['cros_passthrough']
-            if 'gn_args' in mixin_vals:
-                if vals['gn_args']:
-                    vals['gn_args'] += ' ' + mixin_vals['gn_args']
-                else:
-                    vals['gn_args'] = mixin_vals['gn_args']
+  def CmdRun(self):
+    vals = self.GetConfig()
+    if not vals:
+      return 1
 
-            if 'mixins' in mixin_vals:
-                self.FlattenMixins(mixin_vals['mixins'], vals, visited)
-        return vals
+    build_dir = self.args.path[0]
+    target = self.args.target[0]
 
-    def RunGNGen(self, vals):
-        build_dir = self.args.path[0]
-
-        cmd = self.GNCmd('gen', build_dir, '--check')
-        gn_args = self.GNArgs(vals)
-
-        # Since GN hasn't run yet, the build directory may not even exist.
-        self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
-
-        gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
-        self.WriteFile(gn_args_path, gn_args, force_verbose=True)
-
-        swarming_targets = []
-        if getattr(self.args, 'swarming_targets_file', None):
-            # We need GN to generate the list of runtime dependencies for
-            # the compile targets listed (one per line) in the file so
-            # we can run them via swarming. We use gn_isolate_map.pyl to
-            # convert the compile targets to the matching GN labels.
-            path = self.args.swarming_targets_file
-            if not self.Exists(path):
-                self.WriteFailureAndRaise('"%s" does not exist' % path,
-                                          output_path=None)
-            contents = self.ReadFile(path)
-            swarming_targets = set(contents.splitlines())
-
-            isolate_map = self.ReadIsolateMap()
-            err, labels = self.MapTargetsToLabels(
-                isolate_map, swarming_targets)
-            if err:
-                raise MBErr(err)
-
-            gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
-            self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
-            cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
-
-        ret, output, _ = self.Run(cmd)
-        if ret:
-            if self.args.json_output:
-                # write errors to json.output
-                self.WriteJSON({'output': output}, self.args.json_output)
-            # If `gn gen` failed, we should exit early rather than trying to
-            # generate isolates. Run() will have already logged any error
-            # output.
-            self.Print('GN gen failed: %d' % ret)
-            return ret
-
-        android = 'target_os="android"' in vals['gn_args']
-        for target in swarming_targets:
-            if android:
-                # Android targets may be either android_apk or executable. The
-                # former will result in runtime_deps associated with the stamp
-                # file, while the latter will result in runtime_deps associated
-                # with the executable.
-                label = isolate_map[target]['label']
-                runtime_deps_targets = [
-                    target + '.runtime_deps',
-                    'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
-            elif isolate_map[target]['type'] == 'gpu_browser_test':
-                if self.platform == 'win32':
-                    runtime_deps_targets = ['browser_tests.exe.runtime_deps']
-                else:
-                    runtime_deps_targets = ['browser_tests.runtime_deps']
-            elif isolate_map[target]['type'] == 'script':
-                label = isolate_map[target]['label'].split(':')[1]
-                runtime_deps_targets = [
-                    '%s.runtime_deps' % label]
-                if self.platform == 'win32':
-                    runtime_deps_targets += [label + '.exe.runtime_deps']
-                else:
-                    runtime_deps_targets += [label + '.runtime_deps']
-            elif self.platform == 'win32':
-                runtime_deps_targets = [target + '.exe.runtime_deps']
-            else:
-                runtime_deps_targets = [target + '.runtime_deps']
-
-            for r in runtime_deps_targets:
-                runtime_deps_path = self.ToAbsPath(build_dir, r)
-                if self.Exists(runtime_deps_path):
-                    break
-            else:
-                raise MBErr('did not generate any of %s' %
-                            ', '.join(runtime_deps_targets))
-
-            command, extra_files = self.GetSwarmingCommand(target, vals)
-
-            runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
-
-            self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
-                                   extra_files)
-
-        return 0
-
-    def RunGNIsolate(self, vals):
-        target = self.args.target[0]
-        isolate_map = self.ReadIsolateMap()
-        err, labels = self.MapTargetsToLabels(isolate_map, [target])
-        if err:
-            raise MBErr(err)
-        label = labels[0]
-
-        build_dir = self.args.path[0]
-        command, extra_files = self.GetSwarmingCommand(target, vals)
-
-        cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
-        ret, out, _ = self.Call(cmd)
-        if ret:
-            if out:
-                self.Print(out)
-            return ret
-
-        runtime_deps = out.splitlines()
-
-        self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
-                               extra_files)
-
-        ret, _, _ = self.Run([
-            self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
-            'check',
-            '-i',
-            self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target))],
-            buffer_output=False)
-
+    if self.args.build:
+      ret = self.Build(target)
+      if ret:
         return ret
+    ret = self.RunGNIsolate(vals)
+    if ret:
+      return ret
 
-    def WriteIsolateFiles(self, build_dir, command, target, runtime_deps,
-                          extra_files):
-        isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
-        self.WriteFile(isolate_path,
-                       pprint.pformat({
-                           'variables': {
-                               'command': command,
-                               'files': sorted(runtime_deps + extra_files),
-                           }
-                       }) + '\n')
+    if self.args.swarmed:
+      cmd, _ = self.GetSwarmingCommand(self.args.target[0], vals)
+      return self._RunUnderSwarming(build_dir, target, cmd)
+    return self._RunLocallyIsolated(build_dir, target)
 
-        self.WriteJSON(
-            {
-                'args': [
-                    '--isolate',
-                    self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
-                ],
-                'dir': self.src_dir,
-                'version': 1,
-            },
-            isolate_path + 'd.gen.json',
-        )
+  def _RunUnderSwarming(self, build_dir, target, isolate_cmd):
+    cas_instance = 'chromium-swarm'
+    swarming_server = 'chromium-swarm.appspot.com'
+    # TODO(dpranke): Look up the information for the target in
+    # the //testing/buildbot.json file, if possible, so that we
+    # can determine the isolate target, command line, and additional
+    # swarming parameters, if possible.
+    #
+    # TODO(dpranke): Also, add support for sharding and merging results.
+    dimensions = []
+    for k, v in self.args.dimensions:
+      dimensions += ['-d', '%s=%s' % (k, v)]
 
-    def MapTargetsToLabels(self, isolate_map, targets):
-        labels = []
-        err = ''
+    archive_json_path = self.ToSrcRelPath('%s/%s.archive.json' %
+                                          (build_dir, target))
+    cmd = [
+        self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
+        'archive',
+        '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+        '-cas-instance',
+        cas_instance,
+        '-dump-json',
+        archive_json_path,
+    ]
 
-        def StripTestSuffixes(target):
-            for suffix in ('_apk_run', '_apk', '_run'):
-                if target.endswith(suffix):
-                    return target[:-len(suffix)], suffix
-            return None, None
+    # Talking to the isolateserver may fail because we're not logged in.
+    # We trap the command explicitly and rewrite the error output so that
+    # the error message is actually correct for a Chromium check out.
+    self.PrintCmd(cmd, env=None)
+    ret, out, err = self.Run(cmd, force_verbose=False)
+    if ret:
+      self.Print('  -> returned %d' % ret)
+      if out:
+        self.Print(out, end='')
+      if err:
+        self.Print(err, end='', file=sys.stderr)
 
-        for target in targets:
-            if target == 'all':
-                labels.append(target)
-            elif target.startswith('//'):
-                labels.append(target)
-            else:
-                if target in isolate_map:
-                    stripped_target, suffix = target, ''
-                else:
-                    stripped_target, suffix = StripTestSuffixes(target)
-                if stripped_target in isolate_map:
-                    if isolate_map[stripped_target]['type'] == 'unknown':
-                        err += ('test target "%s" type is unknown\n' % target)
-                    else:
-                        labels.append(
-                            isolate_map[stripped_target]['label'] + suffix)
-                else:
-                    err += ('target "%s" not found in '
-                            '//testing/buildbot/gn_isolate_map.pyl\n' % target)
+      return ret
 
-        return err, labels
+    try:
+      archive_hashes = json.loads(self.ReadFile(archive_json_path))
+    except Exception:
+      self.Print('Failed to read JSON file "%s"' % archive_json_path,
+                 file=sys.stderr)
+      return 1
+    try:
+      cas_digest = archive_hashes[target]
+    except Exception:
+      self.Print('Cannot find hash for "%s" in "%s", file content: %s' %
+                 (target, archive_json_path, archive_hashes),
+                 file=sys.stderr)
+      return 1
 
-    def GNCmd(self, subcommand, path, *args):
-        if self.platform.startswith('linux'):
-            subdir, exe = 'linux64', 'gn'
-        elif self.platform == 'darwin':
-            subdir, exe = 'mac', 'gn'
+    try:
+      json_dir = self.TempDir()
+      json_file = self.PathJoin(json_dir, 'task.json')
+
+      cmd = [
+          self.PathJoin('tools', 'luci-go', 'swarming'),
+          'trigger',
+          '-realm',
+          self.args.realm,
+          '-digest',
+          cas_digest,
+          '-server',
+          swarming_server,
+          '-tag=purpose:user-debug-mb',
+          '-relative-cwd',
+          self.ToSrcRelPath(build_dir),
+          '-dump-json',
+          json_file,
+      ] + dimensions + ['--'] + list(isolate_cmd)
+
+      if self.args.extra_args:
+        cmd += ['--'] + self.args.extra_args
+      self.Print('')
+      ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+      if ret:
+        return ret
+      task_json = self.ReadFile(json_file)
+      task_id = json.loads(task_json)["tasks"][0]['task_id']
+    finally:
+      if json_dir:
+        self.RemoveDirectory(json_dir)
+
+    cmd = [
+        self.PathJoin('tools', 'luci-go', 'swarming'),
+        'collect',
+        '-server',
+        swarming_server,
+        '-task-output-stdout=console',
+        task_id,
+    ]
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+    return ret
+
+  def _RunLocallyIsolated(self, build_dir, target):
+    cmd = [
+        self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
+        'run',
+        '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+    ]
+    if self.args.extra_args:
+      cmd += ['--'] + self.args.extra_args
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+    return ret
+
+  def CmdValidate(self, print_ok=True):
+    errs = []
+
+    # Read the file to make sure it parses.
+    self.ReadConfigFile()
+
+    # Build a list of all of the configs referenced by builders.
+    all_configs = {}
+    for builder_group in self.builder_groups:
+      for config in self.builder_groups[builder_group].values():
+        if isinstance(config, dict):
+          for c in config.values():
+            all_configs[c] = builder_group
         else:
-            subdir, exe = 'win', 'gn.exe'
+          all_configs[config] = builder_group
 
-        gn_path = self.PathJoin(self.src_dir, 'buildtools', subdir, exe)
-        return [gn_path, subcommand, path] + list(args)
+    # Check that every referenced args file or config actually exists.
+    for config, loc in all_configs.items():
+      if config.startswith('//'):
+        if not self.Exists(self.ToAbsPath(config)):
+          errs.append('Unknown args file "%s" referenced from "%s".' %
+                      (config, loc))
+      elif not config in self.configs:
+        errs.append('Unknown config "%s" referenced from "%s".' % (config, loc))
 
-    def GNArgs(self, vals):
-        if vals['cros_passthrough']:
-            if not 'GN_ARGS' in os.environ:
-                raise MBErr('MB is expecting GN_ARGS to be in the environment')
-            gn_args = os.environ['GN_ARGS']
-            if not re.search('target_os.*=.*"chromeos"', gn_args):
-                raise MBErr('GN_ARGS is missing target_os = "chromeos": '
-                            '(GN_ARGS=%s)' % gn_args)
+    # Check that every actual config is actually referenced.
+    for config in self.configs:
+      if not config in all_configs:
+        errs.append('Unused config "%s".' % config)
+
+    # Figure out the whole list of mixins, and check that every mixin
+    # listed by a config or another mixin actually exists.
+    referenced_mixins = set()
+    for config, mixins in self.configs.items():
+      for mixin in mixins:
+        if not mixin in self.mixins:
+          errs.append('Unknown mixin "%s" referenced by config "%s".' %
+                      (mixin, config))
+        referenced_mixins.add(mixin)
+
+    for mixin in self.mixins:
+      for sub_mixin in self.mixins[mixin].get('mixins', []):
+        if not sub_mixin in self.mixins:
+          errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
+                      (sub_mixin, mixin))
+        referenced_mixins.add(sub_mixin)
+
+    # Check that every mixin defined is actually referenced somewhere.
+    for mixin in self.mixins:
+      if not mixin in referenced_mixins:
+        errs.append('Unreferenced mixin "%s".' % mixin)
+
+    if errs:
+      raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
+                  '\n  ' + '\n  '.join(errs))
+
+    if print_ok:
+      self.Print('mb config file %s looks ok.' % self.args.config_file)
+    return 0
+
+  def GetConfig(self):
+    build_dir = self.args.path[0]
+
+    vals = self.DefaultVals()
+    if self.args.builder or self.args.builder_group or self.args.config:
+      vals = self.Lookup()
+      # Re-run gn gen in order to ensure the config is consistent with
+      # the build dir.
+      self.RunGNGen(vals)
+      return vals
+
+    toolchain_path = self.PathJoin(self.ToAbsPath(build_dir), 'toolchain.ninja')
+    if not self.Exists(toolchain_path):
+      self.Print('Must either specify a path to an existing GN build '
+                 'dir or pass in a -m/-b pair or a -c flag to specify '
+                 'the configuration')
+      return {}
+
+    vals['gn_args'] = self.GNArgsFromDir(build_dir)
+    return vals
+
+  def GNArgsFromDir(self, build_dir):
+    args_contents = ""
+    gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
+    if self.Exists(gn_args_path):
+      args_contents = self.ReadFile(gn_args_path)
+    gn_args = []
+    for l in args_contents.splitlines():
+      fields = l.split(' ')
+      name = fields[0]
+      val = ' '.join(fields[2:])
+      gn_args.append('%s=%s' % (name, val))
+
+    return ' '.join(gn_args)
+
+  def Lookup(self):
+    self.ReadConfigFile()
+    config = self.ConfigFromArgs()
+    if config.startswith('//'):
+      if not self.Exists(self.ToAbsPath(config)):
+        raise MBErr('args file "%s" not found' % config)
+      vals = self.DefaultVals()
+      vals['args_file'] = config
+    else:
+      if not config in self.configs:
+        raise MBErr('Config "%s" not found in %s' %
+                    (config, self.args.config_file))
+      vals = self.FlattenConfig(config)
+    return vals
+
+  def ReadConfigFile(self):
+    if not self.Exists(self.args.config_file):
+      raise MBErr('config file not found at %s' % self.args.config_file)
+
+    try:
+      contents = ast.literal_eval(self.ReadFile(self.args.config_file))
+    except SyntaxError as e:
+      raise MBErr('Failed to parse config file "%s": %s' %
+                  (self.args.config_file, e))
+
+    self.configs = contents['configs']
+    self.builder_groups = contents['builder_groups']
+    self.mixins = contents['mixins']
+
+  def ReadIsolateMap(self):
+    isolate_map = self.args.isolate_map_file
+    if not self.Exists(isolate_map):
+      raise MBErr('isolate map file not found at %s' % isolate_map)
+    try:
+      return ast.literal_eval(self.ReadFile(isolate_map))
+    except SyntaxError as e:
+      raise MBErr('Failed to parse isolate map file "%s": %s' %
+                  (isolate_map, e))
+
+  def ConfigFromArgs(self):
+    if self.args.config:
+      if self.args.builder_group or self.args.builder:
+        raise MBErr('Can not specific both -c/--config and '
+                    '-m/--builder-group or -b/--builder')
+
+      return self.args.config
+
+    if not self.args.builder_group or not self.args.builder:
+      raise MBErr('Must specify either -c/--config or '
+                  '(-m/--builder-group and -b/--builder)')
+
+    if not self.args.builder_group in self.builder_groups:
+      raise MBErr('Master name "%s" not found in "%s"' %
+                  (self.args.builder_group, self.args.config_file))
+
+    if not self.args.builder in self.builder_groups[self.args.builder_group]:
+      raise MBErr(
+          'Builder name "%s"  not found under builder_groups[%s] in "%s"' %
+          (self.args.builder, self.args.builder_group, self.args.config_file))
+
+    config = (self.builder_groups[self.args.builder_group][self.args.builder])
+    if isinstance(config, dict):
+      if self.args.phase is None:
+        raise MBErr('Must specify a build --phase for %s on %s' %
+                    (self.args.builder, self.args.builder_group))
+      phase = str(self.args.phase)
+      if phase not in config:
+        raise MBErr('Phase %s doesn\'t exist for %s on %s' %
+                    (phase, self.args.builder, self.args.builder_group))
+      return config[phase]
+
+    if self.args.phase is not None:
+      raise MBErr('Must not specify a build --phase for %s on %s' %
+                  (self.args.builder, self.args.builder_group))
+    return config
+
+  def FlattenConfig(self, config):
+    mixins = self.configs[config]
+    vals = self.DefaultVals()
+
+    visited = []
+    self.FlattenMixins(mixins, vals, visited)
+    return vals
+
+  @staticmethod
+  def DefaultVals():
+    return {
+        'args_file': '',
+        'cros_passthrough': False,
+        'gn_args': '',
+    }
+
+  def FlattenMixins(self, mixins, vals, visited):
+    for m in mixins:
+      if m not in self.mixins:
+        raise MBErr('Unknown mixin "%s"' % m)
+
+      visited.append(m)
+
+      mixin_vals = self.mixins[m]
+
+      if 'cros_passthrough' in mixin_vals:
+        vals['cros_passthrough'] = mixin_vals['cros_passthrough']
+      if 'gn_args' in mixin_vals:
+        if vals['gn_args']:
+          vals['gn_args'] += ' ' + mixin_vals['gn_args']
         else:
-            gn_args = vals['gn_args']
+          vals['gn_args'] = mixin_vals['gn_args']
 
-        if self.args.goma_dir:
-            gn_args += ' goma_dir="%s"' % self.args.goma_dir
+      if 'mixins' in mixin_vals:
+        self.FlattenMixins(mixin_vals['mixins'], vals, visited)
+    return vals
 
-        android_version_code = self.args.android_version_code
-        if android_version_code:
-            gn_args += (' android_default_version_code="%s"' %
-                        android_version_code)
+  def RunGNGen(self, vals):
+    build_dir = self.args.path[0]
 
-        android_version_name = self.args.android_version_name
-        if android_version_name:
-            gn_args += (' android_default_version_name="%s"' %
-                        android_version_name)
+    cmd = self.GNCmd('gen', build_dir, '--check')
+    gn_args = self.GNArgs(vals)
 
-        # Canonicalize the arg string into a sorted, newline-separated list
-        # of key-value pairs, and de-dup the keys if need be so that only
-        # the last instance of each arg is listed.
-        gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+    # Since GN hasn't run yet, the build directory may not even exist.
+    self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
 
-        args_file = vals.get('args_file', None)
-        if args_file:
-            gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
-        return gn_args
+    gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
+    self.WriteFile(gn_args_path, gn_args, force_verbose=True)
 
-    def GetSwarmingCommand(self, target, vals):
-        isolate_map = self.ReadIsolateMap()
-        test_type = isolate_map[target]['type']
+    swarming_targets = set()
+    if getattr(self.args, 'swarming_targets_file', None):
+      # We need GN to generate the list of runtime dependencies for
+      # the compile targets listed (one per line) in the file so
+      # we can run them via swarming. We use gn_isolate_map.pyl to
+      # convert the compile targets to the matching GN labels.
+      path = self.args.swarming_targets_file
+      if not self.Exists(path):
+        self.WriteFailureAndRaise('"%s" does not exist' % path,
+                                  output_path=None)
+      contents = self.ReadFile(path)
+      swarming_targets = set(contents.splitlines())
 
-        is_android = 'target_os="android"' in vals['gn_args']
-        is_linux = self.platform.startswith('linux') and not is_android
+      isolate_map = self.ReadIsolateMap()
+      err, labels = self.MapTargetsToLabels(isolate_map, swarming_targets)
+      if err:
+        raise MBErr(err)
 
-        if test_type == 'nontest':
-            self.WriteFailureAndRaise('We should not be isolating %s.' %
-                                      target, output_path=None)
-        if test_type not in ('console_test_launcher', 'windowed_test_launcher',
-                             'non_parallel_console_test_launcher', 'raw',
-                             'additional_compile_target', 'junit_test',
-                             'script'):
-            self.WriteFailureAndRaise('No command line for '
-                                      '%s found (test type %s).'
-                                      % (target, test_type), output_path=None)
+      gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
+      self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
+      cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
 
-        cmdline = []
-        extra_files = [
-            '../../.vpython',
-            '../../testing/test_env.py',
+    ret, output, _ = self.Run(cmd)
+    if ret:
+      if self.args.json_output:
+        # write errors to json.output
+        self.WriteJSON({'output': output}, self.args.json_output)
+      # If `gn gen` failed, we should exit early rather than trying to
+      # generate isolates. Run() will have already logged any error
+      # output.
+      self.Print('GN gen failed: %d' % ret)
+      return ret
+
+    android = 'target_os="android"' in vals['gn_args']
+    for target in swarming_targets:
+      if android:
+        # Android targets may be either android_apk or executable. The
+        # former will result in runtime_deps associated with the stamp
+        # file, while the latter will result in runtime_deps associated
+        # with the executable.
+        label = isolate_map[target]['label']
+        runtime_deps_targets = [
+            target + '.runtime_deps',
+            'obj/%s.stamp.runtime_deps' % label.replace(':', '/')
         ]
-        vpython_exe = 'vpython'
-
-        must_retry = False
-        if test_type == 'script':
-            cmdline += [vpython_exe,
-                        '../../' +
-                        self.ToSrcRelPath(isolate_map[target]['script'])]
-        elif is_android:
-            cmdline += [vpython_exe,
-                        '../../build/android/test_wrapper/logdog_wrapper.py',
-                        '--target', target,
-                        '--logdog-bin-cmd', '../../bin/logdog_butler',
-                        '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
-                        '--store-tombstones']
-        else:
-            if test_type == 'raw':
-                cmdline += [vpython_exe,
-                            '../../tools_webrtc/flags_compatibility.py']
-                extra_files.append('../../tools_webrtc/flags_compatibility.py')
-
-            if isolate_map[target].get('use_webcam', False):
-                cmdline += [vpython_exe,
-                            '../../tools_webrtc/ensure_webcam_is_running.py']
-                extra_files.append(
-                    '../../tools_webrtc/ensure_webcam_is_running.py')
-
-            # is_linux uses use_ozone and x11 by default.
-            use_x11 = is_linux
-
-            xvfb = use_x11 and test_type == 'windowed_test_launcher'
-            if xvfb:
-                cmdline += [vpython_exe, '../../testing/xvfb.py']
-                extra_files.append('../../testing/xvfb.py')
-            else:
-                cmdline += [vpython_exe, '../../testing/test_env.py']
-
-            if test_type != 'raw':
-                extra_files += [
-                    '../../third_party/gtest-parallel/gtest-parallel',
-                    '../../third_party/gtest-parallel/gtest_parallel.py',
-                    '../../tools_webrtc/gtest-parallel-wrapper.py',
-                ]
-                sep = '\\' if self.platform == 'win32' else '/'
-                output_dir = '${ISOLATED_OUTDIR}' + sep + 'test_logs'
-                test_results = '${ISOLATED_OUTDIR}' + sep + 'gtest_output.json'
-                timeout = isolate_map[target].get('timeout', 900)
-                cmdline += [
-                    '../../tools_webrtc/gtest-parallel-wrapper.py',
-                    '--output_dir=%s' % output_dir,
-                    '--dump_json_test_results=%s' % test_results,
-                    '--gtest_color=no',
-                    # We tell gtest-parallel to interrupt the test after 900
-                    # seconds, so it can exit cleanly and report results,
-                    # instead of being interrupted by swarming and not
-                    # reporting anything.
-                    '--timeout=%s' % timeout,
-                ]
-                if test_type == 'non_parallel_console_test_launcher':
-                    # Still use the gtest-parallel-wrapper.py script since we
-                    # need it to run tests on swarming, but don't execute tests
-                    # in parallel.
-                    cmdline.append('--workers=1')
-                must_retry = True
-
-            asan = 'is_asan=true' in vals['gn_args']
-            lsan = 'is_lsan=true' in vals['gn_args']
-            msan = 'is_msan=true' in vals['gn_args']
-            tsan = 'is_tsan=true' in vals['gn_args']
-            sanitizer = asan or lsan or msan or tsan
-            if must_retry and not sanitizer:
-                # Retry would hide most sanitizers detections.
-                cmdline.append('--retry_failed=3')
-
-            executable_prefix = '.\\' if self.platform == 'win32' else './'
-            executable_suffix = '.exe' if self.platform == 'win32' else ''
-            executable = executable_prefix + target + executable_suffix
-
-            cmdline.append(executable)
-
-            cmdline.extend([
-                '--asan=%d' % asan,
-                '--lsan=%d' % lsan,
-                '--msan=%d' % msan,
-                '--tsan=%d' % tsan,
-            ])
-
-        cmdline += isolate_map[target].get('args', [])
-
-        return cmdline, extra_files
-
-    def ToAbsPath(self, build_path, *comps):
-        return self.PathJoin(self.src_dir,
-                             self.ToSrcRelPath(build_path),
-                             *comps)
-
-    def ToSrcRelPath(self, path):
-        """Returns a relative path from the top of the repo."""
-        if path.startswith('//'):
-            return path[2:].replace('/', self.sep)
-        return self.RelPath(path, self.src_dir)
-
-    def RunGNAnalyze(self, vals):
-        # Analyze runs before 'gn gen' now, so we need to run gn gen
-        # in order to ensure that we have a build directory.
-        ret = self.RunGNGen(vals)
-        if ret:
-            return ret
-
-        build_path = self.args.path[0]
-        input_path = self.args.input_path[0]
-        gn_input_path = input_path + '.gn'
-        output_path = self.args.output_path[0]
-        gn_output_path = output_path + '.gn'
-
-        inp = self.ReadInputJSON(['files', 'test_targets',
-                                  'additional_compile_targets'])
-        if self.args.verbose:
-            self.Print()
-            self.Print('analyze input:')
-            self.PrintJSON(inp)
-            self.Print()
-
-        # This shouldn't normally happen, but could due to unusual race
-        # conditions, like a try job that gets scheduled before a patch
-        # lands but runs after the patch has landed.
-        if not inp['files']:
-            self.Print(
-                'Warning: No files modified in patch, bailing out early.')
-            self.WriteJSON({
-                'status': 'No dependency',
-                'compile_targets': [],
-                'test_targets': [],
-            }, output_path)
-            return 0
-
-        gn_inp = {}
-        gn_inp['files'] = ['//' + f for f in inp['files']
-                           if not f.startswith('//')]
-
-        isolate_map = self.ReadIsolateMap()
-        err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
-            isolate_map, inp['additional_compile_targets'])
-        if err:
-            raise MBErr(err)
-
-        err, gn_inp['test_targets'] = self.MapTargetsToLabels(
-            isolate_map, inp['test_targets'])
-        if err:
-            raise MBErr(err)
-        labels_to_targets = {}
-        for i, label in enumerate(gn_inp['test_targets']):
-            labels_to_targets[label] = inp['test_targets'][i]
-
-        try:
-            self.WriteJSON(gn_inp, gn_input_path)
-            cmd = self.GNCmd('analyze', build_path,
-                             gn_input_path, gn_output_path)
-            ret, output, _ = self.Run(cmd, force_verbose=True)
-            if ret:
-                if self.args.json_output:
-                    # write errors to json.output
-                    self.WriteJSON({'output': output}, self.args.json_output)
-                return ret
-
-            gn_outp_str = self.ReadFile(gn_output_path)
-            try:
-                gn_outp = json.loads(gn_outp_str)
-            except Exception as e:
-                self.Print("Failed to parse the JSON string GN "
-                           "returned: %s\n%s" % (repr(gn_outp_str), str(e)))
-                raise
-
-            outp = {}
-            if 'status' in gn_outp:
-                outp['status'] = gn_outp['status']
-            if 'error' in gn_outp:
-                outp['error'] = gn_outp['error']
-            if 'invalid_targets' in gn_outp:
-                outp['invalid_targets'] = gn_outp['invalid_targets']
-            if 'compile_targets' in gn_outp:
-                if 'all' in gn_outp['compile_targets']:
-                    outp['compile_targets'] = ['all']
-                else:
-                    outp['compile_targets'] = [
-                        label.replace('//', '')
-                        for label in gn_outp['compile_targets']
-                    ]
-            if 'test_targets' in gn_outp:
-                outp['test_targets'] = [
-                    labels_to_targets[label]
-                    for label in gn_outp['test_targets']
-                ]
-
-            if self.args.verbose:
-                self.Print()
-                self.Print('analyze output:')
-                self.PrintJSON(outp)
-                self.Print()
-
-            self.WriteJSON(outp, output_path)
-
-        finally:
-            if self.Exists(gn_input_path):
-                self.RemoveFile(gn_input_path)
-            if self.Exists(gn_output_path):
-                self.RemoveFile(gn_output_path)
-
-        return 0
-
-    def ReadInputJSON(self, required_keys):
-        path = self.args.input_path[0]
-        output_path = self.args.output_path[0]
-        if not self.Exists(path):
-            self.WriteFailureAndRaise(
-                '"%s" does not exist' % path, output_path)
-
-        try:
-            inp = json.loads(self.ReadFile(path))
-        except Exception as e:
-            self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s'
-                                      % (path, e), output_path)
-
-        for k in required_keys:
-            if not k in inp:
-                self.WriteFailureAndRaise('input file is missing a "%s" key' %
-                                          k, output_path)
-
-        return inp
-
-    def WriteFailureAndRaise(self, msg, output_path):
-        if output_path:
-            self.WriteJSON({'error': msg}, output_path, force_verbose=True)
-        raise MBErr(msg)
-
-    def WriteJSON(self, obj, path, force_verbose=False):
-        try:
-            self.WriteFile(path,
-                           json.dumps(obj, indent=2, sort_keys=True) + '\n',
-                           force_verbose=force_verbose)
-        except Exception as e:
-            raise MBErr('Error %s writing to the output path "%s"' %
-                        (e, path))
-
-    def PrintCmd(self, cmd, env):
+      elif isolate_map[target]['type'] == 'gpu_browser_test':
         if self.platform == 'win32':
-            env_prefix = 'set '
-            env_quoter = QuoteForSet
-            shell_quoter = QuoteForCmd
+          runtime_deps_targets = ['browser_tests.exe.runtime_deps']
         else:
-            env_prefix = ''
-            env_quoter = pipes.quote
-            shell_quoter = pipes.quote
+          runtime_deps_targets = ['browser_tests.runtime_deps']
+      elif isolate_map[target]['type'] == 'script':
+        label = isolate_map[target]['label'].split(':')[1]
+        runtime_deps_targets = ['%s.runtime_deps' % label]
+        if self.platform == 'win32':
+          runtime_deps_targets += [label + '.exe.runtime_deps']
+        else:
+          runtime_deps_targets += [label + '.runtime_deps']
+      elif self.platform == 'win32':
+        runtime_deps_targets = [target + '.exe.runtime_deps']
+      else:
+        runtime_deps_targets = [target + '.runtime_deps']
 
-        def print_env(var):
-            if env and var in env:
-                self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
+      for r in runtime_deps_targets:
+        runtime_deps_path = self.ToAbsPath(build_dir, r)
+        if self.Exists(runtime_deps_path):
+          break
+      else:
+        raise MBErr('did not generate any of %s' %
+                    ', '.join(runtime_deps_targets))
 
-        print_env('LLVM_FORCE_HEAD_REVISION')
+      command, extra_files = self.GetSwarmingCommand(target, vals)
 
-        if cmd[0] == self.executable:
-            cmd = ['python'] + cmd[1:]
-        self.Print(*[shell_quoter(arg) for arg in cmd])
+      runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
 
-    def PrintJSON(self, obj):
-        self.Print(json.dumps(obj, indent=2, sort_keys=True))
+      self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
+                             extra_files)
 
-    def Build(self, target):
-        build_dir = self.ToSrcRelPath(self.args.path[0])
-        ninja_cmd = ['ninja', '-C', build_dir]
-        if self.args.jobs:
-            ninja_cmd.extend(['-j', '%d' % self.args.jobs])
-        ninja_cmd.append(target)
-        ret, _, _ = self.Run(
-            ninja_cmd, force_verbose=False, buffer_output=False)
+    return 0
+
+  def RunGNIsolate(self, vals):
+    target = self.args.target[0]
+    isolate_map = self.ReadIsolateMap()
+    err, labels = self.MapTargetsToLabels(isolate_map, [target])
+    if err:
+      raise MBErr(err)
+    label = labels[0]
+
+    build_dir = self.args.path[0]
+    command, extra_files = self.GetSwarmingCommand(target, vals)
+
+    cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
+    ret, out, _ = self.Call(cmd)
+    if ret:
+      if out:
+        self.Print(out)
+      return ret
+
+    runtime_deps = out.splitlines()
+
+    self.WriteIsolateFiles(build_dir, command, target, runtime_deps,
+                           extra_files)
+
+    ret, _, _ = self.Run([
+        self.PathJoin(self.src_dir, 'tools', 'luci-go', self.isolate_exe),
+        'check', '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target))
+    ],
+                         buffer_output=False)
+
+    return ret
+
+  def WriteIsolateFiles(self, build_dir, command, target, runtime_deps,
+                        extra_files):
+    isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
+    self.WriteFile(
+        isolate_path,
+        pprint.pformat({
+            'variables': {
+                'command': command,
+                'files': sorted(runtime_deps + extra_files),
+            }
+        }) + '\n')
+
+    self.WriteJSON(
+        {
+            'args': [
+                '--isolate',
+                self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+            ],
+            'dir':
+            self.src_dir,
+            'version':
+            1,
+        },
+        isolate_path + 'd.gen.json',
+    )
+
+  @staticmethod
+  def MapTargetsToLabels(isolate_map, targets):
+    labels = []
+    err = ''
+
+    def StripTestSuffixes(target):
+      for suffix in ('_apk_run', '_apk', '_run'):
+        if target.endswith(suffix):
+          return target[:-len(suffix)], suffix
+      return None, None
+
+    for target in targets:
+      if target == 'all':
+        labels.append(target)
+      elif target.startswith('//'):
+        labels.append(target)
+      else:
+        if target in isolate_map:
+          stripped_target, suffix = target, ''
+        else:
+          stripped_target, suffix = StripTestSuffixes(target)
+        if stripped_target in isolate_map:
+          if isolate_map[stripped_target]['type'] == 'unknown':
+            err += ('test target "%s" type is unknown\n' % target)
+          else:
+            labels.append(isolate_map[stripped_target]['label'] + suffix)
+        else:
+          err += ('target "%s" not found in '
+                  '//testing/buildbot/gn_isolate_map.pyl\n' % target)
+
+    return err, labels
+
+  def GNCmd(self, subcommand, path, *args):
+    if self.platform.startswith('linux'):
+      subdir, exe = 'linux64', 'gn'
+    elif self.platform == 'darwin':
+      subdir, exe = 'mac', 'gn'
+    else:
+      subdir, exe = 'win', 'gn.exe'
+
+    gn_path = self.PathJoin(self.src_dir, 'buildtools', subdir, exe)
+    return [gn_path, subcommand, path] + list(args)
+
+  def GNArgs(self, vals):
+    if vals['cros_passthrough']:
+      if not 'GN_ARGS' in os.environ:
+        raise MBErr('MB is expecting GN_ARGS to be in the environment')
+      gn_args = os.environ['GN_ARGS']
+      if not re.search('target_os.*=.*"chromeos"', gn_args):
+        raise MBErr('GN_ARGS is missing target_os = "chromeos": '
+                    '(GN_ARGS=%s)' % gn_args)
+    else:
+      gn_args = vals['gn_args']
+
+    if self.args.goma_dir:
+      gn_args += ' goma_dir="%s"' % self.args.goma_dir
+
+    android_version_code = self.args.android_version_code
+    if android_version_code:
+      gn_args += (' android_default_version_code="%s"' % android_version_code)
+
+    android_version_name = self.args.android_version_name
+    if android_version_name:
+      gn_args += (' android_default_version_name="%s"' % android_version_name)
+
+    # Canonicalize the arg string into a sorted, newline-separated list
+    # of key-value pairs, and de-dup the keys if need be so that only
+    # the last instance of each arg is listed.
+    gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+
+    args_file = vals.get('args_file', None)
+    if args_file:
+      gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
+    return gn_args
+
+  def GetSwarmingCommand(self, target, vals):
+    isolate_map = self.ReadIsolateMap()
+    test_type = isolate_map[target]['type']
+
+    is_android = 'target_os="android"' in vals['gn_args']
+    is_linux = self.platform.startswith('linux') and not is_android
+
+    if test_type == 'nontest':
+      self.WriteFailureAndRaise('We should not be isolating %s.' % target,
+                                output_path=None)
+    if test_type not in ('console_test_launcher', 'windowed_test_launcher',
+                         'non_parallel_console_test_launcher', 'raw',
+                         'additional_compile_target', 'junit_test', 'script'):
+      self.WriteFailureAndRaise('No command line for '
+                                '%s found (test type %s).' %
+                                (target, test_type),
+                                output_path=None)
+
+    cmdline = []
+    extra_files = [
+        '../../.vpython',
+        '../../testing/test_env.py',
+    ]
+    vpython_exe = 'vpython'
+
+    must_retry = False
+    if test_type == 'script':
+      cmdline += [
+          vpython_exe,
+          '../../' + self.ToSrcRelPath(isolate_map[target]['script'])
+      ]
+    elif is_android:
+      cmdline += [
+          vpython_exe, '../../build/android/test_wrapper/logdog_wrapper.py',
+          '--target', target, '--logdog-bin-cmd', '../../bin/logdog_butler',
+          '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
+          '--store-tombstones'
+      ]
+    else:
+      if test_type == 'raw':
+        cmdline += [vpython_exe, '../../tools_webrtc/flags_compatibility.py']
+        extra_files.append('../../tools_webrtc/flags_compatibility.py')
+
+      if isolate_map[target].get('use_webcam', False):
+        cmdline += [
+            vpython_exe, '../../tools_webrtc/ensure_webcam_is_running.py'
+        ]
+        extra_files.append('../../tools_webrtc/ensure_webcam_is_running.py')
+
+      # is_linux uses use_ozone and x11 by default.
+      use_x11 = is_linux
+
+      xvfb = use_x11 and test_type == 'windowed_test_launcher'
+      if xvfb:
+        cmdline += [vpython_exe, '../../testing/xvfb.py']
+        extra_files.append('../../testing/xvfb.py')
+      else:
+        cmdline += [vpython_exe, '../../testing/test_env.py']
+
+      if test_type != 'raw':
+        extra_files += [
+            '../../third_party/gtest-parallel/gtest-parallel',
+            '../../third_party/gtest-parallel/gtest_parallel.py',
+            '../../tools_webrtc/gtest-parallel-wrapper.py',
+        ]
+        sep = '\\' if self.platform == 'win32' else '/'
+        output_dir = '${ISOLATED_OUTDIR}' + sep + 'test_logs'
+        test_results = '${ISOLATED_OUTDIR}' + sep + 'gtest_output.json'
+        timeout = isolate_map[target].get('timeout', 900)
+        cmdline += [
+            '../../tools_webrtc/gtest-parallel-wrapper.py',
+            '--output_dir=%s' % output_dir,
+            '--dump_json_test_results=%s' % test_results,
+            '--gtest_color=no',
+            # We tell gtest-parallel to interrupt the test after 900
+            # seconds, so it can exit cleanly and report results,
+            # instead of being interrupted by swarming and not
+            # reporting anything.
+            '--timeout=%s' % timeout,
+        ]
+        if test_type == 'non_parallel_console_test_launcher':
+          # Still use the gtest-parallel-wrapper.py script since we
+          # need it to run tests on swarming, but don't execute tests
+          # in parallel.
+          cmdline.append('--workers=1')
+        must_retry = True
+
+      asan = 'is_asan=true' in vals['gn_args']
+      lsan = 'is_lsan=true' in vals['gn_args']
+      msan = 'is_msan=true' in vals['gn_args']
+      tsan = 'is_tsan=true' in vals['gn_args']
+      sanitizer = asan or lsan or msan or tsan
+      if must_retry and not sanitizer:
+        # Retry would hide most sanitizer detections.
+        cmdline.append('--retry_failed=3')
+
+      executable_prefix = '.\\' if self.platform == 'win32' else './'
+      executable_suffix = '.exe' if self.platform == 'win32' else ''
+      executable = executable_prefix + target + executable_suffix
+
+      cmdline.append(executable)
+
+      cmdline.extend([
+          '--asan=%d' % asan,
+          '--lsan=%d' % lsan,
+          '--msan=%d' % msan,
+          '--tsan=%d' % tsan,
+      ])
+
+    cmdline += isolate_map[target].get('args', [])
+
+    return cmdline, extra_files
+
+  def ToAbsPath(self, build_path, *comps):
+    return self.PathJoin(self.src_dir, self.ToSrcRelPath(build_path), *comps)
+
+  def ToSrcRelPath(self, path):
+    """Returns a relative path from the top of the repo."""
+    if path.startswith('//'):
+      return path[2:].replace('/', self.sep)
+    return self.RelPath(path, self.src_dir)
+
+  def RunGNAnalyze(self, vals):
+    # Analyze runs before 'gn gen' now, so we need to run gn gen
+    # in order to ensure that we have a build directory.
+    ret = self.RunGNGen(vals)
+    if ret:
+      return ret
+
+    build_path = self.args.path[0]
+    input_path = self.args.input_path[0]
+    gn_input_path = input_path + '.gn'
+    output_path = self.args.output_path[0]
+    gn_output_path = output_path + '.gn'
+
+    inp = self.ReadInputJSON(
+        ['files', 'test_targets', 'additional_compile_targets'])
+    if self.args.verbose:
+      self.Print()
+      self.Print('analyze input:')
+      self.PrintJSON(inp)
+      self.Print()
+
+    # This shouldn't normally happen, but could due to unusual race
+    # conditions, like a try job that gets scheduled before a patch
+    # lands but runs after the patch has landed.
+    if not inp['files']:
+      self.Print('Warning: No files modified in patch, bailing out early.')
+      self.WriteJSON(
+          {
+              'status': 'No dependency',
+              'compile_targets': [],
+              'test_targets': [],
+          }, output_path)
+      return 0
+
+    gn_inp = {}
+    gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')]
+
+    isolate_map = self.ReadIsolateMap()
+    err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
+        isolate_map, inp['additional_compile_targets'])
+    if err:
+      raise MBErr(err)
+
+    err, gn_inp['test_targets'] = self.MapTargetsToLabels(
+        isolate_map, inp['test_targets'])
+    if err:
+      raise MBErr(err)
+    labels_to_targets = {}
+    for i, label in enumerate(gn_inp['test_targets']):
+      labels_to_targets[label] = inp['test_targets'][i]
+
+    try:
+      self.WriteJSON(gn_inp, gn_input_path)
+      cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
+      ret, output, _ = self.Run(cmd, force_verbose=True)
+      if ret:
+        if self.args.json_output:
+          # write errors to json.output
+          self.WriteJSON({'output': output}, self.args.json_output)
         return ret
 
-    def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
-        # This function largely exists so it can be overridden for testing.
-        if self.args.dryrun or self.args.verbose or force_verbose:
-            self.PrintCmd(cmd, env)
-        if self.args.dryrun:
-            return 0, '', ''
+      gn_outp_str = self.ReadFile(gn_output_path)
+      try:
+        gn_outp = json.loads(gn_outp_str)
+      except Exception as e:
+        self.Print("Failed to parse the JSON string GN "
+                   "returned: %s\n%s" % (repr(gn_outp_str), str(e)))
+        raise
 
-        ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
-        if self.args.verbose or force_verbose:
-            if ret:
-                self.Print('  -> returned %d' % ret)
-            if out:
-                self.Print(out, end='')
-            if err:
-                self.Print(err, end='', file=sys.stderr)
-        return ret, out, err
-
-    def Call(self, cmd, env=None, buffer_output=True):
-        if buffer_output:
-            p = subprocess.Popen(cmd, shell=False, cwd=self.src_dir,
-                                 stdout=subprocess.PIPE,
-                                 stderr=subprocess.PIPE,
-                                 env=env)
-            out, err = p.communicate()
+      outp = {}
+      if 'status' in gn_outp:
+        outp['status'] = gn_outp['status']
+      if 'error' in gn_outp:
+        outp['error'] = gn_outp['error']
+      if 'invalid_targets' in gn_outp:
+        outp['invalid_targets'] = gn_outp['invalid_targets']
+      if 'compile_targets' in gn_outp:
+        if 'all' in gn_outp['compile_targets']:
+          outp['compile_targets'] = ['all']
         else:
-            p = subprocess.Popen(cmd, shell=False, cwd=self.src_dir,
-                                 env=env)
-            p.wait()
-            out = err = ''
-        return p.returncode, out, err
+          outp['compile_targets'] = [
+              label.replace('//', '') for label in gn_outp['compile_targets']
+          ]
+      if 'test_targets' in gn_outp:
+        outp['test_targets'] = [
+            labels_to_targets[label] for label in gn_outp['test_targets']
+        ]
 
-    def ExpandUser(self, path):
-        # This function largely exists so it can be overridden for testing.
-        return os.path.expanduser(path)
+      if self.args.verbose:
+        self.Print()
+        self.Print('analyze output:')
+        self.PrintJSON(outp)
+        self.Print()
 
-    def Exists(self, path):
-        # This function largely exists so it can be overridden for testing.
-        return os.path.exists(path)
+      self.WriteJSON(outp, output_path)
 
-    def Fetch(self, url):
-        # This function largely exists so it can be overridden for testing.
-        f = urlopen(url)
-        contents = f.read()
-        f.close()
-        return contents
+    finally:
+      if self.Exists(gn_input_path):
+        self.RemoveFile(gn_input_path)
+      if self.Exists(gn_output_path):
+        self.RemoveFile(gn_output_path)
 
-    def MaybeMakeDirectory(self, path):
-        try:
-            os.makedirs(path)
-        except OSError as e:
-            if e.errno != errno.EEXIST:
-                raise
+    return 0
 
-    def PathJoin(self, *comps):
-        # This function largely exists so it can be overriden for testing.
-        return os.path.join(*comps)
+  def ReadInputJSON(self, required_keys):
+    path = self.args.input_path[0]
+    output_path = self.args.output_path[0]
+    if not self.Exists(path):
+      self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
 
-    def Print(self, *args, **kwargs):
-        # This function largely exists so it can be overridden for testing.
-        print(*args, **kwargs)
-        if kwargs.get('stream', sys.stdout) == sys.stdout:
-            sys.stdout.flush()
+    try:
+      inp = json.loads(self.ReadFile(path))
+    except Exception as e:
+      self.WriteFailureAndRaise(
+          'Failed to read JSON input from "%s": %s' % (path, e), output_path)
 
-    def ReadFile(self, path):
-        # This function largely exists so it can be overriden for testing.
-        with open(path) as fp:
-            return fp.read()
+    for k in required_keys:
+      if not k in inp:
+        self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
+                                  output_path)
 
-    def RelPath(self, path, start='.'):
-        # This function largely exists so it can be overriden for testing.
-        return os.path.relpath(path, start)
+    return inp
 
-    def RemoveFile(self, path):
-        # This function largely exists so it can be overriden for testing.
-        os.remove(path)
+  def WriteFailureAndRaise(self, msg, output_path):
+    if output_path:
+      self.WriteJSON({'error': msg}, output_path, force_verbose=True)
+    raise MBErr(msg)
 
-    def RemoveDirectory(self, abs_path):
-        if self.platform == 'win32':
-            # In other places in chromium, we often have to retry this command
-            # because we're worried about other processes still holding on to
-            # file handles, but when MB is invoked, it will be early enough in
-            # the build that their should be no other processes to interfere.
-            # We can change this if need be.
-            self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
-        else:
-            shutil.rmtree(abs_path, ignore_errors=True)
+  def WriteJSON(self, obj, path, force_verbose=False):
+    try:
+      self.WriteFile(path,
+                     json.dumps(obj, indent=2, sort_keys=True) + '\n',
+                     force_verbose=force_verbose)
+    except Exception as e:
+      raise MBErr('Error %s writing to the output path "%s"' % (e, path))
 
-    def TempDir(self):
-        # This function largely exists so it can be overriden for testing.
-        return tempfile.mkdtemp(prefix='mb_')
+  def PrintCmd(self, cmd, env):
+    if self.platform == 'win32':
+      env_prefix = 'set '
+      env_quoter = QuoteForSet
+      shell_quoter = QuoteForCmd
+    else:
+      env_prefix = ''
+      env_quoter = pipes.quote
+      shell_quoter = pipes.quote
 
-    def TempFile(self, mode='w'):
-        # This function largely exists so it can be overriden for testing.
-        return tempfile.NamedTemporaryFile(mode=mode, delete=False)
+    var = 'LLVM_FORCE_HEAD_REVISION'
+    if env and var in env:
+      self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
 
-    def WriteFile(self, path, contents, force_verbose=False):
-        # This function largely exists so it can be overriden for testing.
-        if self.args.dryrun or self.args.verbose or force_verbose:
-            self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
-        with open(path, 'w') as fp:
-            return fp.write(contents)
+    if cmd[0] == self.executable:
+      cmd = ['python'] + cmd[1:]
+    self.Print(*[shell_quoter(arg) for arg in cmd])
+
+  def PrintJSON(self, obj):
+    self.Print(json.dumps(obj, indent=2, sort_keys=True))
+
+  def Build(self, target):
+    build_dir = self.ToSrcRelPath(self.args.path[0])
+    ninja_cmd = ['ninja', '-C', build_dir]
+    if self.args.jobs:
+      ninja_cmd.extend(['-j', '%d' % self.args.jobs])
+    ninja_cmd.append(target)
+    ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
+    return ret
+
+  def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
+    # This function largely exists so it can be overridden for testing.
+    if self.args.dryrun or self.args.verbose or force_verbose:
+      self.PrintCmd(cmd, env)
+    if self.args.dryrun:
+      return 0, '', ''
+
+    ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
+    if self.args.verbose or force_verbose:
+      if ret:
+        self.Print('  -> returned %d' % ret)
+      if out:
+        self.Print(out, end='')
+      if err:
+        self.Print(err, end='', file=sys.stderr)
+    return ret, out, err
+
+  def Call(self, cmd, env=None, buffer_output=True):
+    if buffer_output:
+      p = subprocess.Popen(cmd,
+                           shell=False,
+                           cwd=self.src_dir,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE,
+                           env=env)
+      out, err = p.communicate()
+    else:
+      p = subprocess.Popen(cmd, shell=False, cwd=self.src_dir, env=env)
+      p.wait()
+      out = err = ''
+    return p.returncode, out, err
+
+  @staticmethod
+  def ExpandUser(path):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.expanduser(path)
+
+  @staticmethod
+  def Exists(path):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.exists(path)
+
+  @staticmethod
+  def Fetch(url):
+    # This function largely exists so it can be overridden for testing.
+    f = urlopen(url)
+    contents = f.read()
+    f.close()
+    return contents
+
+  @staticmethod
+  def MaybeMakeDirectory(path):
+    try:
+      os.makedirs(path)
+    except OSError as e:
+      if e.errno != errno.EEXIST:
+        raise
+
+  @staticmethod
+  def PathJoin(*comps):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.join(*comps)
+
+  @staticmethod
+  def Print(*args, **kwargs):
+    # This function largely exists so it can be overridden for testing.
+    print(*args, **kwargs)
+    if kwargs.get('stream', sys.stdout) == sys.stdout:
+      sys.stdout.flush()
+
+  @staticmethod
+  def ReadFile(path):
+    # This function largely exists so it can be overridden for testing.
+    with open(path) as fp:
+      return fp.read()
+
+  @staticmethod
+  def RelPath(path, start='.'):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.relpath(path, start)
+
+  @staticmethod
+  def RemoveFile(path):
+    # This function largely exists so it can be overridden for testing.
+    os.remove(path)
+
+  def RemoveDirectory(self, abs_path):
+    if self.platform == 'win32':
+      # In other places in chromium, we often have to retry this command
+      # because we're worried about other processes still holding on to
+      # file handles, but when MB is invoked, it will be early enough in
+      # the build that there should be no other processes to interfere.
+      # We can change this if need be.
+      self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
+    else:
+      shutil.rmtree(abs_path, ignore_errors=True)
+
+  @staticmethod
+  def TempDir():
+    # This function largely exists so it can be overridden for testing.
+    return tempfile.mkdtemp(prefix='mb_')
+
+  @staticmethod
+  def TempFile(mode='w'):
+    # This function largely exists so it can be overridden for testing.
+    return tempfile.NamedTemporaryFile(mode=mode, delete=False)
+
+  def WriteFile(self, path, contents, force_verbose=False):
+    # This function largely exists so it can be overridden for testing.
+    if self.args.dryrun or self.args.verbose or force_verbose:
+      self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
+    with open(path, 'w') as fp:
+      return fp.write(contents)
 
 
 class MBErr(Exception):
-    pass
+  pass
 
 
 # See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
@@ -1299,24 +1315,24 @@
 
 
 def QuoteForSet(arg):
-    if any(a in UNSAFE_FOR_SET for a in arg):
-        arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
-    return arg
+  if any(a in UNSAFE_FOR_SET for a in arg):
+    arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
+  return arg
 
 
 def QuoteForCmd(arg):
-    # First, escape the arg so that CommandLineToArgvW will parse it properly.
-    if arg == '' or ' ' in arg or '"' in arg:
-        quote_re = re.compile(r'(\\*)"')
-        arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
+  # First, escape the arg so that CommandLineToArgvW will parse it properly.
+  if arg == '' or ' ' in arg or '"' in arg:
+    quote_re = re.compile(r'(\\*)"')
+    arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
 
-    # Then check to see if the arg contains any metacharacters other than
-    # double quotes; if it does, quote everything (including the double
-    # quotes) for safety.
-    if any(a in UNSAFE_FOR_CMD for a in arg):
-        arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
-    return arg
+  # Then check to see if the arg contains any metacharacters other than
+  # double quotes; if it does, quote everything (including the double
+  # quotes) for safety.
+  if any(a in UNSAFE_FOR_CMD for a in arg):
+    arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
+  return arg
 
 
 if __name__ == '__main__':
-    sys.exit(main(sys.argv[1:]))
+  sys.exit(main(sys.argv[1:]))
diff --git a/tools_webrtc/mb/mb_unittest.py b/tools_webrtc/mb/mb_unittest.py
index a65a55b..b555f49 100755
--- a/tools_webrtc/mb/mb_unittest.py
+++ b/tools_webrtc/mb/mb_unittest.py
@@ -12,9 +12,9 @@
 import ast
 import json
 try:
-  from StringIO import StringIO # for Python2
+  from StringIO import StringIO  # for Python2
 except ImportError:
-  from io import StringIO # for Python3
+  from io import StringIO  # for Python3
 import os
 import re
 import sys
@@ -61,7 +61,7 @@
 
   def Exists(self, path):
     abs_path = self._AbsPath(path)
-    return (self.files.get(abs_path) is not None or abs_path in self.dirs)
+    return self.files.get(abs_path) is not None or abs_path in self.dirs
 
   def MaybeMakeDirectory(self, path):
     abpath = self._AbsPath(path)
@@ -132,7 +132,7 @@
     self.buf += contents
 
   def close(self):
-     self.files[self.name] = self.buf
+    self.files[self.name] = self.buf
 
 
 TEST_CONFIG = """\
@@ -186,8 +186,7 @@
     mbw = FakeMBW(win32=win32)
     mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
     mbw.files.setdefault(
-      mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'),
-      '''{
+        mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'), '''{
         "foo_unittests": {
           "label": "//foo:foo_unittests",
           "type": "console_test_launcher",
@@ -202,7 +201,13 @@
         mbw.files[path] = contents
     return mbw
 
-  def check(self, args, mbw=None, files=None, out=None, err=None, ret=None,
+  def check(self,
+            args,
+            mbw=None,
+            files=None,
+            out=None,
+            err=None,
+            ret=None,
             env=None):
     if not mbw:
       mbw = self.fake_mbw(files)
@@ -223,33 +228,43 @@
     return mbw
 
   def test_analyze(self):
-    files = {'/tmp/in.json': '''{\
+    files = {
+        '/tmp/in.json':
+        '''{\
                "files": ["foo/foo_unittest.cc"],
                "test_targets": ["foo_unittests"],
                "additional_compile_targets": ["all"]
              }''',
-             '/tmp/out.json.gn': '''{\
+        '/tmp/out.json.gn':
+        '''{\
                "status": "Found dependency",
                "compile_targets": ["//foo:foo_unittests"],
                "test_targets": ["//foo:foo_unittests"]
-             }'''}
+             }'''
+    }
 
     mbw = self.fake_mbw(files)
     mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
 
-    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
-                '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+    self.check([
+        'analyze', '-c', 'debug_goma', '//out/Default', '/tmp/in.json',
+        '/tmp/out.json'
+    ],
+               mbw=mbw,
+               ret=0)
     out = json.loads(mbw.files['/tmp/out.json'])
-    self.assertEqual(out, {
-      'status': 'Found dependency',
-      'compile_targets': ['foo:foo_unittests'],
-      'test_targets': ['foo_unittests']
-    })
+    self.assertEqual(
+        out, {
+            'status': 'Found dependency',
+            'compile_targets': ['foo:foo_unittests'],
+            'test_targets': ['foo_unittests']
+        })
 
   def test_gen(self):
     mbw = self.fake_mbw()
     self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
-               mbw=mbw, ret=0)
+               mbw=mbw,
+               ret=0)
     self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
                               ('goma_dir = "/goma"\n'
                                'is_debug = true\n'
@@ -262,23 +277,25 @@
 
     mbw = self.fake_mbw(win32=True)
     self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
-               mbw=mbw, ret=0)
+               mbw=mbw,
+               ret=0)
     self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
                               ('goma_dir = "c:\\\\goma"\n'
                                'is_debug = true\n'
                                'use_goma = true\n'))
-    self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
-                  '--check\n', mbw.out)
+    self.assertIn(
+        'c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
+        '--check\n', mbw.out)
 
     mbw = self.fake_mbw()
-    self.check(['gen', '-m', 'fake_group', '-b', 'fake_args_bot',
-                '//out/Debug'],
-               mbw=mbw, ret=0)
+    self.check(
+        ['gen', '-m', 'fake_group', '-b', 'fake_args_bot', '//out/Debug'],
+        mbw=mbw,
+        ret=0)
     self.assertEqual(
         mbw.files['/fake_src/out/Debug/args.gn'],
         'import("//build/args/bots/fake_group/fake_args_bot.gn")\n\n')
 
-
   def test_gen_fails(self):
     mbw = self.fake_mbw()
     mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
@@ -286,46 +303,47 @@
 
   def test_gen_swarming(self):
     files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'raw',"
-          "  'args': [],"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'raw',"
+         "  'args': [],"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
     mbw = self.fake_mbw(files)
-    self.check(['gen',
-                '-c', 'debug_goma',
-                '--swarming-targets-file', '/tmp/swarming_targets',
-                '//out/Default'], mbw=mbw, ret=0)
-    self.assertIn('/fake_src/out/Default/base_unittests.isolate',
-                  mbw.files)
+    self.check([
+        'gen', '-c', 'debug_goma', '--swarming-targets-file',
+        '/tmp/swarming_targets', '//out/Default'
+    ],
+               mbw=mbw,
+               ret=0)
+    self.assertIn('/fake_src/out/Default/base_unittests.isolate', mbw.files)
     self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
                   mbw.files)
 
   def test_gen_swarming_android(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'additional_compile_target',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'additional_compile_target',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'android_bot', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'android_bot', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -337,30 +355,34 @@
     self.assertEqual(command, [
         'vpython',
         '../../build/android/test_wrapper/logdog_wrapper.py',
-        '--target', 'base_unittests',
-        '--logdog-bin-cmd', '../../bin/logdog_butler',
-        '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
+        '--target',
+        'base_unittests',
+        '--logdog-bin-cmd',
+        '../../bin/logdog_butler',
+        '--logcat-output-file',
+        '${ISOLATED_OUTDIR}/logcats',
         '--store-tombstones',
     ])
 
   def test_gen_swarming_android_junit_test(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'junit_test',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'junit_test',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'android_bot', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'android_bot', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -372,31 +394,35 @@
     self.assertEqual(command, [
         'vpython',
         '../../build/android/test_wrapper/logdog_wrapper.py',
-        '--target', 'base_unittests',
-        '--logdog-bin-cmd', '../../bin/logdog_butler',
-        '--logcat-output-file', '${ISOLATED_OUTDIR}/logcats',
+        '--target',
+        'base_unittests',
+        '--logdog-bin-cmd',
+        '../../bin/logdog_butler',
+        '--logcat-output-file',
+        '${ISOLATED_OUTDIR}/logcats',
         '--store-tombstones',
     ])
 
   def test_gen_timeout(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'non_parallel_console_test_launcher',"
-          "  'timeout': 500,"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'non_parallel_console_test_launcher',"
+         "  'timeout': 500,"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -430,24 +456,25 @@
 
   def test_gen_script(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests_script\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests_script': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'script',"
-          "  'script': '//base/base_unittests_script.py',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-          "base_unittests_script.py\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests_script\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests_script': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'script',"
+         "  'script': '//base/base_unittests_script.py',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"
+         "base_unittests_script.py\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = (
         mbw.files['/fake_src/out/Default/base_unittests_script.isolate'])
@@ -466,22 +493,23 @@
 
   def test_gen_raw(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'raw',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'raw',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -508,22 +536,23 @@
 
   def test_gen_non_parallel_console_test_launcher(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'non_parallel_console_test_launcher',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'non_parallel_console_test_launcher',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -557,23 +586,24 @@
 
   def test_isolate_windowed_test_launcher_linux(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'windowed_test_launcher',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-          "some_resource_file\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'windowed_test_launcher',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"
+         "some_resource_file\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -608,26 +638,26 @@
 
   def test_gen_windowed_test_launcher_win(self):
     files = {
-      'c:\\fake_src\\out\\Default\\tmp\\swarming_targets': 'unittests\n',
-      'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl': (
-          "{'unittests': {"
-          "  'label': '//somewhere:unittests',"
-          "  'type': 'windowed_test_launcher',"
-          "}}\n"
-      ),
-      r'c:\fake_src\out\Default\unittests.exe.runtime_deps': (
-          "unittests.exe\n"
-          "some_dependency\n"
-      ),
+        'c:\\fake_src\\out\\Default\\tmp\\swarming_targets':
+        'unittests\n',
+        'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl':
+        ("{'unittests': {"
+         "  'label': '//somewhere:unittests',"
+         "  'type': 'windowed_test_launcher',"
+         "}}\n"),
+        r'c:\fake_src\out\Default\unittests.exe.runtime_deps':
+        ("unittests.exe\n"
+         "some_dependency\n"),
     }
     mbw = self.fake_mbw(files=files, win32=True)
-    self.check(['gen',
-                '-c', 'debug_goma',
-                '--swarming-targets-file',
-                'c:\\fake_src\\out\\Default\\tmp\\swarming_targets',
-                '--isolate-map-file',
-                'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl',
-                '//out/Default'], mbw=mbw, ret=0)
+    self.check([
+        'gen', '-c', 'debug_goma', '--swarming-targets-file',
+        'c:\\fake_src\\out\\Default\\tmp\\swarming_targets',
+        '--isolate-map-file',
+        'c:\\fake_src\\testing\\buildbot\\gn_isolate_map.pyl', '//out/Default'
+    ],
+               mbw=mbw,
+               ret=0)
 
     isolate_file = mbw.files['c:\\fake_src\\out\\Default\\unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -661,22 +691,23 @@
 
   def test_gen_console_test_launcher(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'console_test_launcher',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'console_test_launcher',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -709,24 +740,25 @@
 
   def test_isolate_test_launcher_with_webcam(self):
     test_files = {
-      '/tmp/swarming_targets': 'base_unittests\n',
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'console_test_launcher',"
-          "  'use_webcam': True,"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-          "some_resource_file\n"
-      ),
+        '/tmp/swarming_targets':
+        'base_unittests\n',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'console_test_launcher',"
+         "  'use_webcam': True,"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"
+         "some_resource_file\n"),
     }
-    mbw = self.check(['gen', '-c', 'debug_goma', '//out/Default',
-                      '--swarming-targets-file', '/tmp/swarming_targets',
-                      '--isolate-map-file',
-                      '/fake_src/testing/buildbot/gn_isolate_map.pyl'],
-                     files=test_files, ret=0)
+    mbw = self.check([
+        'gen', '-c', 'debug_goma', '//out/Default', '--swarming-targets-file',
+        '/tmp/swarming_targets', '--isolate-map-file',
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+    ],
+                     files=test_files,
+                     ret=0)
 
     isolate_file = mbw.files['/fake_src/out/Default/base_unittests.isolate']
     isolate_file_contents = ast.literal_eval(isolate_file)
@@ -763,42 +795,44 @@
 
   def test_isolate(self):
     files = {
-      '/fake_src/out/Default/toolchain.ninja': "",
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'non_parallel_console_test_launcher',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/fake_src/out/Default/toolchain.ninja':
+        "",
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'non_parallel_console_test_launcher',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    self.check(['isolate', '-c', 'debug_goma', '//out/Default',
-                'base_unittests'], files=files, ret=0)
+    self.check(
+        ['isolate', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
+        files=files,
+        ret=0)
 
     # test running isolate on an existing build_dir
     files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
     self.check(['isolate', '//out/Default', 'base_unittests'],
-               files=files, ret=0)
+               files=files,
+               ret=0)
     files['/fake_src/out/Default/mb_type'] = 'gn\n'
     self.check(['isolate', '//out/Default', 'base_unittests'],
-               files=files, ret=0)
+               files=files,
+               ret=0)
 
   def test_run(self):
     files = {
-      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
-          "{'base_unittests': {"
-          "  'label': '//base:base_unittests',"
-          "  'type': 'windowed_test_launcher',"
-          "}}\n"
-      ),
-      '/fake_src/out/Default/base_unittests.runtime_deps': (
-          "base_unittests\n"
-      ),
+        '/fake_src/testing/buildbot/gn_isolate_map.pyl':
+        ("{'base_unittests': {"
+         "  'label': '//base:base_unittests',"
+         "  'type': 'windowed_test_launcher',"
+         "}}\n"),
+        '/fake_src/out/Default/base_unittests.runtime_deps':
+        ("base_unittests\n"),
     }
-    self.check(['run', '-c', 'debug_goma', '//out/Default',
-                'base_unittests'], files=files, ret=0)
+    self.check(['run', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
+               files=files,
+               ret=0)
 
   def test_run_swarmed(self):
     files = {
@@ -830,25 +864,33 @@
 
     mbw.ToSrcRelPath = to_src_rel_path_stub
 
-    self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
-                'base_unittests'], mbw=mbw, ret=0)
+    self.check(
+        ['run', '-s', '-c', 'debug_goma', '//out/Default', 'base_unittests'],
+        mbw=mbw,
+        ret=0)
     mbw = self.fake_mbw(files=files)
     mbw.files[mbw.PathJoin(mbw.TempDir(), 'task.json')] = task_json
     mbw.files[mbw.PathJoin(mbw.TempDir(), 'collect_output.json')] = collect_json
     mbw.ToSrcRelPath = to_src_rel_path_stub
-    self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
-                '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
+    self.check([
+        'run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7', '//out/Default',
+        'base_unittests'
+    ],
+               mbw=mbw,
+               ret=0)
 
   def test_lookup(self):
     self.check(['lookup', '-c', 'debug_goma'], ret=0)
 
   def test_quiet_lookup(self):
-    self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+    self.check(['lookup', '-c', 'debug_goma', '--quiet'],
+               ret=0,
                out=('is_debug = true\n'
                     'use_goma = true\n'))
 
   def test_lookup_goma_dir_expansion(self):
-    self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
+    self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'],
+               ret=0,
                out=('\n'
                     'Writing """\\\n'
                     'enable_doom_melon = true\n'
@@ -875,22 +917,33 @@
     self.assertIn('Must specify a build --phase', mbw.out)
 
     # Check that passing a --phase to a single-phase builder fails.
-    mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_builder',
-                      '--phase', 'phase_1'], ret=1)
+    mbw = self.check([
+        'lookup', '-m', 'fake_group', '-b', 'fake_builder', '--phase', 'phase_1'
+    ],
+                     ret=1)
     self.assertIn('Must not specify a build --phase', mbw.out)
 
     # Check that passing a wrong phase key to a multi-phase builder fails.
-    mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase',
-                      '--phase', 'wrong_phase'], ret=1)
+    mbw = self.check([
+        'lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase',
+        'wrong_phase'
+    ],
+                     ret=1)
     self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
 
     # Check that passing a correct phase key to a multi-phase builder passes.
-    mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase',
-                      '--phase', 'phase_1'], ret=0)
+    mbw = self.check([
+        'lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase',
+        'phase_1'
+    ],
+                     ret=0)
     self.assertIn('phase = 1', mbw.out)
 
-    mbw = self.check(['lookup', '-m', 'fake_group', '-b', 'fake_multi_phase',
-                      '--phase', 'phase_2'], ret=0)
+    mbw = self.check([
+        'lookup', '-m', 'fake_group', '-b', 'fake_multi_phase', '--phase',
+        'phase_2'
+    ],
+                     ret=0)
     self.assertIn('phase = 2', mbw.out)
 
   def test_validate(self):