Repurpose upload script to read natively generated histogram JSON.

This script will be used when tests write proto-backed JSON. It still
has to reside source-side because we need to access the catapult Python
API to get at HistogramSet and reserved_infos, etc.

WebRTC tests will write proto-backed JSON, and this script can read
it because the Histogram class has been made capable of parsing that
format. The script adds build information as diagnostics and then
uploads in the old JSON format (the dashboard can read the new format
as well, but there's no reason to implement export to it at this point).
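
To illustrate, the enrichment step amounts to roughly the following
(a minimal sketch against the catapult tracing API, assuming catapult's
tracing dir is on sys.path; the 'perf_results.json' path and the
'example-bot' diagnostic value are made-up placeholders):

  import json

  from tracing.value import histogram_set
  from tracing.value.diagnostics import generic_set
  from tracing.value.diagnostics import reserved_infos

  # Read the proto-backed JSON the test wrote into a HistogramSet.
  histograms = histogram_set.HistogramSet()
  with open('perf_results.json') as f:
    histograms.ImportDicts(json.load(f))

  # Attach build information as diagnostics shared by every histogram.
  histograms.AddSharedDiagnosticToAllHistograms(
      reserved_infos.BOTS.name, generic_set.GenericSet(['example-bot']))

  # Upload still happens in the old JSON format the dashboard accepts.
  payload = json.dumps(histograms.AsDicts())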

We could imagine more outlandish solutions where the test binaries
themselves do the uploading, but then we would have to pass the
build information to them, and they would have to upload from the
shards. Alternatively, we could pass build information to tests so
they write it right into the histograms.

This solution is probably the best one for now since it
1) is consistent with how Chromium does it,
2) is flexible in the right ways, and
3) doesn't require us to worry about whether uploading from shards
   even works.

Bug: webrtc:11084
Change-Id: I8888ce9f24e0ca58f984d2c2e9af7740ee5e89b6
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/166464
Commit-Queue: Patrik Höglund <phoglund@webrtc.org>
Reviewed-by: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#30301}
diff --git a/tools_webrtc/perf/histogram_util.py b/tools_webrtc/perf/histogram_util.py
deleted file mode 100644
index aabd5b8..0000000
--- a/tools_webrtc/perf/histogram_util.py
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-"""Upload data to the chrome perf dashboard via add_histograms endpoint."""
-
-import os
-import sys
-import logging
-
-SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
-sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'third_party', 'catapult',
-                                'tracing'))
-
-from tracing.value import histogram
-from tracing.value import histogram_set
-from tracing.value.diagnostics import generic_set
-from tracing.value.diagnostics import reserved_infos
-
-# Enums aren't supported in Chromium's Python env, so do something similar:
-class ImprovementDirection(object):
-  DEFAULT = 1
-  BIGGER_IS_BETTER = 2
-  SMALLER_IS_BETTER = 3
-
-
-def MakeWebRtcHistogramSet(stats, commit_pos, commit_hash, master, bot,
-                           test_suite, build_url):
-  """Converts a dict of stats into a list of points with additional info.
-
-  Args:
-    stats: A list of histograms to upload.
-    commit_pos: Commit position (point id) that the test was run on.
-    commit_hash: WebRTC commit hash that the test was run on.
-    master: Master name as it will show up in the perf dashboard.
-    bot: Bot name as it will show up in the perf dashboard.
-    test_suite: Top-level identifier of the test for Chrome perf dashboard.
-    build_url: A URL pointing to the bot status page for this build.
-
-  Returns:
-    A histogram set in the format the Chrome perf dashboard expects.
-  """
-  common_diagnostics = {
-      reserved_infos.MASTERS: master,
-      reserved_infos.BOTS: bot,
-      reserved_infos.POINT_ID: commit_pos,
-      reserved_infos.BENCHMARKS: test_suite,
-      reserved_infos.WEBRTC_REVISIONS: str(commit_hash),
-      reserved_infos.BUILD_URLS: build_url,
-  }
-
-  hs = histogram_set.HistogramSet()
-  for h in stats:
-    hs.AddHistogram(h)
-
-  for k, v in common_diagnostics.items():
-    hs.AddSharedDiagnosticToAllHistograms(k.name, generic_set.GenericSet([v]))
-
-  return hs
-
-
-def LoadHistograms(data):
-  """Load histograms from Chart JSON format json file and fix them for API.
-
-  Args:
-    data: parsed JSON object in Chart JSON format.
-
-  Note:
-    'std' entries in the input are ignored (with a debug log).
-  Returns:
-    list of loaded histograms.
-  """
-  stats = []
-  for metric, story in data['charts'].items():
-    for story_name, story_desc in story.items():
-      units = story_desc['units'].strip()
-      if 'std' in story_desc:
-        # TODO(bugs.webrtc.org/11084): This seems bad to throw away?
-        logging.debug('std is not supported, specify list of values instead.')
-
-      if 'value' in story_desc:
-        values = [story_desc['value']]
-      else:
-        values = list(story_desc['values'])
-
-      improvement_direction = ImprovementDirection.DEFAULT
-      if 'improvement_direction' in story_desc:
-        if story_desc['improvement_direction'] == 'bigger_is_better':
-          improvement_direction = ImprovementDirection.BIGGER_IS_BETTER
-        elif story_desc['improvement_direction'] == 'smaller_is_better':
-          improvement_direction = ImprovementDirection.SMALLER_IS_BETTER
-      if 'higher_is_better' in story_desc:
-        if story_desc['higher_is_better']:
-          improvement_direction = ImprovementDirection.BIGGER_IS_BETTER
-        else:
-          improvement_direction = ImprovementDirection.SMALLER_IS_BETTER
-
-      new_metric, new_units, new_values = _FixUnits(metric, units, values)
-      h = _BuildHistogram(new_metric, story_name, new_units, new_values,
-                          improvement_direction)
-      stats.append(h)
-  return stats
-
-
-def _FixUnits(metric_name, units, values):
-  """Fix units and metric name with values if required.
-
-  Args:
-    metric_name: original metric name
-    units: raw, trimmed units
-    values: original values
-
-  Returns:
-    (metric_name, units, values) triple with fixed content
-  """
-  if units == 'bps':
-    return metric_name, 'bytesPerSecond', [v / 8.0 for v in values]
-  elif units == 'dB':
-    return metric_name + '_dB', 'unitless', values
-  elif units == 'fps':
-    return metric_name + '_fps', 'Hz', values
-  elif units == 'frames':
-    return metric_name, 'count', values
-  elif units == 'ms':
-    return metric_name, 'msBestFitFormat', values
-  elif units == '%':
-    return metric_name + '_%', 'unitless', values
-  else:
-    return metric_name, units, values
-
-
-def _BuildHistogram(metric_name, story_name, units, values,
-                    improvement_direction):
-  """Build histogram. Uses unitless for unsupported units."""
-  if units not in histogram.UNIT_NAMES:
-    logging.debug(
-        'Unsupported unit %s will be replaced by \'unitless\'', units)
-    units = 'unitless'
-  if improvement_direction is ImprovementDirection.BIGGER_IS_BETTER:
-    units = units + '_biggerIsBetter'
-  elif improvement_direction is ImprovementDirection.SMALLER_IS_BETTER:
-    units = units + '_smallerIsBetter'
-  h = histogram.Histogram(metric_name, units,
-                          histogram.HistogramBinBoundaries.SINGULAR)
-  h.diagnostics[reserved_infos.STORIES.name] = generic_set.GenericSet(
-      [story_name])
-  h.CustomizeSummaryOptions({
-      'std': False,
-      'avg': False,
-      'count': False,
-      'max': False,
-      'min': False,
-      'sum': False
-  })
-  for v in values:
-    h.AddSample(v)
-  return h
diff --git a/tools_webrtc/perf/histogram_util_test.py b/tools_webrtc/perf/histogram_util_test.py
deleted file mode 100644
index 51d9982..0000000
--- a/tools_webrtc/perf/histogram_util_test.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
-#
-# Use of this source code is governed by a BSD-style license
-# that can be found in the LICENSE file in the root of the source
-# tree. An additional intellectual property rights grant can be found
-# in the file PATENTS.  All contributing project authors may
-# be found in the AUTHORS file in the root of the source tree.
-
-import os
-import sys
-
-SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
-CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
-sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'third_party', 'catapult',
-                                'tracing'))
-sys.path.append(os.path.join(CHECKOUT_ROOT, 'third_party', 'pymock'))
-
-import json
-import mock
-import unittest
-
-import histogram_util as u
-
-from tracing.value import histogram
-from tracing.value.diagnostics import generic_set
-from tracing.value.diagnostics import reserved_infos
-
-
-class HistogramUploaderUnittest(unittest.TestCase):
-
-  def testLoadHistogramsWithValues(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "values": [0.6, 0.5, 0.7],
-            "units": "unitless_biggerIsBetter"
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "unitless_biggerIsBetter")
-    self.assertEqual(stats[0].sample_values, [0.6, 0.5, 0.7])
-
-  def testLoadHistogramsWithValue(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "value": 0.3,
-            "units": "unitless_biggerIsBetter"
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "unitless_biggerIsBetter")
-    self.assertEqual(stats[0].sample_values, [0.3])
-
-  def testLoadHistogramsWithUnknownUnit(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "value": 0.3,
-            "units": "good_score_biggerIsBetter"
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "unitless")
-    self.assertEqual(stats[0].sample_values, [0.3])
-
-  def testLoadHistogramsWithStd(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "value": 0.3,
-            "std": 0.1,
-            "units": "unitless",
-            "higher_is_better": true
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "unitless_biggerIsBetter")
-    self.assertEqual(stats[0].sample_values, [0.3])
-
-  def testLoadHistogramsMsBiggerIsBetter(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "value": 0.3,
-            "std": 0.1,
-            "units": "ms",
-            "improvement_direction": "bigger_is_better"
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "msBestFitFormat_biggerIsBetter")
-    self.assertEqual(stats[0].sample_values, [0.3])
-
-  def testLoadHistogramsBps(self):
-    data = json.loads("""
-    {
-      "format_version": "1.0",
-      "charts": {
-        "audio_score": {
-          "AV": {
-            "type": "scalar",
-            "values": [240, 160],
-            "std": 0.1,
-            "units": "bps"
-          }
-        }
-      }
-    }
-    """)
-    stats = u.LoadHistograms(data)
-    self.assertEqual(len(stats), 1)
-    self.assertEqual(stats[0].name, "audio_score")
-    self.assertEqual(stats[0].unit, "bytesPerSecond")
-    self.assertEqual(stats[0].sample_values, [30, 20])
-
-  def testMakeWebRtcHistogramSet(self):
-    h = histogram.Histogram("audio_score", "unitless_biggerIsBetter",
-                            histogram.HistogramBinBoundaries.SINGULAR)
-    h.AddSample(0.5)
-    h.diagnostics[reserved_infos.STORIES.name] = generic_set.GenericSet(["AV"])
-    h.CustomizeSummaryOptions({
-        "std": False,
-        "avg": False,
-        "count": False,
-        "max": False,
-        "min": False,
-        "sum": False
-    })
-    stats = [h]
-    build_url = ('https://ci.chromium.org/p/webrtc/builders/ci/'
-                 'Android64%20%28M%20Nexus5X%29%28dbg%29')
-    hs = u.MakeWebRtcHistogramSet(
-        stats, commit_pos=123456789,
-        commit_hash="da39a3ee5e6b4b0d3255bfef95601890afd80709",
-        master="master", bot="bot", test_suite="webrtc_test_suite",
-        build_url=build_url)
-
-    expected = [{
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": [123456789]
-    }, {
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": ["webrtc_test_suite"]
-    }, {
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": ["bot"]
-    }, {
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": ["master"]
-    }, {
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": ["da39a3ee5e6b4b0d3255bfef95601890afd80709"]
-    }, {
-        "guid": mock.ANY,
-        "type": "GenericSet",
-        "values": [build_url]
-    }, {
-        "sampleValues": [0.5],
-        "name": "audio_score",
-        "running": [1, 0.5, -0.6931471805599453, 0.5, 0.5, 0.5, 0],
-        "diagnostics": {
-            "benchmarks": mock.ANY,
-            "bots": mock.ANY,
-            "buildUrls": mock.ANY,
-            "pointId": mock.ANY,
-            "masters": mock.ANY,
-            "stories": {
-                "type": "GenericSet",
-                "values": ["AV"]
-            },
-            "webrtcRevisions": mock.ANY
-        },
-        "allBins": [[1]],
-        "summaryOptions": {
-            "avg": False,
-            "count": False,
-            "max": False,
-            "min": False,
-            "std": False,
-            "sum": False
-        },
-        "unit": "unitless_biggerIsBetter"
-    }]
-    self.maxDiff = None  # pylint: disable=C0103
-    self.assertItemsEqual(expected, hs.AsDicts())
-
-
-if __name__ == "__main__":
-  unittest.main()
diff --git a/tools_webrtc/perf/webrtc_dashboard_upload.py b/tools_webrtc/perf/webrtc_dashboard_upload.py
index d04374a..2eb1d97 100644
--- a/tools_webrtc/perf/webrtc_dashboard_upload.py
+++ b/tools_webrtc/perf/webrtc_dashboard_upload.py
@@ -7,32 +7,33 @@
 # in the file PATENTS.  All contributing project authors may
 # be found in the AUTHORS file in the root of the source tree.
 
-"""Converts and uploads results to the Chrome perf dashboard.
+"""Adds build info to perf results and uploads them.
 
-This conversion step is needed because test/testsupport/perf_test.cc can't
-output histograms natively. There is, unfortunately, no C++ API for histograms.
-This script is in python so it can depend on Catapult's python API instead.
-See histogram_util.py for how this is done. We should move to the C++ API and
-delete the scripts in this dir as soon as there is a C++ API (less conversions =
-easier to understand).
+The tests don't know which bot executed them or at what revision, so we need
+to take their output and enrich it with this information. We load the JSON
+from the tests, add the build information as shared diagnostics and then
+upload it to the dashboard.
 
 This script can't be in recipes, because we can't access the catapult APIs from
 there. It needs to be here source-side.
-
-This script is adapted from the downstream variant like this:
-  * Follows upstream naming conventions.
-  * Downstream-only parameters and concepts go away.
-  * oAuth tokens are generated by luci-auth.
 """
 
 import argparse
 import httplib2
 import json
+import os
 import sys
 import subprocess
 import zlib
 
-import histogram_util
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+CHECKOUT_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir))
+sys.path.insert(0, os.path.join(CHECKOUT_ROOT, 'third_party', 'catapult',
+                                'tracing'))
+
+from tracing.value import histogram_set
+from tracing.value.diagnostics import generic_set
+from tracing.value.diagnostics import reserved_infos
 
 
 def _GenerateOauthToken():
@@ -47,18 +48,23 @@
         (p.stdout.read(), p.stderr.read()))
 
 
-def _SendHistogramSetJson(url, histogram_json, oauth_token):
+def _SendHistogramSet(url, histograms, oauth_token):
   """Make a HTTP POST with the given JSON to the Performance Dashboard.
 
   Args:
     url: URL of Performance Dashboard instance, e.g.
         "https://chromeperf.appspot.com".
-    histogram_json: a JSON object that contains the data to be sent.
+    histograms: A histogram set object containing the data to be sent.
     oauth_token: An oauth token to use for authorization.
   """
   headers = {'Authorization': 'Bearer %s' % oauth_token}
-  serialized = json.dumps(histogram_json.AsDicts(), indent=4)
-  data = zlib.compress(serialized)
+  serialized = json.dumps(histograms.AsDicts(), indent=4)
+
+  if url.startswith('http://localhost'):
+    # The catapult server turns off compression in developer mode.
+    data = serialized
+  else:
+    data = zlib.compress(serialized)
 
   http = httplib2.Http()
   response, content = http.request(url + '/add_histograms', method='POST',
@@ -66,21 +72,33 @@
   return response, content
 
 
-def _LoadHistogramSetJson(options):
+def _LoadHistogramSetFromJson(options):
   with options.input_results_file as f:
     json_data = json.load(f)
 
-  histograms = histogram_util.LoadHistograms(json_data)
-  hs = histogram_util.MakeWebRtcHistogramSet(
-      stats=histograms,
-      commit_pos=options.commit_position,
-      commit_hash=options.webrtc_git_hash,
-      master=options.perf_dashboard_machine_group,
-      bot=options.bot,
-      test_suite=options.test_suite,
-      build_url=options.build_page_url)
+  histograms = histogram_set.HistogramSet()
+  histograms.ImportDicts(json_data)
+  return histograms
 
-  return hs
+
+def _AddBuildInfo(histograms, options):
+  common_diagnostics = {
+      reserved_infos.MASTERS: options.perf_dashboard_machine_group,
+      reserved_infos.BOTS: options.bot,
+      reserved_infos.POINT_ID: options.commit_position,
+      reserved_infos.BENCHMARKS: options.test_suite,
+      reserved_infos.WEBRTC_REVISIONS: str(options.webrtc_git_hash),
+      reserved_infos.BUILD_URLS: options.build_page_url,
+  }
+
+  for k, v in common_diagnostics.items():
+    histograms.AddSharedDiagnosticToAllHistograms(
+        k.name, generic_set.GenericSet([v]))
+
+
+def _DumpOutput(histograms, output_file):
+  with output_file:
+    json.dump(histograms.AsDicts(), output_file, indent=4)
 
 
 def _CreateParser():
@@ -116,15 +134,15 @@
   parser = _CreateParser()
   options = parser.parse_args(args)
 
-  histogram_json = _LoadHistogramSetJson(options)
+  histograms = _LoadHistogramSetFromJson(options)
+  _AddBuildInfo(histograms, options)
 
   if options.output_json_file:
-    with options.output_json_file as output_file:
-      json.dump(histogram_json.AsDicts(), output_file, indent=4)
+    _DumpOutput(histograms, options.output_json_file)
 
   oauth_token = _GenerateOauthToken()
-  response, content = _SendHistogramSetJson(
-      options.dashboard_url, histogram_json, oauth_token)
+  response, content = _SendHistogramSet(
+      options.dashboard_url, histograms, oauth_token)
 
   if response.status == 200:
     return 0