coverage_report.py
#!/usr/bin/env vpython3
# Copyright 2017 The PDFium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a coverage report for given tests.

Requires that 'use_clang_coverage = true' is set in args.gn.
Prefers that 'is_component_build = false' is set in args.gn.
"""

import argparse
from collections import namedtuple
import fnmatch
import os
import pprint
import subprocess
import sys

# Add parent dir to avoid having to set PYTHONPATH.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import common

# 'binary' is the file that is to be run for the test.
# 'use_test_runner' indicates if 'binary' depends on test_runner.py and thus
# requires special handling.
# 'opt_args' are optional arguments to pass to the test 'binary'.
TestSpec = namedtuple('TestSpec', 'binary, use_test_runner, opt_args')

# All of the coverage tests that the script knows how to run.
COVERAGE_TESTS = {
    'pdfium_unittests':
        TestSpec('pdfium_unittests', False, []),
    'pdfium_embeddertests':
        TestSpec('pdfium_embeddertests', False, []),
    'corpus_tests':
        TestSpec('run_corpus_tests.py', True, []),
    'corpus_tests_javascript_disabled':
        TestSpec('run_corpus_tests.py', True, ['--disable-javascript']),
    'corpus_tests_xfa_disabled':
        TestSpec('run_corpus_tests.py', True, ['--disable-xfa']),
    'corpus_tests_render_oneshot':
        TestSpec('run_corpus_tests.py', True, ['--render-oneshot']),
    'corpus_tests_reverse_byte_order':
        TestSpec('run_corpus_tests.py', True, ['--reverse-byte-order']),
    'javascript_tests':
        TestSpec('run_javascript_tests.py', True, []),
    'javascript_tests_javascript_disabled':
        TestSpec('run_javascript_tests.py', True, ['--disable-javascript']),
    'javascript_tests_xfa_disabled':
        TestSpec('run_javascript_tests.py', True, ['--disable-xfa']),
    'pixel_tests':
        TestSpec('run_pixel_tests.py', True, []),
    'pixel_tests_javascript_disabled':
        TestSpec('run_pixel_tests.py', True, ['--disable-javascript']),
    'pixel_tests_xfa_disabled':
        TestSpec('run_pixel_tests.py', True, ['--disable-xfa']),
    'pixel_tests_render_oneshot':
        TestSpec('run_pixel_tests.py', True, ['--render-oneshot']),
    'pixel_tests_reverse_byte_order':
        TestSpec('run_pixel_tests.py', True, ['--reverse-byte-order']),
}
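
# For example, the 'pixel_tests_xfa_disabled' entry above ends up running
# <source_directory>/testing/tools/run_pixel_tests.py --disable-xfa through
# the test runner, while 'pdfium_unittests' runs the pdfium_unittests binary
# from <build_directory> directly; see calculate_coverage_tests() below.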


class CoverageExecutor:

  def __init__(self, parser, args):
    """Initialize executor based on the current script environment

    Args:
      parser: argparse.ArgumentParser for handling improper inputs.
      args: Dictionary of arguments passed into the calling script.
    """
    self.dry_run = args['dry_run']
    self.verbose = args['verbose']

    self.source_directory = args['source_directory']
    if not os.path.isdir(self.source_directory):
      parser.error("'%s' needs to be a directory" % self.source_directory)

    self.llvm_directory = os.path.join(self.source_directory, 'third_party',
                                       'llvm-build', 'Release+Asserts', 'bin')
    if not os.path.isdir(self.llvm_directory):
      parser.error("Cannot find LLVM bin directory, expected it to be in '%s'"
                   % self.llvm_directory)

    self.build_directory = args['build_directory']
    if not os.path.isdir(self.build_directory):
      parser.error("'%s' needs to be a directory" % self.build_directory)

    self.coverage_tests, self.build_targets = self.calculate_coverage_tests(
        args)
    if not self.coverage_tests:
      parser.error(
          'No valid tests in set to be run. This is likely due to bad command '
          'line arguments')

    if not common.GetBooleanGnArg('use_clang_coverage', self.build_directory,
                                  self.verbose):
      parser.error(
          'use_clang_coverage does not appear to be set to true for build, but '
          'is needed')

    self.output_directory = args['output_directory']
    if not os.path.exists(self.output_directory):
      if not self.dry_run:
        os.makedirs(self.output_directory)
    elif not os.path.isdir(self.output_directory):
      parser.error('%s exists, but is not a directory' % self.output_directory)
    elif len(os.listdir(self.output_directory)) > 0:
      parser.error('%s is not empty, cowardly refusing to continue' %
                   self.output_directory)

    self.prof_data = os.path.join(self.output_directory, 'pdfium.profdata')

  def check_output(self, args, dry_run=False, env=None):
    """Dry run aware wrapper of subprocess.check_output()"""
    if dry_run:
      print("Would have run '%s'" % ' '.join(args))
      return ''

    output = subprocess.check_output(args, env=env)

    if self.verbose:
      print("check_output(%s) returned '%s'" % (args, output))
    return output

  def call(self, args, dry_run=False, env=None):
    """Dry run aware wrapper of subprocess.call()"""
    if dry_run:
      print("Would have run '%s'" % ' '.join(args))
      return 0

    output = subprocess.call(args, env=env)

    if self.verbose:
      print('call(%s) returned %s' % (args, output))
    return output

  def call_silent(self, args, dry_run=False, env=None):
    """Dry run aware wrapper of subprocess.call() that eats output from call"""
    if dry_run:
      print("Would have run '%s'" % ' '.join(args))
      return 0

    with open(os.devnull, 'w') as f:
      output = subprocess.call(args, env=env, stdout=f)

    if self.verbose:
      print('call_silent(%s) returned %s' % (args, output))
    return output

  def calculate_coverage_tests(self, args):
    """Determine which tests should be run."""
    testing_tools_directory = os.path.join(self.source_directory, 'testing',
                                           'tools')
    tests = args['tests'] if args['tests'] else COVERAGE_TESTS.keys()
    coverage_tests = {}
    build_targets = set()
    for name in tests:
      test_spec = COVERAGE_TESTS[name]
      if test_spec.use_test_runner:
        binary_path = os.path.join(testing_tools_directory, test_spec.binary)
        build_targets.add('pdfium_diff')
        build_targets.add('pdfium_test')
      else:
        binary_path = os.path.join(self.build_directory, test_spec.binary)
        build_targets.add(name)
      coverage_tests[name] = TestSpec(binary_path, test_spec.use_test_runner,
                                      test_spec.opt_args)

    build_targets = list(build_targets)

    return coverage_tests, build_targets

  def build_binaries(self):
    """Build all the binaries that are going to be needed for coverage
    generation."""
    call_args = ['autoninja', '-C', self.build_directory]
    call_args.extend(self.build_targets)
    return self.call(call_args, dry_run=self.dry_run) == 0

  def generate_coverage(self, name, spec):
    """Generate the coverage data for a test

    Args:
      name: Name associated with the test to be run. This is used as a label
            in the coverage data, so should be unique across all of the tests
            being run.
      spec: Tuple containing the TestSpec.
    """
    if self.verbose:
      print("Generating coverage for test '%s', using data '%s'" % (name, spec))
    if not os.path.exists(spec.binary):
      print('Unable to generate coverage for %s, since it appears to not exist'
            ' @ %s' % (name, spec.binary))
      return False

    binary_args = [spec.binary]
    if spec.opt_args:
      binary_args.extend(spec.opt_args)
    profile_pattern_string = '%8m'
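    # Note: '%8m' is an LLVM_PROFILE_FILE pattern rather than a printf format;
    # it tells the LLVM profile runtime to merge results into a pool of up to
    # 8 .profraw files, which is why test-runner parallelism is capped at 8
    # below (description based on LLVM's %Nm substitution, not on this file).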
    expected_profraw_file = '%s.%s.profraw' % (name, profile_pattern_string)
    expected_profraw_path = os.path.join(self.output_directory,
                                         expected_profraw_file)

    env = {
        'LLVM_PROFILE_FILE': expected_profraw_path,
        'PATH': os.getenv('PATH') + os.pathsep + self.llvm_directory
    }

    if spec.use_test_runner:
      # Test runner performs multi-threading in the wrapper script, not the
      # test binary, so need to limit the number of instances of the binary
      # being run to the max value in LLVM_PROFILE_FILE, which is 8.
      binary_args.extend(['-j', '8', '--build-dir', self.build_directory])
    if self.call(binary_args, dry_run=self.dry_run, env=env) and self.verbose:
      print('Running %s appears to have failed, which might affect '
            'results' % spec.binary)

    return True

  def merge_raw_coverage_results(self):
    """Merge raw coverage data sets into one file for report generation."""
    llvm_profdata_bin = os.path.join(self.llvm_directory, 'llvm-profdata')

    raw_data = []
    raw_data_pattern = '*.profraw'
    for file_name in os.listdir(self.output_directory):
      if fnmatch.fnmatch(file_name, raw_data_pattern):
        raw_data.append(os.path.join(self.output_directory, file_name))

    return self.call(
        [llvm_profdata_bin, 'merge', '-o', self.prof_data, '-sparse=true'] +
        raw_data) == 0

  def generate_html_report(self):
    """Generate HTML report by calling upstream coverage.py"""
    coverage_bin = os.path.join(self.source_directory, 'tools', 'code_coverage',
                                'coverage.py')
    report_directory = os.path.join(self.output_directory, 'HTML')

    coverage_args = ['-p', self.prof_data]
    coverage_args += ['-b', self.build_directory]
    coverage_args += ['-o', report_directory]
    coverage_args += self.build_targets

    # Only analyze the directories of interest.
    coverage_args += ['-f', 'core']
    coverage_args += ['-f', 'fpdfsdk']
    coverage_args += ['-f', 'fxbarcode']
    coverage_args += ['-f', 'fxjs']
    coverage_args += ['-f', 'public']
    coverage_args += ['-f', 'samples']
    coverage_args += ['-f', 'xfa']

    # Ignore test files.
    coverage_args += ['-i', '.*test.*']

    # Component view is only useful for Chromium.
    coverage_args += ['--no-component-view']

    return self.call([coverage_bin] + coverage_args) == 0

  def run(self):
    """Set up the environment, execute the tests and generate coverage report"""
    if not self.fetch_profiling_tools():
      print('Unable to fetch profiling tools')
      return False

    if not self.build_binaries():
      print('Failed to successfully build binaries')
      return False

    for name in self.coverage_tests:
      if not self.generate_coverage(name, self.coverage_tests[name]):
        print('Failed to successfully generate coverage data')
        return False

    if not self.merge_raw_coverage_results():
      print('Failed to successfully merge raw coverage results')
      return False

    if not self.generate_html_report():
      print('Failed to successfully generate HTML report')
      return False

    return True

  def fetch_profiling_tools(self):
    """Call coverage.py with no args to ensure profiling tools are present."""
    return self.call_silent(
        os.path.join(self.source_directory, 'tools', 'code_coverage',
                     'coverage.py')) == 0


def main():
  parser = argparse.ArgumentParser()
  parser.formatter_class = argparse.RawDescriptionHelpFormatter
  parser.description = 'Generates a coverage report for given tests.'

  parser.add_argument(
      '-s',
      '--source_directory',
      help='Location of PDFium source directory, defaults to CWD',
      default=os.getcwd())
  build_default = os.path.join('out', 'Coverage')
  parser.add_argument(
      '-b',
      '--build_directory',
      help=
      'Location of PDFium build directory with coverage enabled, defaults to '
      '%s under CWD' % build_default,
      default=os.path.join(os.getcwd(), build_default))
  output_default = 'coverage_report'
  parser.add_argument(
      '-o',
      '--output_directory',
      help='Location to write out coverage report to, defaults to %s under CWD '
      % output_default,
      default=os.path.join(os.getcwd(), output_default))
  parser.add_argument(
      '-n',
      '--dry-run',
      help='Output commands instead of executing them',
      action='store_true')
  parser.add_argument(
      '-v',
      '--verbose',
      help='Output additional diagnostic information',
      action='store_true')
  parser.add_argument(
      'tests',
      help='Tests to be run, defaults to all. Valid entries are %s' %
      COVERAGE_TESTS.keys(),
      nargs='*')

  args = vars(parser.parse_args())
  if args['verbose']:
    pprint.pprint(args)

  executor = CoverageExecutor(parser, args)
  if executor.run():
    return 0
  return 1


if __name__ == '__main__':
  sys.exit(main())