mirror of
https://sourceware.org/git/glibc.git
synced 2025-12-08 02:02:23 +03:00
Also, change sources.redhat.com to sourceware.org.
This patch was automatically generated by running the following shell
script, which uses GNU sed, and which avoids modifying files imported
from upstream:
sed -ri '
s,(http|ftp)(://(.*\.)?(gnu|fsf|sourceware)\.org($|[^.]|\.[^a-z])),https\2,g
s,(http|ftp)(://(.*\.)?)sources\.redhat\.com($|[^.]|\.[^a-z]),https\2sourceware.org\4,g
' \
$(find $(git ls-files) -prune -type f \
! -name '*.po' \
! -name 'ChangeLog*' \
! -path COPYING ! -path COPYING.LIB \
! -path manual/fdl-1.3.texi ! -path manual/lgpl-2.1.texi \
! -path manual/texinfo.tex ! -path scripts/config.guess \
! -path scripts/config.sub ! -path scripts/install-sh \
! -path scripts/mkinstalldirs ! -path scripts/move-if-change \
! -path INSTALL ! -path locale/programs/charmap-kw.h \
! -path po/libc.pot ! -path sysdeps/gnu/errlist.c \
! '(' -name configure \
-execdir test -f configure.ac -o -f configure.in ';' ')' \
! '(' -name preconfigure \
-execdir test -f preconfigure.ac ';' ')' \
-print)
and then by running 'make dist-prepare' to regenerate files built
from the altered files, and then executing the following to cleanup:
chmod a+x sysdeps/unix/sysv/linux/riscv/configure
# Omit irrelevant whitespace and comment-only changes,
# perhaps from a slightly-different Autoconf version.
git checkout -f \
sysdeps/csky/configure \
sysdeps/hppa/configure \
sysdeps/riscv/configure \
sysdeps/unix/sysv/linux/csky/configure
# Omit changes that caused a pre-commit check to fail like this:
# remote: *** error: sysdeps/powerpc/powerpc64/ppc-mcount.S: trailing lines
git checkout -f \
sysdeps/powerpc/powerpc64/ppc-mcount.S \
sysdeps/unix/sysv/linux/s390/s390-64/syscall.S
# Omit change that caused a pre-commit check to fail like this:
# remote: *** error: sysdeps/sparc/sparc64/multiarch/memcpy-ultra3.S: last line does not end in newline
git checkout -f sysdeps/sparc/sparc64/multiarch/memcpy-ultra3.S
142 lines
4.2 KiB
Python
142 lines
4.2 KiB
Python
#!/usr/bin/python
# Copyright (C) 2015-2019 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <https://www.gnu.org/licenses/>.

"""Functions to import benchmark data and process it"""
|
|
|
|
import json

# jsonschema is a third-party dependency used by parse_bench to validate
# benchmark output against its JSON schema.  Fail at import time with a
# clear message rather than with an obscure NameError later on.
try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
|
|
|
|
|
|
def mean(lst):
    """Return the arithmetic mean of the numbers in LST.

    A hand-rolled mean is used because the numpy average function has
    horrible performance.

    Args:
        lst: The list of numbers to average.

    Return:
        The mean of members in the list.
    """
    total = sum(lst)
    count = len(lst)
    return total / count
|
|
|
|
|
|
def split_list(bench, func, var):
    """Split the list into a smaller set of more distinct points

    Group together points such that the difference between the smallest
    point and the mean is less than 1/3rd of the mean.  This means that
    the mean is at most 1.5x the smallest member of that group.

        mean - xmin < mean / 3
        i.e. 2 * mean / 3 < xmin
        i.e. mean < 3 * xmin / 2

    For an evenly distributed group, the largest member will be less
    than twice the smallest member of the group.

    Derivation: an evenly distributed series would be
    xmin, xmin + d, xmin + 2d, ... with

        mean = (2 * n * xmin + n * (n - 1) * d) / 2 * n

    and max element xmin + (n - 1) * d.  Now, mean < 3 * xmin / 2 gives

        3 * xmin > 2 * mean
        3 * xmin > (2 * n * xmin + n * (n - 1) * d) / n
        3 * n * xmin > 2 * n * xmin + n * (n - 1) * d
        n * xmin > n * (n - 1) * d
        xmin > (n - 1) * d
        2 * xmin > xmin + (n - 1) * d
        2 * xmin > xmax

    Hence, proved.  Similarly, it is trivial to prove that for such an
    aggregation done from the maximum element instead, the maximum
    element in the group is at most 4/3 times the mean.

    Args:
        bench: The benchmark object
        func: The function name
        var: The function variant name
    """
    groups = []
    timings = bench['functions'][func][var]['timings']
    end = len(timings) - 1
    while timings:
        for start in range(end + 1):
            tail = timings[start:]
            # mean() inlined so the grouping criterion reads in one place.
            avg = sum(tail) / len(tail)
            if avg > 0.75 * timings[end]:
                # Groups are discovered back-to-front; collect them in
                # that order and reverse once at the end.
                groups.append(avg)
                timings = timings[:start]
                end = start - 1
                break
    bench['functions'][func][var]['timings'] = groups[::-1]
|
|
|
|
|
|
def do_for_all_timings(bench, callback):
    """Call a function for all timing objects for each function and its
    variants.

    Walk every function and every variant in the benchmark object and
    invoke CALLBACK once for each variant that has timing data; variant
    entries without a 'timings' key are skipped.

    Args:
        bench: The benchmark object
        callback: The callback function, invoked as
            callback(bench, func, variant)
    """
    # Iterate the dicts directly instead of via .keys(); use .items()
    # to avoid re-indexing bench['functions'][func][k] for the test.
    for func, variants in bench['functions'].items():
        for k, attrs in variants.items():
            # Skip variant entries that carry no timing data.
            if 'timings' in attrs:
                callback(bench, func, k)
|
|
|
|
|
|
def compress_timings(points):
    """Club points with close enough values into a single mean value

    The benchmark object is modified in place: each variant's 'timings'
    list is replaced with the (shorter) list of group means computed by
    split_list.  See split_list for details on how the clubbing is done.

    Args:
        points: The set of points (a benchmark object).
    """
    do_for_all_timings(points, split_list)
|
|
|
|
|
|
def parse_bench(filename, schema_filename):
    """Parse the input file

    Parse and validate the json file containing the benchmark outputs,
    sort each timings list in place, and return the resulting object.

    Args:
        filename: Name of the benchmark output file.
        schema_filename: Name of the JSON schema file to validate
            against.

    Return:
        The bench dictionary.
    """
    with open(schema_filename, 'r') as schemafile:
        schema = json.load(schemafile)

    with open(filename, 'r') as benchfile:
        bench = json.load(benchfile)

    validator.validate(bench, schema)

    # Sort every timings list in place before handing the object back.
    do_for_all_timings(
        bench,
        lambda b, f, v: b['functions'][f][v]['timings'].sort())

    return bench