test-zstd-speed.py: generate warning on noticeable compression ratio losses
added --ratioLimit option
@@ -59,15 +59,15 @@ def send_email(emails, topic, text, have_mutt, have_mail):
         log("e-mail cannot be sent (mail or mutt not found)")
 
 
-def send_email_with_attachments(branch, commit, last_commit, emails, text, results_files, logFileName, lower_limit, have_mutt, have_mail):
+def send_email_with_attachments(branch, commit, last_commit, args, text, results_files, logFileName, have_mutt, have_mail):
     with open(logFileName, "w") as myfile:
         myfile.writelines(text)
         myfile.close()
-        email_topic = '%s:%s Warning for %s:%s last_commit=%s speed<%s' % (email_header, pid, branch, commit, last_commit, lower_limit)
+        email_topic = '%s:%s Warning for %s:%s last_commit=%s speed<%s ratio<%s' % (email_header, pid, branch, commit, last_commit, args.lowerLimit, args.ratioLimit)
         if have_mutt:
-            execute('mutt -s "' + email_topic + '" ' + emails + ' -a ' + results_files + ' < ' + logFileName)
+            execute('mutt -s "' + email_topic + '" ' + args.emails + ' -a ' + results_files + ' < ' + logFileName)
         elif have_mail:
-            execute('mail -s "' + email_topic + '" ' + emails + ' < ' + logFileName)
+            execute('mail -s "' + email_topic + '" ' + args.emails + ' < ' + logFileName)
         else:
             log("e-mail cannot be sent (mail or mutt not found)")
 
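The reworked subject line now reports both thresholds. A minimal sketch of the formatting, with hypothetical values for email_header, pid and the commit identifiers (only the 0.98 and 0.999 defaults come from the argparse setup further down):

    email_header, pid = 'ZstdBench', 1234                      # hypothetical
    branch, commit, last_commit = 'dev', 'abc1234', 'def5678'  # hypothetical
    topic = '%s:%s Warning for %s:%s last_commit=%s speed<%s ratio<%s' % (
        email_header, pid, branch, commit, last_commit, 0.98, 0.999)
    print(topic)
    # ZstdBench:1234 Warning for dev:abc1234 last_commit=def5678 speed<0.98 ratio<0.999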
@@ -93,8 +93,9 @@ def git_get_changes(branch, commit, last_commit):
 
 def get_last_results(resultsFileName):
     if not os.path.isfile(resultsFileName):
-        return None, None, None
+        return None, None, None, None
     commit = None
+    csize = []
     cspeed = []
     dspeed = []
     with open(resultsFileName,'r') as f:
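For reference, a standalone sketch of the parsing rules in get_last_results, run against a hypothetical results file. The sample lines are invented; only the shapes matter to the parser: a 2-word line is "branch commit", and an 8-word result line carries csize at word 1, cspeed at word 3 and dspeed at word 5, exactly as indexed in the diff:

    # Hypothetical results-file content; real lines come from zstd's bench output.
    sample = [
        "dev abc1234",
        "1 73651590 2.878 330.5 MB/s 930.3 MB/s silesia.tar",
        "2 69645234 2.997 288.4 MB/s 921.7 MB/s silesia.tar",
    ]
    commit, csize, cspeed, dspeed = None, [], [], []
    for line in sample:
        words = line.split()
        if len(words) == 2:      # "branch commit" header starts a new record
            commit = words[1]
            csize, cspeed, dspeed = [], [], []
        if len(words) == 8:      # one result line per compression level
            csize.append(int(words[1]))
            cspeed.append(float(words[3]))
            dspeed.append(float(words[5]))
    print(commit, csize, cspeed, dspeed)
    # abc1234 [73651590, 69645234] [330.5, 288.4] [930.3, 921.7]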
@@ -102,23 +103,25 @@ def get_last_results(resultsFileName):
             words = line.split()
             if len(words) == 2: # branch + commit
                 commit = words[1];
+                csize = []
                 cspeed = []
                 dspeed = []
             if (len(words) == 8): # results
+                csize.append(int(words[1]))
                 cspeed.append(float(words[3]))
                 dspeed.append(float(words[5]))
-    return commit, cspeed, dspeed
+    return commit, csize, cspeed, dspeed
 
 
-def benchmark_and_compare(branch, commit, resultsFileName, lastCLevel, testFilePath, fileName, last_cspeed, last_dspeed, lower_limit, maxLoadAvg, message):
+def benchmark_and_compare(branch, commit, args, resultsFileName, testFilePath, fileName, last_csize, last_cspeed, last_dspeed):
     sleepTime = 30
-    while os.getloadavg()[0] > maxLoadAvg:
-        log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds" % (os.getloadavg()[0], maxLoadAvg, sleepTime))
+    while os.getloadavg()[0] > args.maxLoadAvg:
+        log("WARNING: bench loadavg=%.2f is higher than %s, sleeping for %s seconds" % (os.getloadavg()[0], args.maxLoadAvg, sleepTime))
         time.sleep(sleepTime)
     start_load = str(os.getloadavg())
-    result = execute('programs/zstd -qi5b1e%s %s' % (lastCLevel, testFilePath), print_output=True)
+    result = execute('programs/zstd -qi5b1e%s %s' % (args.lastCLevel, testFilePath), print_output=True)
     end_load = str(os.getloadavg())
-    linesExpected = lastCLevel + 2;
+    linesExpected = args.lastCLevel + 2;
     if len(result) != linesExpected:
         raise RuntimeError("ERROR: number of result lines=%d is different that expected %d\n%s" % (len(result), linesExpected, '\n'.join(result)))
     with open(resultsFileName, "a") as myfile:
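The bench invocation above is driven entirely by args.lastCLevel; with the default of 5 (set below) the command expands to programs/zstd -qi5b1e5 <file>: -q quiet, -i5 a minimum measurement time of 5 seconds, -b1/-e5 benchmark levels 1 through 5. The sanity check then expects one output line per level plus two more:

    # A sketch of the line-count expectation, assuming the default level range.
    lastCLevel = 5                    # default of --lastCLevel (see argparse below)
    linesExpected = lastCLevel + 2    # one result line per level, plus two extra
    assert linesExpected == 7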
@@ -128,16 +131,18 @@ def benchmark_and_compare(branch, commit, resultsFileName, lastCLevel, testFileP
     if (last_cspeed == None):
         log("WARNING: No data for comparison for branch=%s file=%s " % (branch, fileName))
         return ""
-    commit, cspeed, dspeed = get_last_results(resultsFileName)
+    commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
     text = ""
     for i in range(0, min(len(cspeed), len(last_cspeed))):
-        print("%s:%s -%d cspeed=%6.2f clast=%6.2f cdiff=%1.4f dspeed=%6.2f dlast=%6.2f ddiff=%1.4f %s" % (branch, commit, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], fileName))
-        if (cspeed[i]/last_cspeed[i] < lower_limit):
-            text += "WARNING: -%d cspeed=%.2f clast=%.2f cdiff=%.4f %s\n" % (i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], fileName)
-        if (dspeed[i]/last_dspeed[i] < lower_limit):
-            text += "WARNING: -%d dspeed=%.2f dlast=%.2f ddiff=%.4f %s\n" % (i+1, dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], fileName)
+        print("%s:%s -%d cSpeed=%6.2f cLast=%6.2f cDiff=%1.4f dSpeed=%6.2f dLast=%6.2f dDiff=%1.4f ratioDiff=%1.4f %s" % (branch, commit, i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], float(csize[i])/last_csize[i], fileName))
+        if (cspeed[i]/last_cspeed[i] < args.lowerLimit):
+            text += "WARNING: -%d cSpeed=%.2f cLast=%.2f cDiff=%.4f %s\n" % (i+1, cspeed[i], last_cspeed[i], cspeed[i]/last_cspeed[i], fileName)
+        if (dspeed[i]/last_dspeed[i] < args.lowerLimit):
+            text += "WARNING: -%d dSpeed=%.2f dLast=%.2f dDiff=%.4f %s\n" % (i+1, dspeed[i], last_dspeed[i], dspeed[i]/last_dspeed[i], fileName)
+        if (float(csize[i])/last_csize[i] < args.ratioLimit):
+            text += "WARNING: -%d cSize=%d last_cSize=%d diff=%.4f %s\n" % (i+1, csize[i], last_csize[i], float(csize[i])/last_csize[i], fileName)
     if text:
-        text = message + ("\nmaxLoadAvg=%s load average at start=%s end=%s\n" % (maxLoadAvg, start_load, end_load)) + text
+        text = args.message + ("\nmaxLoadAvg=%s load average at start=%s end=%s\n" % (args.maxLoadAvg, start_load, end_load)) + text
     return text
 
 
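A worked example of the new size comparison, with hypothetical byte counts and the default ratioLimit of 0.999: the warning line is emitted whenever the current compressed size divided by the last recorded one falls below the limit, i.e. when it moves by more than 0.1% in the downward direction:

    ratioLimit = 0.999                            # default of --ratioLimit (below)
    csize, last_csize = [73500000], [73651590]    # hypothetical compressed sizes
    diff = float(csize[0]) / last_csize[0]        # ~0.9979
    print("%.4f" % diff, diff < ratioLimit)       # 0.9979 True -> warning emitted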
@@ -161,17 +166,17 @@ def test_commit(branch, commit, last_commit, args, testFilePaths, have_mutt, hav
     for filePath in testFilePaths:
         fileName = filePath.rpartition('/')[2]
         resultsFileName = working_path + "/results_" + branch.replace("/", "_") + "_" + fileName.replace(".", "_") + ".txt"
-        last_commit, cspeed, dspeed = get_last_results(resultsFileName)
+        last_commit, csize, cspeed, dspeed = get_last_results(resultsFileName)
         if not args.dry_run:
-            text = benchmark_and_compare(branch, commit, resultsFileName, args.lastCLevel, filePath, fileName, cspeed, dspeed, args.lowerLimit, args.maxLoadAvg, args.message)
+            text = benchmark_and_compare(branch, commit, args, resultsFileName, filePath, fileName, csize, cspeed, dspeed)
             if text:
                 log("WARNING: redoing tests for branch %s: commit %s" % (branch, commit))
-                text = benchmark_and_compare(branch, commit, resultsFileName, args.lastCLevel, filePath, fileName, cspeed, dspeed, args.lowerLimit, args.maxLoadAvg, args.message)
+                text = benchmark_and_compare(branch, commit, args, resultsFileName, filePath, fileName, csize, cspeed, dspeed)
             if text:
                 text_to_send.append(text)
                 results_files += resultsFileName + " "
     if text_to_send:
-        send_email_with_attachments(branch, commit, last_commit, args.emails, text_to_send, results_files, logFileName, args.lowerLimit, have_mutt, have_mail)
+        send_email_with_attachments(branch, commit, last_commit, args, text_to_send, results_files, logFileName, have_mutt, have_mail)
 
 
 if __name__ == '__main__':
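The signature changes in this hunk are plumbing for the same feature: rather than forwarding individual fields (args.lastCLevel, args.lowerLimit, args.maxLoadAvg, args.message, args.emails), the call sites now pass the parsed argparse namespace through whole, so a new threshold such as --ratioLimit becomes visible everywhere without touching every parameter list again.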
@@ -181,6 +186,7 @@ if __name__ == '__main__':
     parser.add_argument('--message', help='attach an additional message to e-mail', default="")
     parser.add_argument('--repoURL', help='changes default repository URL', default=default_repo_url)
     parser.add_argument('--lowerLimit', type=float, help='send email if speed is lower than given limit', default=0.98)
+    parser.add_argument('--ratioLimit', type=float, help='send email if ratio is lower than given limit', default=0.999)
     parser.add_argument('--maxLoadAvg', type=float, help='maximum load average to start testing', default=0.75)
     parser.add_argument('--lastCLevel', type=int, help='last compression level for testing', default=5)
     parser.add_argument('--sleepTime', type=int, help='frequency of repository checking in seconds', default=300)
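With the option registered, the threshold can be tuned per run; assuming the script's existing positional test-file and e-mail arguments, an invocation might look like ./test-zstd-speed.py silesia.tar user@example.com --ratioLimit 0.995, which loosens the check from 0.1% to 0.5% of size drift.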
@@ -216,6 +222,7 @@ if __name__ == '__main__':
     print("emails=%s" % args.emails)
     print("maxLoadAvg=%s" % args.maxLoadAvg)
     print("lowerLimit=%s" % args.lowerLimit)
+    print("ratioLimit=%s" % args.ratioLimit)
     print("lastCLevel=%s" % args.lastCLevel)
     print("sleepTime=%s" % args.sleepTime)
     print("dry_run=%s" % args.dry_run)