#!/usr/bin/env python3
#
# Parallel VM test case executor
# Copyright (c) 2014-2024, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
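#
# Example invocation (assuming this script is saved as parallel-vm.py):
#   ./parallel-vm.py 8 --long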

from __future__ import print_function
import curses
import fcntl
import logging
import multiprocessing
import os
import selectors
import shutil
import signal
import subprocess
import sys
import time
import errno

logger = logging.getLogger()

# Test cases that take significantly longer time to execute than average.
long_tests = ["ap_roam_open",
              "hostapd_oom_wpa2_eap_connect_1",
              "hostapd_oom_wpa2_eap_connect_2",
              "hostapd_oom_wpa2_eap_connect_3",
              "hostapd_oom_wpa2_eap_connect_4",
              "hostapd_oom_wpa2_eap_connect_5",
              "ap_wpa2_eap_eke_many",
              "wpas_mesh_password_mismatch_retry",
              "wpas_mesh_password_mismatch",
              "hostapd_oom_wpa2_psk_connect",
              "ap_hs20_fetch_osu_stop",
              "ap_roam_wpa2_psk",
              "ibss_wpa_none_ccmp",
              "nfc_wps_er_handover_pk_hash_mismatch_sta",
              "go_neg_peers_force_diff_freq",
              "p2p_cli_invite",
              "sta_ap_scan_2b",
              "ap_pmf_sta_unprot_deauth_burst",
              "ap_bss_add_remove_during_ht_scan",
              "wext_scan_hidden",
              "autoscan_exponential",
              "nfc_p2p_client",
              "wnm_bss_keep_alive",
              "ap_inactivity_disconnect",
              "scan_bss_expiration_age",
              "autoscan_periodic",
              "discovery_group_client",
              "concurrent_p2pcli",
              "ap_bss_add_remove",
              "wpas_ap_wps",
              "wext_pmksa_cache",
              "ibss_wpa_none",
              "ap_ht_40mhz_intolerant_ap",
              "ibss_rsn",
              "discovery_pd_retries",
              "ap_wps_setup_locked_timeout",
              "ap_vht160",
              "he160",
              "he160b",
              "dfs_radar",
              "dfs",
              "dfs_ht40_minus",
              "dfs_etsi",
              "dfs_radar_vht80_downgrade",
              "ap_acs_dfs",
              "grpform_cred_ready_timeout",
              "wpas_ap_dfs",
              "autogo_many",
              "hostapd_oom_wpa2_eap",
              "ibss_open",
              "proxyarp_open_ebtables",
              "proxyarp_open_ebtables_ipv6",
              "radius_failover",
              "obss_scan_40_intolerant",
              "dbus_connect_oom",
              "proxyarp_open",
              "proxyarp_open_ipv6",
              "ap_wps_iteration",
              "ap_wps_iteration_error",
              "ap_wps_pbc_timeout",
              "ap_wps_pbc_ap_timeout",
              "ap_wps_pin_ap_timeout",
              "ap_wps_http_timeout",
              "p2p_go_move_reg_change",
              "p2p_go_move_active",
              "p2p_go_move_scm",
              "p2p_go_move_scm_peer_supports",
              "p2p_go_move_scm_peer_does_not_support",
              "p2p_go_move_scm_multi"]

# Test cases that have common, but random, issues with UML.
uml_issue_tests = ["eht_connect_invalid_link",
                   "eht_mld_connect_probes",
                   "gas_anqp_address3_ap_forced",
                   "gas_anqp_address3_ap_non_compliant",
                   "gas_anqp_address3_not_assoc",
                   "gas_anqp_address3_assoc",
                   "p2p_channel_random_social_with_op_class_change",
                   "ap_open_ps_mc_buf",
                   "wmediumd_path_rann",
                   "mbo_assoc_disallow",
                   "ieee8021x_reauth_wep"]

# Test cases that depend on dumping full process memory.
memory_read_tests = ["ap_wpa2_eap_fast_pac_lifetime",
                     "wpa2_eap_ttls_pap_key_lifetime_in_memory",
                     "wpa2_eap_peap_gtc_key_lifetime_in_memory",
                     "ft_psk_key_lifetime_in_memory",
                     "wpa2_psk_key_lifetime_in_memory",
                     "ap_wpa2_tdls_long_lifetime",
                     "ap_wpa2_tdls_wrong_lifetime_resp",
                     "erp_key_lifetime_in_memory",
                     "sae_key_lifetime_in_memory",
                     "sae_okc_pmk_lifetime",
                     "sae_pmk_lifetime",
                     "wpas_ap_lifetime_in_memory",
                     "wpas_ap_lifetime_in_memory2"]

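# Return the combined list of failed test case names from all VMs.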
def get_failed(vm):
    failed = []
    for i in range(num_servers):
        failed += vm[i]['failed']
    return failed

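# Read available stdout output from a VM (non-blocking) and process any
# complete lines: READY/PASS/FAIL/NOT-FOUND/SKIP update the global counters
# and per-VM state, REASON records a skip reason, and START tracks the test
# sequence executed before the first failure in that VM. Returns True when
# the VM is ready for its next command.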
def vm_read_stdout(vm, test_queue):
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures
    global first_run_failures
    global all_failed

    ready = False
    try:
        out = vm['proc'].stdout.read()
        if out is None:
            return False
        out = out.decode()
    except IOError as e:
        if e.errno == errno.EAGAIN:
            return False
        raise
    vm['last_stdout'] = time.time()
    logger.debug("VM[%d] stdout.read[%s]" % (vm['idx'], out.rstrip()))
    pending = vm['pending'] + out
    lines = []
    while True:
        pos = pending.find('\n')
        if pos < 0:
            break
        line = pending[0:pos].rstrip()
        pending = pending[(pos + 1):]
        logger.debug("VM[%d] stdout full line[%s]" % (vm['idx'], line))
        if line.startswith("READY"):
            vm['starting'] = False
            vm['started'] = True
            ready = True
        elif line.startswith("PASS"):
            ready = True
            total_passed += 1
            vm['current_name'] = None
        elif line.startswith("FAIL"):
            ready = True
            total_failed += 1
            vals = line.split(' ')
            if len(vals) < 2:
                logger.info("VM[%d] incomplete FAIL line: %s" % (vm['idx'],
                                                                 line))
                name = line
            else:
                name = vals[1]
            logger.debug("VM[%d] test case failed: %s" % (vm['idx'], name))
            vm['failed'].append(name)
            all_failed.append(name)
            if name != vm['current_name']:
                logger.info("VM[%d] test result mismatch: %s (expected %s)" % (vm['idx'], name, vm['current_name']))
            else:
                vm['current_name'] = None
                count = vm['current_count']
                if count == 0:
                    first_run_failures.append(name)
                if rerun_failures and count < 1:
                    logger.debug("Requeue test case %s" % name)
                    test_queue.append((name, vm['current_count'] + 1))
        elif line.startswith("NOT-FOUND"):
            ready = True
            total_failed += 1
            logger.info("VM[%d] test case not found" % vm['idx'])
        elif line.startswith("SKIP"):
            ready = True
            total_skipped += 1
            vm['current_name'] = None
        elif line.startswith("REASON"):
            vm['skip_reason'].append(line[7:])
        elif line.startswith("START"):
            total_started += 1
            if len(vm['failed']) == 0:
                vals = line.split(' ')
                if len(vals) >= 2:
                    vm['fail_seq'].append(vals[1])
        vm['out'] += line + '\n'
        lines.append(line)
    vm['pending'] = pending
    return ready

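# Start the VM subprocess, switch its stdout/stderr to non-blocking mode,
# and register both streams with the selector.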
def start_vm(vm, sel):
    logger.info("VM[%d] starting up" % (vm['idx'] + 1))
    vm['starting'] = True
    vm['proc'] = subprocess.Popen(vm['cmd'],
                                  stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
    vm['cmd'] = None
    for stream in [vm['proc'].stdout, vm['proc'].stderr]:
        fd = stream.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        sel.register(stream, selectors.EVENT_READ, vm)

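# Return the number of VMs that are currently starting up.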
def num_vm_starting():
    count = 0
    for i in range(num_servers):
        if vm[i]['starting']:
            count += 1
    return count

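# Drain any pending stderr output from the VM (non-blocking) into vm['err'].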
def vm_read_stderr(vm):
    try:
        err = vm['proc'].stderr.read()
        if err is not None:
            err = err.decode()
            if len(err) > 0:
                vm['err'] += err
                logger.info("VM[%d] stderr.read[%s]" % (vm['idx'], err))
    except IOError as e:
        if e.errno != errno.EAGAIN:
            raise

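# Send the next queued test case name to the VM on its stdin, or an empty
# line to tell the VM to shut down once the queue is empty. Updates the
# status line for this VM on the screen.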
def vm_next_step(_vm, scr, test_queue):
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers
    if status_line >= max_y:
        status_line = max_y - 1
    if _vm['idx'] < status_line:
        scr.move(_vm['idx'], 10)
        scr.clrtoeol()
    if not test_queue:
        _vm['proc'].stdin.write(b'\n')
        _vm['proc'].stdin.flush()
        if _vm['idx'] < status_line:
            scr.addstr("shutting down")
        logger.info("VM[%d] shutting down" % _vm['idx'])
        return
    (name, count) = test_queue.pop(0)
    _vm['current_name'] = name
    _vm['current_count'] = count
    _vm['proc'].stdin.write(name.encode() + b'\n')
    _vm['proc'].stdin.flush()
    if _vm['idx'] < status_line:
        scr.addstr(name)
    logger.debug("VM[%d] start test %s" % (_vm['idx'], name))

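# Start another VM if there is enough queued work for it, limiting the
# number of concurrently starting VMs to avoid overloading the host.
# Returns (running, updated) where running indicates that at least one VM
# process is alive.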
def check_vm_start(scr, sel, test_queue):
    running = False
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers
    if status_line >= max_y:
        status_line = max_y - 1
    for i in range(num_servers):
        if vm[i]['proc']:
            running = True
            continue

        # Either not yet started or already stopped VM
        max_start = multiprocessing.cpu_count()
        if max_start > 4:
            max_start /= 2
        num_starting = num_vm_starting()
        if vm[i]['cmd'] and len(test_queue) > num_starting and \
           num_starting < max_start:
            if i < status_line:
                scr.move(i, 10)
                scr.clrtoeol()
                scr.addstr(i, 10, "starting VM")
            start_vm(vm[i], sel)
            return True, True

    return running, False

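# Clean up after a VM process has exited: unregister its streams, check the
# console log for kernel panics, and record (and possibly requeue) a test
# case that did not complete.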
def vm_terminated(_vm, scr, sel, test_queue):
    logger.debug("VM[%d] terminated" % _vm['idx'])
    updated = False
    for stream in [_vm['proc'].stdout, _vm['proc'].stderr]:
        sel.unregister(stream)
    _vm['proc'] = None
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers
    if status_line >= max_y:
        status_line = max_y - 1
    if _vm['idx'] < status_line:
        scr.move(_vm['idx'], 10)
        scr.clrtoeol()
    log = '{}/{}.srv.{}/console'.format(dir, timestamp, _vm['idx'] + 1)
    with open(log, 'r') as f:
        if "Kernel panic" in f.read():
            if _vm['idx'] < status_line:
                scr.addstr("kernel panic")
            logger.info("VM[%d] kernel panic" % _vm['idx'])
            updated = True
    if test_queue:
        num_vm = 0
        for i in range(num_servers):
            if vm[i]['proc']:
                num_vm += 1
        if len(test_queue) > num_vm:
            if _vm['idx'] < status_line:
                scr.addstr("unexpected exit")
            logger.info("VM[%d] unexpected exit" % _vm['idx'])
            updated = True

    if _vm['current_name']:
        global total_failed, all_failed, first_run_failures, rerun_failures
        name = _vm['current_name']
        if _vm['idx'] < status_line:
            if updated:
                scr.addstr(" - ")
            scr.addstr(name + " - did not complete")
        logger.info("VM[%d] did not complete test %s" % (_vm['idx'], name))
        total_failed += 1
        _vm['failed'].append(name)
        all_failed.append(name)

        count = _vm['current_count']
        if count == 0:
            first_run_failures.append(name)
        if rerun_failures and count < 1:
            logger.debug("Requeue test case %s" % name)
            test_queue.append((name, count + 1))
        updated = True

    return updated

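# Redraw the total progress line and the list of the most recent failures.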
def update_screen(scr, total_tests):
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers
    if status_line >= max_y:
        status_line = max_y - 1
    scr.move(status_line, 10)
    scr.clrtoeol()
    scr.addstr("{} %".format(int(100.0 * (total_passed + total_failed + total_skipped) / total_tests)))
    scr.addstr(status_line, 20,
               "TOTAL={} STARTED={} PASS={} FAIL={} SKIP={}".format(total_tests, total_started, total_passed, total_failed, total_skipped))
    global all_failed
    max_y, max_x = scr.getmaxyx()
    max_lines = max_y - num_servers - 3
    if len(all_failed) > 0 and max_lines > 0 and num_servers + 1 < max_y - 1:
        scr.move(num_servers + 1, 0)
        scr.addstr("Last failed test cases:")
        if max_lines >= len(all_failed):
            max_lines = len(all_failed)
        count = 0
        for i in range(len(all_failed) - max_lines, len(all_failed)):
            count += 1
            if num_servers + 2 + count >= max_y:
                break
            scr.move(num_servers + 1 + count, 0)
            scr.addstr(all_failed[i])
            scr.clrtoeol()
    scr.refresh()

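# Check whether uml_mconsole is available and this VM has a UML control
# directory under ~/.uml.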
def has_uml_mconsole(_vm):
    if not shutil.which('uml_mconsole'):
        return False
    dir = os.path.join(os.path.expanduser('~/.uml'), 'hwsim-' + _vm['DATE'])
    return os.path.exists(dir)

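# Halt a hung UML VM with uml_mconsole; if that times out, fall back to
# sending SIGTERM to the VM process.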
def kill_uml_vm(_vm):
    fname = os.path.join(os.path.expanduser('~/.uml'), 'hwsim-' + _vm['DATE'],
                         'pid')
    with open(fname, 'r') as f:
        pid = int(f.read())
    logger.info("Kill hung VM[%d] PID[%d]" % (_vm['idx'] + 1, pid))
    try:
        subprocess.call(['uml_mconsole', 'hwsim-' + _vm['DATE'],
                         'halt'], stdout=open('/dev/null', 'w'),
                        timeout=5)
    except subprocess.TimeoutExpired:
        logger.info("uml_mconsole timed out - kill UML process")
        try:
            os.kill(pid, signal.SIGTERM)
        except Exception as e:
            logger.info("os.kill() failed: " + str(e))

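# Main control loop: start VMs, hand out test cases, process VM output with
# a selector, and terminate VMs that have stopped producing output.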
def show_progress(scr):
    global num_servers
    global vm
    global dir
    global timestamp
    global tests
    global first_run_failures
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures

    sel = selectors.DefaultSelector()
    total_tests = len(tests)
    logger.info("Total tests: %d" % total_tests)
    test_queue = [(t, 0) for t in tests]
    start_vm(vm[0], sel)

    scr.leaveok(1)
    max_y, max_x = scr.getmaxyx()
    status_line = num_servers
    if status_line >= max_y:
        status_line = max_y - 1
    for i in range(0, num_servers):
        if i < status_line:
            scr.addstr(i, 0, "VM %d:" % (i + 1), curses.A_BOLD)
            status = "starting VM" if vm[i]['proc'] else "not yet started"
            scr.addstr(i, 10, status)
    scr.addstr(status_line, 0, "Total:", curses.A_BOLD)
    scr.addstr(status_line, 20, "TOTAL={} STARTED=0 PASS=0 FAIL=0 SKIP=0".format(total_tests))
    scr.refresh()

    while True:
        updated = False
        events = sel.select(timeout=1)
        for key, mask in events:
            _vm = key.data
            if not _vm['proc']:
                continue
            vm_read_stderr(_vm)
            if vm_read_stdout(_vm, test_queue):
                vm_next_step(_vm, scr, test_queue)
                updated = True
            vm_read_stderr(_vm)
            if _vm['proc'].poll() is not None:
                vm_terminated(_vm, scr, sel, test_queue)
                updated = True

        running, run_update = check_vm_start(scr, sel, test_queue)
        if updated or run_update:
            update_screen(scr, total_tests)
        if not running:
            break

        max_y, max_x = scr.getmaxyx()
        status_line = num_servers
        if status_line >= max_y:
            status_line = max_y - 1
        updated = False

        now = time.time()
        for i in range(num_servers):
            _vm = vm[i]
            if not _vm['proc']:
                continue
            last = _vm['last_stdout']
            if last and now - last > 10:
                if _vm['idx'] < status_line:
                    scr.move(_vm['idx'], max_x - 25)
                    scr.clrtoeol()
                    scr.addstr("(no update in %d s)" % (now - last))
                    updated = True
            if (not _vm['killed']) and has_uml_mconsole(_vm) and \
               last and now - last > 120:
                if _vm['idx'] < status_line:
                    scr.move(_vm['idx'], 10)
                    scr.clrtoeol()
                    scr.addstr("terminating due to no updates received")
                _vm['killed'] = True
                kill_uml_vm(_vm)
        if updated:
            scr.refresh()

    sel.close()

    for i in range(num_servers):
        if not vm[i]['proc']:
            continue
        vm[i]['proc'] = None
        scr.move(i, 10)
        scr.clrtoeol()
        scr.addstr("still running")
        logger.info("VM[%d] still running" % i)

    scr.refresh()
    time.sleep(0.3)

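# Return True if a stdout line matches the expected output format of the
# test run; anything else is reported as unexpected output at the end.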
def known_output(tests, line):
    if not line:
        return True
    if line in tests:
        return True
    known = ["START ", "PASS ", "FAIL ", "SKIP ", "REASON ", "ALL-PASSED",
             "READY",
             "  ", "Exception: ", "Traceback (most recent call last):",
             "./run-all.sh: running",
             "./run-all.sh: passing",
             "Test run completed", "Logfiles are at", "Starting test run",
             "passed all", "skipped ", "failed tests:"]
    for k in known:
        if line.startswith(k):
            return True
    return False

def main():
    import argparse
    global num_servers
    global vm
    global all_failed
    global dir
    global timestamp
    global tests
    global first_run_failures
    global total_started, total_passed, total_failed, total_skipped
    global rerun_failures

    total_started = 0
    total_passed = 0
    total_failed = 0
    total_skipped = 0

    debug_level = logging.INFO
    rerun_failures = True
    start_time = time.time()
    timestamp = int(start_time)

    scriptsdir = os.path.dirname(os.path.realpath(sys.argv[0]))

    p = argparse.ArgumentParser(description='run multiple testing VMs in parallel')
    p.add_argument('num_servers', metavar='number of VMs', type=int, choices=range(1, 100),
                   help="number of VMs to start")
    p.add_argument('-f', dest='testmodules', metavar='<test module>',
                   help='execute only tests from these test modules',
                   type=str, nargs='+')
    p.add_argument('-1', dest='no_retry', action='store_const', const=True, default=False,
                   help="don't retry failed tests automatically")
    p.add_argument('--debug', dest='debug', action='store_const', const=True, default=False,
                   help="enable debug logging")
    p.add_argument('--codecov', dest='codecov', action='store_const', const=True, default=False,
                   help="enable code coverage collection")
    p.add_argument('--shuffle-tests', dest='shuffle', action='store_const', const=True, default=False,
                   help="shuffle test cases to randomize order")
    p.add_argument('--short', dest='short', action='store_const', const=True,
                   default=False,
                   help="only run short-duration test cases")
    p.add_argument('--long', dest='long', action='store_const', const=True,
                   default=False,
                   help="include long-duration test cases")
    p.add_argument('--valgrind', dest='valgrind', action='store_const',
                   const=True, default=False,
                   help="run tests under valgrind")
    p.add_argument('--telnet', dest='telnet', metavar='<baseport>', type=int,
                   help="enable telnet server inside VMs, specify the base port here")
    p.add_argument('--nocurses', dest='nocurses', action='store_const',
                   const=True, default=False, help="Don't use curses for output")
    p.add_argument('params', nargs='*')
    args = p.parse_args()

    dir = os.environ.get('HWSIM_TEST_LOG_DIR', '/tmp/hwsim-test-logs')
    try:
        os.makedirs(dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    num_servers = args.num_servers
    rerun_failures = not args.no_retry
    if args.debug:
        debug_level = logging.DEBUG
    extra_args = []
    if args.valgrind:
        extra_args += ['--valgrind']
    if args.long:
        extra_args += ['--long']
    if args.codecov:
        print("Code coverage - build separate binaries")
        logdir = os.path.join(dir, str(timestamp))
        os.makedirs(logdir)
        subprocess.check_call([os.path.join(scriptsdir, 'build-codecov.sh'),
                               logdir])
        codecov_args = ['--codecov_dir', logdir]
        codecov = True
    else:
        codecov_args = []
        codecov = False

    first_run_failures = []
    if args.params:
        tests = args.params
    else:
        tests = []
        cmd = [os.path.join(os.path.dirname(scriptsdir), 'run-tests.py'), '-L']
        if args.testmodules:
            cmd += ["-f"]
            cmd += args.testmodules
        lst = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for l in lst.stdout.readlines():
            name = l.decode().split(' ')[0]
            tests.append(name)
    if len(tests) == 0:
        sys.exit("No test cases selected")

    if args.shuffle:
        from random import shuffle
        shuffle(tests)
    elif num_servers > 2 and len(tests) > 100:
        # Move long-duration test cases to the beginning of the queue as an
        # optimization to avoid the last part of the test execution running
        # a long-duration test case on a single VM while all other VMs have
        # already completed their work.
        for l in long_tests:
            if l in tests:
                tests.remove(l)
                tests.insert(0, l)

        # Move test cases that have shown frequent, but random, issues with
        # UML to the beginning of the run to minimize the risk of false
        # failures.
        for l in uml_issue_tests:
            if l in tests:
                tests.remove(l)
                tests.insert(0, l)

        # Test cases that read the full wpa_supplicant or hostapd process
        # memory can apparently cause resource issues at least with newer
        # python3 versions, so run them first, before possible memory
        # fragmentation increases their resource needs.
        for l in memory_read_tests:
            if l in tests:
                tests.remove(l)
                tests.insert(0, l)
    if args.short:
        tests = [t for t in tests if t not in long_tests]

    logger.setLevel(debug_level)
    if not args.nocurses:
        log_handler = logging.FileHandler('parallel-vm.log')
    else:
        log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setLevel(debug_level)
    fmt = "%(asctime)s %(levelname)s %(message)s"
    log_formatter = logging.Formatter(fmt)
    log_handler.setFormatter(log_formatter)
    logger.addHandler(log_handler)

    all_failed = []
    vm = {}
    for i in range(0, num_servers):
        cmd = [os.path.join(scriptsdir, 'vm-run.sh'),
               '--timestamp', str(timestamp),
               '--ext', 'srv.%d' % (i + 1),
               '-i'] + codecov_args + extra_args
        if args.telnet:
            cmd += ['--telnet', str(args.telnet + i)]
        vm[i] = {}
        vm[i]['idx'] = i
        vm[i]['DATE'] = str(timestamp) + '.srv.' + str(i + 1)
        vm[i]['starting'] = False
        vm[i]['started'] = False
        vm[i]['cmd'] = cmd
        vm[i]['proc'] = None
        vm[i]['out'] = ""
        vm[i]['pending'] = ""
        vm[i]['err'] = ""
        vm[i]['failed'] = []
        vm[i]['fail_seq'] = []
        vm[i]['skip_reason'] = []
        vm[i]['current_name'] = None
        vm[i]['last_stdout'] = None
        vm[i]['killed'] = False
    print('')

    if not args.nocurses:
        curses.wrapper(show_progress)
    else:
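        # Minimal no-op stand-in for the curses screen object, used with
        # --nocurses so that show_progress() can run without curses.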
        class FakeScreen:
            def leaveok(self, n):
                pass
            def refresh(self):
                pass
            def addstr(self, *args, **kw):
                pass
            def move(self, x, y):
                pass
            def clrtoeol(self):
                pass
            def getmaxyx(self):
                return (25, 80)

        show_progress(FakeScreen())

    with open('{}/{}-parallel.log'.format(dir, timestamp), 'w') as f:
        for i in range(0, num_servers):
            f.write('VM {}\n{}\n{}\n'.format(i + 1, vm[i]['out'], vm[i]['err']))
        first = True
        for i in range(0, num_servers):
            for line in vm[i]['out'].splitlines():
                if line.startswith("FAIL "):
                    if first:
                        first = False
                        print("Logs for failed test cases:")
                        f.write("Logs for failed test cases:\n")
                    fname = "%s/%d.srv.%d/%s.log" % (dir, timestamp, i + 1,
                                                     line.split(' ')[1])
                    print(fname)
                    f.write("%s\n" % fname)

    failed = get_failed(vm)

    if first_run_failures:
        print("To re-run same failure sequence(s):")
        for i in range(0, num_servers):
            if len(vm[i]['failed']) == 0:
                continue
            print("./vm-run.sh", end=' ')
            if args.long:
                print("--long", end=' ')
            skip = len(vm[i]['fail_seq'])
            skip -= min(skip, 30)
            for t in vm[i]['fail_seq']:
                if skip > 0:
                    skip -= 1
                    continue
                print(t, end=' ')
            logger.info("Failure sequence: " + " ".join(vm[i]['fail_seq']))
            print('')
        print("Failed test cases:")
        for f in first_run_failures:
            print(f, end=' ')
            logger.info("Failed: " + f)
        print('')
    double_failed = []
    for name in failed:
        double_failed.append(name)
    for test in first_run_failures:
        double_failed.remove(test)
    if not rerun_failures:
        pass
    elif failed and not double_failed:
        print("All failed cases passed on retry")
        logger.info("All failed cases passed on retry")
    elif double_failed:
        print("Failed even on retry:")
        for f in double_failed:
            print(f, end=' ')
            logger.info("Failed on retry: " + f)
        print('')
    res = "TOTAL={} PASS={} FAIL={} SKIP={}".format(total_started,
                                                    total_passed,
                                                    total_failed,
                                                    total_skipped)
    print(res)
    logger.info(res)
    print("Logs: " + dir + '/' + str(timestamp))
    logger.info("Logs: " + dir + '/' + str(timestamp))

    skip_reason = []
    for i in range(num_servers):
        if not vm[i]['started']:
            continue
        skip_reason += vm[i]['skip_reason']
        if len(vm[i]['pending']) > 0:
            logger.info("Unprocessed stdout from VM[%d]: '%s'" %
                        (i, vm[i]['pending']))
        log = '{}/{}.srv.{}/console'.format(dir, timestamp, i + 1)
        with open(log, 'r') as f:
            if "Kernel panic" in f.read():
                print("Kernel panic in " + log)
                logger.info("Kernel panic in " + log)
    missing = {}
    missing['OCV not supported'] = 'OCV'
    missing['sigma_dut not available'] = 'sigma_dut'
    missing['Skip test case with long duration due to --long not specified'] = 'long'
    missing['TEST_ALLOC_FAIL not supported'] = 'TEST_FAIL'
    missing['TEST_ALLOC_FAIL not supported in the build'] = 'TEST_FAIL'
    missing['TEST_FAIL not supported'] = 'TEST_FAIL'
    missing['veth not supported (kernel CONFIG_VETH)'] = 'KERNEL:CONFIG_VETH'
    missing['WPA-EAP-SUITE-B-192 not supported'] = 'CONFIG_SUITEB192'
    missing['WPA-EAP-SUITE-B not supported'] = 'CONFIG_SUITEB'
    missing['wmediumd not available'] = 'wmediumd'
    missing['DPP not supported'] = 'CONFIG_DPP'
    missing['DPP version 2 not supported'] = 'CONFIG_DPP2'
    missing['EAP method PWD not supported in the build'] = 'CONFIG_EAP_PWD'
    missing['EAP method TEAP not supported in the build'] = 'CONFIG_EAP_TEAP'
    missing['FILS not supported'] = 'CONFIG_FILS'
    missing['FILS-SK-PFS not supported'] = 'CONFIG_FILS_SK_PFS'
    missing['OWE not supported'] = 'CONFIG_OWE'
    missing['SAE not supported'] = 'CONFIG_SAE'
    missing['Not using OpenSSL'] = 'CONFIG_TLS=openssl'
    missing['wpa_supplicant TLS library is not OpenSSL: internal'] = 'CONFIG_TLS=openssl'
    missing_items = []
    other_reasons = []
    for reason in sorted(set(skip_reason)):
        if reason in missing:
            missing_items.append(missing[reason])
        elif reason.startswith('OCSP-multi not supported with this TLS library'):
            missing_items.append('OCSP-MULTI')
        else:
            other_reasons.append(reason)
    if missing_items:
        print("Missing items (SKIP):", missing_items)
    if other_reasons:
        print("Other skip reasons:", other_reasons)

    for i in range(num_servers):
        unknown = ""
        for line in vm[i]['out'].splitlines():
            if not known_output(tests, line):
                unknown += line + "\n"
        if unknown:
            print("\nVM %d - unexpected stdout output:\n%s" % (i + 1, unknown))
        if vm[i]['err']:
            print("\nVM %d - unexpected stderr output:\n%s\n" % (i + 1, vm[i]['err']))

    if codecov:
        print("Code coverage - preparing report")
        for i in range(num_servers):
            subprocess.check_call([os.path.join(scriptsdir,
                                                'process-codecov.sh'),
                                   logdir + ".srv.%d" % (i + 1),
                                   str(i)])
        subprocess.check_call([os.path.join(scriptsdir, 'combine-codecov.sh'),
                               logdir])
        print("file://%s/index.html" % logdir)
        logger.info("Code coverage report: file://%s/index.html" % logdir)

    end_time = time.time()
    try:
        cmd = subprocess.Popen(['git', 'describe'], stdout=subprocess.PIPE)
        ver = cmd.stdout.read().decode().strip()
    except Exception:
        ver = "unknown"
    logger.info("Tests run: {}  Tests failed: {}  Total time: {}  Version: {}".format(total_started, total_failed, end_time - start_time, ver))

    if double_failed or (failed and not rerun_failures):
        logger.info("Test run complete - failures found")
        sys.exit(2)
    if failed:
        logger.info("Test run complete - failures found on first run; passed on retry")
        sys.exit(1)
    logger.info("Test run complete - no failures")
    sys.exit(0)

if __name__ == "__main__":
    main()