@@ -1,25 +1,21 @@
+import numpy as np
 import pytest
-
-from mpi4py import MPI
-
 from _pytest._code.code import (
     ExceptionChainRepr,
     ReprTraceback,
-    ReprEntry,
     ReprEntryNative,
     ReprFileLocation,
 )
+from mpi4py import MPI
 
+from .algo import partition, lower_bound
 from .utils import (
     number_of_working_processes,
     get_n_proc_for_test,
     mark_skip,
     add_n_procs,
     is_dyn_master_process,
 )
-from .algo import partition, lower_bound
-import operator
-import numpy as np
 
 
 def gather_report(mpi_reports, n_sub_rank):
@@ -133,8 +129,8 @@ def pytest_collection_modifyitems(self, config, items):
     @pytest.hookimpl(hookwrapper=True, tryfirst=True)
     def pytest_runtestloop(self, session) -> bool:
         outcome = yield
-
-        # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) when no test run on non-master
+        # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED)
+        # when no test run on non-master
         if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True
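Aside: the `hookwrapper=True` hook above lets the plugin run code after pytest's default test loop has completed, which is where `session.testscollected` is patched on non-master ranks. For readers unfamiliar with the pattern, here is a minimal standalone sketch (illustration only, not taken from this repository):

```python
import pytest

@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtestloop(session):
    # code before `yield` runs ahead of the wrapped implementations;
    # pytest's default test loop executes during the yield itself
    outcome = yield
    result = outcome.get_result()  # True when the loop finished normally
    # code after `yield` runs once the tests have finished, so session-level
    # state such as `session.testscollected` can be adjusted here
```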
@@ -156,25 +152,25 @@ def pytest_runtest_logreport(self, report):
 
 def group_items_by_parallel_steps(items, n_workers):
     add_n_procs(items)
-    items.sort(key=lambda item: item.n_procs, reverse=True)
+    items.sort(key=lambda item: item.n_proc, reverse=True)
 
     remaining_n_procs_by_step = []
     items_by_step = []
     items_to_skip = []
     for item in items:
-        if item.n_procs > n_workers:
+        if item.n_proc > n_workers:
             items_to_skip += [item]
         else:
             found_step = False
             for idx, remaining_procs in enumerate(remaining_n_procs_by_step):
-                if item.n_procs <= remaining_procs:
+                if item.n_proc <= remaining_procs:
                     items_by_step[idx] += [item]
-                    remaining_n_procs_by_step[idx] -= item.n_procs
+                    remaining_n_procs_by_step[idx] -= item.n_proc
                     found_step = True
                     break
             if not found_step:
                 items_by_step += [[item]]
-                remaining_n_procs_by_step += [n_workers - item.n_procs]
+                remaining_n_procs_by_step += [n_workers - item.n_proc]
 
     return items_by_step, items_to_skip
 
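Aside: `group_items_by_parallel_steps` is essentially first-fit decreasing bin packing: tests are sorted by decreasing `n_proc`, each test goes into the first step that still has enough free workers, and tests needing more than `n_workers` are set aside to be skipped. A hand-worked trace on hypothetical items (`FakeItem` is only a stand-in for a collected pytest item whose `n_proc` would normally be set by `add_n_procs`):

```python
from dataclasses import dataclass

@dataclass
class FakeItem:
    name: str
    n_proc: int  # number of MPI processes the test requires

items = [FakeItem("t_a", 1), FakeItem("t_b", 4), FakeItem("t_c", 2),
         FakeItem("t_d", 3), FakeItem("t_e", 5)]
n_workers = 4

# Grouping traced by hand, after sorting by decreasing n_proc:
#   t_e (5 procs) -> skipped, needs more than the 4 available workers
#   t_b (4)       -> new step 0 (0 procs left in step 0)
#   t_d (3)       -> new step 1 (1 proc left in step 1)
#   t_c (2)       -> new step 2 (2 procs left in step 2)
#   t_a (1)       -> fits in the remaining proc of step 1
# Expected outcome: steps [[t_b], [t_d, t_a], [t_c]], skipped [t_e]
```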
@@ -332,24 +328,25 @@ def sub_comm_from_ranks(global_comm, sub_ranks):
 
 
 def item_with_biggest_admissible_n_proc(items, n_av_procs):
-    key = lambda item: item.n_procs
+    def _key(item):
+        return item.n_proc
+
     # Preconditions:
     # sorted(items, key)
     # len(items)>0
 
     # best choices: tests requiring the most procs while still 'runnable'
     # among those, we favor the first in the array for 'stability' reasons (no reordering when not needed)
-    idx = lower_bound(items, n_av_procs, key)
-    if idx == 0 and items[idx].n_procs > n_av_procs:  # all items ask too much
+    idx = lower_bound(items, n_av_procs, _key)
+    if idx == 0 and items[idx].n_proc > n_av_procs:  # all items ask too much
         return -1
-    elif idx < len(items) and items[idx].n_procs == n_av_procs:
+    if idx < len(items) and items[idx].n_proc == n_av_procs:
         # we find the first internal item with matching n_proc
         return idx
-    else:
-        # we did not find an item with exactly the matching n_proc,
-        # in this case, the item just before gives the new n_proc we are searching for
-        max_needed_n_proc = items[idx - 1].n_procs
-        return lower_bound(items, max_needed_n_proc, key)
+    # we did not find an item with exactly the matching n_proc,
+    # in this case, the item just before gives the new n_proc we are searching for
+    max_needed_n_proc = items[idx - 1].n_proc
+    return lower_bound(items, max_needed_n_proc, _key)
 
 
 def mark_original_index(items):
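Aside: assuming `lower_bound` from `.algo` behaves like C++ `std::lower_bound` (first index whose key is not less than the searched value, on a list sorted in ascending `n_proc` order), `item_with_biggest_admissible_n_proc` returns the index of the first item with the largest `n_proc` that still fits into `n_av_procs`, or -1 if nothing fits. A hand-worked trace with hypothetical values:

```python
# Hypothetical trace, assuming lower_bound(items, v, key) returns the first
# index i such that key(items[i]) >= v on the ascending-sorted list.
# items sorted by n_proc:  [1, 1, 2, 2, 4, 8]   (indices 0..5)
#
# n_av_procs = 3:
#   lower_bound(..., 3) -> idx = 4 (first n_proc >= 3 is 4)
#   items[4].n_proc != 3, so take items[3].n_proc == 2 as the best fit
#   and return lower_bound(..., 2) == 2, the *first* item needing 2 procs
# n_av_procs = 4:
#   lower_bound(..., 4) -> idx = 4 and items[4].n_proc == 4 -> return 4
# n_av_procs = 10 (more than any test needs):
#   idx == len(items); take items[5].n_proc == 8 and return index 5
# n_av_procs = 0 (less than the smallest n_proc):
#   idx == 0 and items[0].n_proc > n_av_procs -> return -1
```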
@@ -372,7 +369,7 @@ def mark_original_index(items):
 
 ####### Server #######
 def schedule_test(item, available_procs, inter_comm):
-    n_procs = item.n_procs
+    n_procs = item.n_proc
     original_idx = item.original_index
 
     sub_ranks = []
@@ -411,7 +408,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm):
 
     # get associated item
     item = items_to_run[original_idx]
-    n_proc = item.n_procs
+    n_proc = item.n_proc
     sub_ranks = item.sub_ranks
     assert first_rank_done in sub_ranks
 
@@ -479,7 +476,7 @@ def pytest_pyfunc_call(self, pyfuncitem):
 
     @pytest.hookimpl(tryfirst=True)
     def pytest_runtestloop(self, session) -> bool:
-        # same begining as PyTest default's
+        # same beginning as PyTest default's
         if (
             session.testsfailed
             and not session.config.option.continue_on_collection_errors
@@ -499,7 +496,7 @@ def pytest_runtestloop(self, session) -> bool:
         add_n_procs(session.items)
 
         ## isolate skips
-        has_enough_procs = lambda item: item.n_procs <= n_workers
+        has_enough_procs = lambda item: item.n_proc <= n_workers
         items_to_run, items_to_skip = partition(session.items, has_enough_procs)
 
         ## remember original position
@@ -517,7 +514,7 @@ def pytest_runtestloop(self, session) -> bool:
             run_item_test(item, nextitem, session)
 
         # schedule tests to run
-        items_left_to_run = sorted(items_to_run, key=lambda item: item.n_procs)
+        items_left_to_run = sorted(items_to_run, key=lambda item: item.n_proc)
         available_procs = np.ones(n_workers, dtype=np.int8)
 
         while len(items_left_to_run) > 0:
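The body of this `while` loop is outside the hunk. For orientation only, a heavily simplified sketch of how the pieces visible in this diff could fit together is given below; it assumes `available_procs[i] == 1` marks worker `i` as free and that `inter_comm` is reachable in this scope, and it is not the file's actual loop body:

```python
# Sketch only, not the committed code: pick the biggest test that fits the
# currently free workers, otherwise wait for a running test to release procs.
while len(items_left_to_run) > 0:
    n_av_procs = int(np.sum(available_procs))  # free workers right now
    idx = item_with_biggest_admissible_n_proc(items_left_to_run, n_av_procs)
    if idx == -1:
        # nothing fits yet: block until a running test completes
        wait_test_to_complete(items_to_run, session, available_procs, inter_comm)
    else:
        item = items_left_to_run.pop(idx)
        schedule_test(item, available_procs, inter_comm)
```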
@@ -576,11 +573,7 @@ def pytest_runtest_logreport(self, report):
                 report
             )  # has been 'run' locally: do nothing special
         else:
-            assert (
-                report.when == "setup"
-                or report.when == "call"
-                or report.when == "teardown"
-            )  # only know tags
+            assert report.when in ("setup", "call", "teardown")  # only known tags
             tag = WHEN_TAGS[report.when]
 
             # master ranks of each sub_comm must send their report to rank 0
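The sending code itself is not part of this hunk. As an illustration only, one plausible way for a sub_comm master to forward a picklable report payload to global rank 0 with mpi4py, tagged by test phase, is sketched below; the real contents of `WHEN_TAGS` and the real payload are not shown in this diff, so both are assumptions here:

```python
# Sketch only: phase-to-tag mapping and payload are assumptions, not the
# plugin's actual WHEN_TAGS or report serialization.
WHEN_TAGS = {"setup": 1, "call": 2, "teardown": 3}

def send_result_to_rank_0(global_comm, report_payload, when):
    # mpi4py's lowercase send/recv pickle arbitrary (picklable) Python objects
    global_comm.send(report_payload, dest=0, tag=WHEN_TAGS[when])

def recv_result(global_comm, source, when):
    return global_comm.recv(source=source, tag=WHEN_TAGS[when])
```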