multiprocessing with pickled sampling / test

2021-05-15 21:15:47 -04:00
parent d0451b3f36
commit 466d7ec7d6
3 changed files with 55 additions and 51 deletions

View File

@@ -1,13 +1,14 @@
 # directly running the DOE because existing surrogates can be explored with another workflow
 from os import path
 import importlib.util
 import multiprocessing
-import pathos.multiprocessing as mp
 import time
 import re
 from numpy import random as r
 from numpy import *
 import statistics
+from functools import partial
 import shutil
 # Surrogate modelling
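
The import swap is what the commit title alludes to: the standard-library Pool ships its task function to workers with pickle, which pathos works around by substituting dill. Once the task is a module-level function bound with functools.partial, plain pickle suffices and the extra dependency is no longer needed. A minimal standalone sketch of that property (hypothetical names, not code from this repository):

    import pickle
    from functools import partial

    def simulate(algName, funcname, initpoint):
        # module-level stand-in for the real simulate()
        return (algName, funcname, initpoint)

    # a partial over a module-level function round-trips through pickle
    bound = partial(simulate, "simulated_annealing", "rosenbrock")
    restored = pickle.loads(pickle.dumps(bound))
    assert restored([0.5, 0.5]) == ("simulated_annealing", "rosenbrock", [0.5, 0.5])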
@@ -31,7 +32,41 @@ class counter:
         self.count += 1
         return self.func(*args, **kwargs)
+def simulate(algName, algPath, funcname, funcpath, objs, args, initpoint):
+    # loading the heuristic object into the namespace and memory
+    spec = importlib.util.spec_from_file_location(algName, algPath)
+    heuristic = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(heuristic)
+    # loading the test function object into the namespace and memory
+    testspec = importlib.util.spec_from_file_location(funcname, funcpath)
+    func = importlib.util.module_from_spec(testspec)
+    testspec.loader.exec_module(func)
+    # defining a countable test function
+    @counter
+    def testfunc(args):
+        return func.main(args)
+    # a try statement guards against exceptions raised in the child process
+    # by the heuristic, the test function, or the pool itself
+    try:
+        # this timer measures the CPU time of the process directly (nanoseconds)
+        tic = time.process_time_ns()
+        # running the test by calling the heuristic script with the test function as argument
+        quality = heuristic.main(testfunc, objs, initpoint, args)
+        toc = time.process_time_ns()
+        # CPU time in seconds, by simple difference of the two stamps
+        cpuTime = (toc - tic) * (10**-9)
+        numCalls = testfunc.count
+        converged = 1
+    except Exception:
+        quality = NaN
+        cpuTime = NaN
+        numCalls = testfunc.count
+        converged = 0
+    return cpuTime, quality, numCalls, converged
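
Hoisting simulate to module scope, and rebuilding the counter-wrapped testfunc inside each worker, is what makes the job picklable at all: the previous version defined testfunc inside measure, and pickle refuses local objects, so they could never be handed to a worker process. A quick illustration of the failure mode (hypothetical names):

    import pickle

    def measure_old():
        def testfunc(x):  # local object: only reachable through this call frame
            return x * x
        return testfunc

    try:
        pickle.dumps(measure_old())
    except (AttributeError, pickle.PicklingError) as err:
        # e.g. AttributeError: Can't pickle local object 'measure_old.<locals>.testfunc'
        print("not picklable:", err)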
 def measure(heuristicpath, heuristic_name, funcpath, funcname, objs, args, scale, connection):
     '''
@@ -41,56 +76,26 @@ def measure(heuristicpath, heuristic_name, funcpath, funcname, objs, args, scale
     # Seeding the random module for generating the initial point of the optimizer: Utilising random starting point for experimental validity
     r.seed(int(time.time()))
-    # loading the heuristic object into the namespace and memory
-    spec = importlib.util.spec_from_file_location(heuristic_name, heuristicpath)
-    heuristic = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(heuristic)
-    testspec = importlib.util.spec_from_file_location(funcname, funcpath)
-    func = importlib.util.module_from_spec(testspec)
-    testspec.loader.exec_module(func)
-    responses = array([0,0])
-    convergence = empty(0)
-    best = empty(0)
-    with pool(processes = 10) as pool:
-        for run in range(30):
-            # defining a countable test function
-            @counter
-            def testfunc(args):
-                return func.main(args)
-            # Defining a random initial point to start testing the algorithms
-            initpoint = [r.random() * scale, r.random() * scale]
-            try:
-                #This timer calculates directly the CPU time of the process (Nanoseconds)
-                tic = time.process_time_ns()
-                # running the test by calling the heuritic script with the test function as argument
-                best = append(best, heuristic.main(testfunc, objs, initpoint, args))
-                toc = time.process_time_ns()
-                # CPU time in seconds
-                cpuTime = (toc - tic)*(10**-9)
-                numCalls = testfunc.count
-                converged = 1
-            except:
-                best = NaN
-                cpuTime = NaN
-                numCalls = testfunc.count
-                converged = 0
-            # Building the response
-            responses = vstack([responses, array([cpuTime,numCalls])])
-            convergence = append(convergence,[converged])
-    responses = delete(responses,[0],axis=0)
+    # Defining random initial points to start testing the algorithms
+    initpoints = [[r.random() * scale, r.random() * scale] for run in range(3)]  # TODO: update the inner list to [r.random() * scale for i in range(testfuncDimensions)]
+    # building the iterable arguments
+    partfunc = partial(simulate, heuristic_name, heuristicpath, funcname, funcpath, objs, args)
+    with multiprocessing.Pool(processes = 3) as pool:
+        # running the simulations
+        newRun = pool.map(partfunc, initpoints)
+    cpuTime = [resl[0] for resl in newRun]
+    quality = [resl[1] for resl in newRun]
+    numCalls = [resl[2] for resl in newRun]
+    converged = [resl[3] for resl in newRun]
     results = dict()
-    results['stdevs'] = array([statistics.stdev(responses[:,[0]].flatten()), statistics.stdev(responses[:,[1]].flatten())])
-    results['means'] = array([statistics.mean(responses[:,[0]].flatten()), statistics.mean(responses[:,[1]].flatten())])
-    results['convrate'] = statistics.mean(convergence)
+    results['cpuTime'] = array([statistics.mean(cpuTime), statistics.stdev(cpuTime)])
+    results['quality'] = array([statistics.mean(quality), statistics.stdev(quality)])
+    results['numCalls'] = array([statistics.mean(numCalls), statistics.stdev(numCalls)])
+    results['convRate'] = array([statistics.mean(converged), statistics.stdev(converged)])
     connection.send(results)
@@ -220,7 +225,6 @@ def representfunc(funcpath):
     ax3.imshow(beta)
     plt.show()
-    print("should be plotted")
     # Writing the calculated representation into the test function file
     # results['Represented'] = True
@@ -269,7 +273,7 @@ def doe(heuristicpath, heuristic_name, testfunctionpaths, funcnames, objs, args,
         connections[run][1].close()
     # display output
-    print("\n\n||||| Responses: [cpuTime,numCalls] |||||")
+    print("\n\n||||| Responses: [mean,stdDev] |||||")
     for process in proc: print(process.name + "____\n" + str(responses[process.name]) + "\n_________________")
 if __name__ == '__main__':
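
measure() reports through its connection argument rather than a return value, and doe() closes each pipe end once it has read from it, as the hunk above shows. A minimal sketch of that hand-off, assuming connections holds multiprocessing.Pipe pairs as the connections[run][1] indexing suggests:

    import multiprocessing

    def worker(connection):
        # stand-in for measure(): push the results dict to the parent, then close
        connection.send({'cpuTime': [0.1, 0.01], 'convRate': [1.0, 0.0]})
        connection.close()

    if __name__ == '__main__':
        parent_end, child_end = multiprocessing.Pipe()
        proc = multiprocessing.Process(target=worker, args=(child_end,))
        proc.start()
        print(parent_end.recv())  # {'cpuTime': [0.1, 0.01], 'convRate': [1.0, 0.0]}
        proc.join()
        parent_end.close()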

View File

@@ -77,7 +77,7 @@ def main(func, obj, S, args):
     #print('the Best Quality obtained was:{}'.format(Quality(Best,y)))
     print("Final Quality is: {}".format(Quality(Best,y,func)))
     print("final Temperature is: {}".format(t))
-    return Best
+    return Quality(Best,y,func)