multiprocessing with pickled sampling / test

2021-05-15 21:15:47 -04:00
parent d0451b3f36
commit 466d7ec7d6
3 changed files with 55 additions and 51 deletions

View File

@@ -2,12 +2,13 @@
from os import path
import importlib.util
import multiprocessing
import pathos.multiprocessing as mp
import time
import re
from numpy import random as r
from numpy import *
import statistics
from functools import partial
import shutil
# Surrogate modelling
@@ -31,7 +32,41 @@ class counter:
self.count += 1
return self.func(*args, **kwargs)
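# Only the tail of the counter class is visible in this hunk; the full
# decorator is presumably a minimal callable wrapper along these lines:
#
#     class counter:
#         def __init__(self, func):
#             self.func = func
#             self.count = 0
#         def __call__(self, *args, **kwargs):
#             self.count += 1
#             return self.func(*args, **kwargs)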
def simulate(algName, algPath, funcname, funcpath, objs, args, initpoint):
# dynamically importing the heuristic module from its file path
spec = importlib.util.spec_from_file_location(algName, algPath)
heuristic = importlib.util.module_from_spec(spec)
spec.loader.exec_module(heuristic)
# dynamically importing the test function module from its file path
testspec = importlib.util.spec_from_file_location(funcname, funcpath)
func = importlib.util.module_from_spec(testspec)
testspec.loader.exec_module(func)
# wrapping the test function so its calls are counted
@counter
def testfunc(args):
return func.main(args)
# a try/except ensures that exceptions raised inside the child process (by the heuristic, the test function, or the pool itself) are recorded as a failed run rather than crashing the worker
try:
# process_time_ns() measures the CPU time of the current process, in nanoseconds
tic = time.process_time_ns()
# running the test by calling the heuristic script with the test function as its argument
quality = heuristic.main(testfunc, objs, initpoint, args)
toc = time.process_time_ns()
# the timer ends right above; CPU time is the difference of the two readings, converted from nanoseconds to seconds
cpuTime = (toc - tic) * (10**-9)
numCalls = testfunc.count
converged = 1
except Exception:
quality = NaN
cpuTime = NaN
numCalls = testfunc.count
converged = 0
return cpuTime, quality, numCalls, converged
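# simulate is kept at module top level so the standard pickle used by
# multiprocessing can serialise it for pool.map; a single run can also be
# exercised directly, e.g. (paths and names below are hypothetical):
#
#     cpuTime, quality, numCalls, converged = simulate(
#         'myHeuristic', './heuristics/myHeuristic.py',
#         'sphere', './testFunctions/sphere.py',
#         objs=1, args={}, initpoint=[0.5, 0.5])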
def measure(heuristicpath, heuristic_name, funcpath, funcname, objs, args, scale, connection):
'''
@@ -41,56 +76,26 @@ def measure(heuristicpath, heuristic_name, funcpath, funcname, objs, args, scale
# Seeding the random module with the current time so each measurement draws fresh random starting points (for experimental validity)
r.seed(int(time.time()))
# dynamically importing the heuristic module from its file path
spec = importlib.util.spec_from_file_location(heuristic_name, heuristicpath)
heuristic = importlib.util.module_from_spec(spec)
spec.loader.exec_module(heuristic)
testspec = importlib.util.spec_from_file_location(funcname, funcpath)
func = importlib.util.module_from_spec(testspec)
testspec.loader.exec_module(func)
# Defining random initial points to start testing the algorithms
initpoints = [[r.random() * scale, r.random() * scale] for run in range(3)] # update the inner comprehension for n-dimensional test functions, as sketched below
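# a hypothetical n-dimensional generalization, per the comment above
# (testfuncDimensions is an assumed name, not defined in this file):
#
#     testfuncDimensions = 2
#     initpoints = [[r.random() * scale for i in range(testfuncDimensions)]
#                   for run in range(3)]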
# binding the fixed arguments so that pool.map only has to supply the initial point
partfunc = partial(simulate, heuristic_name, heuristicpath, funcname, funcpath, objs, args)
responses = array([0,0])
convergence = empty(0)
best = empty(0)
with multiprocessing.Pool(processes = 3) as pool:
# running the simulations in parallel, one worker per initial point
newRun = pool.map(partfunc, initpoints)
# unpacking the per-run results returned by simulate
cpuTime = [resl[0] for resl in newRun]
quality = [resl[1] for resl in newRun]
numCalls = [resl[2] for resl in newRun]
converged = [resl[3] for resl in newRun]
for run in range(30):
# wrapping the test function so its calls are counted
@counter
def testfunc(args):
return func.main(args)
# Defining a random initial point to start testing the algorithms
initpoint = [r.random() * scale, r.random() * scale]
try:
# process_time_ns() measures the CPU time of the current process, in nanoseconds
tic = time.process_time_ns()
# running the test by calling the heuristic script with the test function as its argument
best = append(best, heuristic.main(testfunc, objs, initpoint, args))
toc = time.process_time_ns()
# the timer ends right above; CPU time is the difference of the two readings, converted from nanoseconds to seconds
cpuTime = (toc - tic) * (10**-9)
numCalls = testfunc.count
converged = 1
except Exception:
best = NaN
cpuTime = NaN
numCalls = testfunc.count
converged = 0
# Building the response
responses = vstack([responses, array([cpuTime,numCalls])])
convergence = append(convergence,[converged])
responses = delete(responses,[0],axis=0)
results = dict()
results['stdevs'] = array([statistics.stdev(responses[:,[0]].flatten()), statistics.stdev(responses[:,[1]].flatten())])
results['means'] = array([statistics.mean(responses[:,[0]].flatten()), statistics.mean(responses[:,[1]].flatten())])
results['convrate'] = statistics.mean(convergence)
results['cpuTime'] = array([statistics.mean(cpuTime), statistics.stdev(cpuTime)])
results['quality'] = array([statistics.mean(quality), statistics.stdev(quality)])
results['numCalls'] = array([statistics.mean(numCalls), statistics.stdev(numCalls)])
results['convRate'] = array([statistics.mean(converged), statistics.stdev(converged)])
connection.send(results)
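# a minimal sketch of how measure() is presumably driven from doe(), with the
# results dict travelling back over a multiprocessing.Pipe (argument values
# below are hypothetical):
#
#     parent_conn, child_conn = multiprocessing.Pipe()
#     p = multiprocessing.Process(target=measure,
#             args=(heuristicpath, 'myHeuristic', funcpath, 'sphere',
#                   1, {}, 10, child_conn))
#     p.start()
#     results = parent_conn.recv()   # blocks until measure() sends its dict
#     p.join()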
@@ -220,7 +225,6 @@ def representfunc(funcpath):
ax3.imshow(beta)
plt.show()
print("should be plotted")
# Writing the calculated representation into the test function file
# results['Represented'] = True
@@ -269,7 +273,7 @@ def doe(heuristicpath, heuristic_name, testfunctionpaths, funcnames, objs, args,
connections[run][1].close()
# display output
print("\n\n||||| Responses: [cpuTime,numCalls] |||||")
print("\n\n||||| Responses: [mean,stdDev] |||||")
for process in proc: print(process.name + "____\n" + str(responses[process.name]) + "\n_________________")
if __name__ == '__main__':

View File

@@ -77,7 +77,7 @@ def main(func, obj, S, args):
#print('the Best Quality obtained was:{}'.format(Quality(Best,y)))
print("Final Quality is: {}".format(Quality(Best,y,func)))
print("final Temperature is: {}".format(t))
return Best
return Quality(Best,y,func)
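# returning the scalar Quality(Best,y,func) instead of the Best point lets
# measure() aggregate runs directly, as in the hunk above:
#
#     quality = [resl[1] for resl in newRun]   # one scalar per run
#     results['quality'] = array([statistics.mean(quality), statistics.stdev(quality)])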