diff --git a/SourceCode/AlgorithmAnalyser.py b/SourceCode/AlgorithmAnalyser.py
index 3154240..925db62 100644
--- a/SourceCode/AlgorithmAnalyser.py
+++ b/SourceCode/AlgorithmAnalyser.py
@@ -1,13 +1,14 @@
 # directly running the DOE because existing surrogates can be explored with another workflow
 from os import path
 import importlib.util
-import multiprocessing
+import multiprocessing
+import pathos.multiprocessing as mp
 import time
 import re
 from numpy import random as r
 from numpy import *
 import statistics
-
+from functools import partial
 import shutil

 # Surrogate modelling
@@ -31,7 +32,41 @@ class counter:
         self.count += 1
         return self.func(*args, **kwargs)

+def simulate(algName, algPath, funcname, funcpath, objs, args, initpoint):
+    # loading the heuristic object into the namespace and memory
+    spec = importlib.util.spec_from_file_location(algName, algPath)
+    heuristic = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(heuristic)
+    # loading the test function object into the namespace and memory
+    testspec = importlib.util.spec_from_file_location(funcname, funcpath)
+    func = importlib.util.module_from_spec(testspec)
+    testspec.loader.exec_module(func)
+
+    # defining a countable test function
+    @counter
+    def testfunc(args):
+        return func.main(args)
+
+    # a try statement handles any exception raised inside the child process (by the heuristic, the test function, or the pool itself)
+    try:
+        # This timer measures the CPU time of the process directly (in nanoseconds)
+        tic = time.process_time_ns()
+        # running the test by calling the heuristic script with the test function as argument
+        quality = heuristic.main(testfunc, objs, initpoint, args)
+        toc = time.process_time_ns()
+        # ^^ The timer stops right above; the CPU time is then obtained below by simple difference ^^
+
+        # CPU time in seconds
+        cpuTime = (toc - tic)*(10**-9)
+        numCalls = testfunc.count
+        converged = 1
+    except Exception:
+        quality = NaN
+        cpuTime = NaN
+        numCalls = testfunc.count
+        converged = 0
+    return cpuTime, quality, numCalls, converged

 def measure(heuristicpath, heuristic_name, funcpath, funcname, objs, args, scale, connection):
     '''
@@ -41,56 +76,26 @@
     # Seeding the random module for generating the initial point of the optimizer: Utilising random starting point for experimental validity
     r.seed(int(time.time()))

-    # loading the heuristic object into the namespace and memory
-    spec = importlib.util.spec_from_file_location(heuristic_name, heuristicpath)
-    heuristic = importlib.util.module_from_spec(spec)
-    spec.loader.exec_module(heuristic)
-    testspec = importlib.util.spec_from_file_location(funcname, funcpath)
-    func = importlib.util.module_from_spec(testspec)
-    testspec.loader.exec_module(func)
+    # Defining random initial points to start testing the algorithms
+    initpoints = [[r.random() * scale, r.random() * scale] for run in range(3)] # update the inner list to [r.random() * scale for i in range(testfuncDimensions)] for n-dimensional test functions
+    # building the iterable arguments
+    partfunc = partial(simulate, heuristic_name, heuristicpath, funcname, funcpath, objs, args)

-    responses = array([0,0])
-    convergence = empty(0)
-    best = empty(0)
-
-    with pool(processes = 10) as pool:
+    with multiprocessing.Pool(processes = 3) as pool:
+        # running the simulations
+        newRun = pool.map(partfunc, initpoints)

-        for run in range(30):
-            # defining a countable test function
-            @counter
-            def testfunc(args):
-                return func.main(args)
-            # Defining a random initial point to start testing the algorithms
-            initpoint = [r.random() * scale, r.random() * scale]
-
-            try:
-                #This timer calculates directly the CPU time of the process (Nanoseconds)
-                tic = time.process_time_ns()
-                # running the test by calling the heuritic script with the test function as argument
-                best = append(best, heuristic.main(testfunc, objs, initpoint, args))
-                toc = time.process_time_ns()
-                # ^^ The timer ends right above this; the CPU time is then calculated below by simple difference ^^
-                # CPU time in seconds
-                cpuTime = (toc - tic)*(10**-9)
-                numCalls = testfunc.count
-                converged = 1
-
-            except:
-                best = NaN
-                cpuTime = NaN
-                numCalls = testfunc.count
-                converged = 0
-
-            # Building the response
-            responses = vstack([responses, array([cpuTime,numCalls])])
-            convergence = append(convergence,[converged])
-
-    responses = delete(responses,[0],axis=0)
+        cpuTime = [resl[0] for resl in newRun]
+        quality = [resl[1] for resl in newRun]
+        numCalls = [resl[2] for resl in newRun]
+        converged = [resl[3] for resl in newRun]
+
     results = dict()
-    results['stdevs'] = array([statistics.stdev(responses[:,[0]].flatten()), statistics.stdev(responses[:,[1]].flatten())])
-    results['means'] = array([statistics.mean(responses[:,[0]].flatten()), statistics.mean(responses[:,[1]].flatten())])
-    results['convrate'] = statistics.mean(convergence)
+    results['cpuTime'] = array([statistics.mean(cpuTime), statistics.stdev(cpuTime)])
+    results['quality'] = array([statistics.mean(quality), statistics.stdev(quality)])
+    results['numCalls'] = array([statistics.mean(numCalls), statistics.stdev(numCalls)])
+    results['convRate'] = array([statistics.mean(converged), statistics.stdev(converged)])

     connection.send(results)

@@ -220,7 +225,6 @@ def representfunc(funcpath):
     ax3.imshow(beta)
     plt.show()

-    print("should be plotted")

     # Writing the calculated representation into the test function file
     # results['Represented'] = True
@@ -269,7 +273,7 @@ def doe(heuristicpath, heuristic_name, testfunctionpaths, funcnames, objs, args,
        connections[run][1].close()

    # display output
-    print("\n\n||||| Responses: [cpuTime,numCalls] |||||")
+    print("\n\n||||| Responses: [mean,stdDev] |||||")
    for process in proc:
        print(process.name + "____\n" + str(responses[process.name]) + "\n_________________")

if __name__ == '__main__':
diff --git a/SourceCode/SampleAlgorithms/SimmulatedAnnealing.py b/SourceCode/SampleAlgorithms/SimmulatedAnnealing.py
index 8628680..90cf488 100644
--- a/SourceCode/SampleAlgorithms/SimmulatedAnnealing.py
+++ b/SourceCode/SampleAlgorithms/SimmulatedAnnealing.py
@@ -77,7 +77,7 @@ def main(func, obj, S, args):
     #print('the Best Quality obtained was:{}'.format(Quality(Best,y)))
     print("Final Quality is: {}".format(Quality(Best,y,func)))
     print("final Temperature is: {}".format(t))
-    return Best
+    return Quality(Best,y,func)
diff --git a/SourceCode/SampleAlgorithms/__pycache__/SimmulatedAnnealing.cpython-38.pyc b/SourceCode/SampleAlgorithms/__pycache__/SimmulatedAnnealing.cpython-38.pyc
index 3306afe..adf81c6 100644
Binary files a/SourceCode/SampleAlgorithms/__pycache__/SimmulatedAnnealing.cpython-38.pyc and b/SourceCode/SampleAlgorithms/__pycache__/SimmulatedAnnealing.cpython-38.pyc differ
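
A note on the pattern the refactored measure() relies on: functools.partial fixes every argument of simulate() except the final initpoint, so that multiprocessing.Pool.map can fan the initial points out across worker processes and collect one metrics tuple per point. A minimal, self-contained sketch of that pattern follows; the simulate body and the values in it are placeholders for illustration, not the project's real simulation:

    from functools import partial
    from multiprocessing import Pool

    def simulate(name, scale, initpoint):
        # placeholder body; the real simulate() loads the heuristic and the
        # test function, runs the optimisation, and returns its four metrics
        return sum(initpoint) * scale

    if __name__ == '__main__':
        initpoints = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
        # fix the leading arguments so the pool only ships one initpoint per task
        partfunc = partial(simulate, "demo", 2.0)
        with Pool(processes=3) as pool:
            results = pool.map(partfunc, initpoints)
        print(results)  # one result per initial point, in input order

Because pool.map preserves input order, newRun[i] in measure() always corresponds to initpoints[i], which is what makes the plain list comprehensions over newRun safe.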
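
The @counter decorator that simulate() applies to the test function is only partially visible in the hunk context (the self.count += 1 / return self.func(*args, **kwargs) body of __call__). Assuming the usual callable-class implementation, it works roughly as in the sketch below; the __init__ is a reconstruction, and only the __call__ body is confirmed by the diff:

    class counter:
        # wraps a function and counts how many times it has been called;
        # simulate() reads this count back as numCalls after the run
        def __init__(self, func):
            self.func = func
            self.count = 0

        def __call__(self, *args, **kwargs):
            self.count += 1
            return self.func(*args, **kwargs)

    @counter
    def testfunc(x):
        return x * x

    testfunc(2)
    testfunc(3)
    print(testfunc.count)  # -> 2

Since the decorated testfunc is created fresh inside every simulate() call, each worker process counts only the function evaluations of its own run.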