@@ -1,121 +0,0 @@
#!/usr/bin/env python3 | |||
import os
import glob
import json
import matplotlib.pyplot as plt

from util import script as scriptUtils
def main(): | |||
args = __parseArguments() | |||
__stats(args["comparisonDir"], args["outputDir"]) | |||
def __parseArguments(): | |||
parser = scriptUtils.ArgParser() | |||
parser.addInstanceDirArg() | |||
parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir", | |||
help="the direcotry with all comparison files", type=str) | |||
parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir", | |||
help="Directory to store the stats", type=str) | |||
arguments = parser.parse() | |||
arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"]) | |||
arguments["comparisonDir"] = os.path.join(arguments["datasetDir"], | |||
arguments["comparisonDir"]) | |||
arguments["outputDir"] = os.path.join(arguments["datasetDir"], | |||
arguments["outputDir"]) | |||
return arguments | |||
def __stats(comparisonDir, outputDir): | |||
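    """Collect stats for every run* directory below comparisonDir and
    write one plot per run."""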
runs = glob.glob(os.path.join(comparisonDir, "run*")) | |||
for run in runs: | |||
stats = __collectStats(run) | |||
runOutputDir = os.path.join(outputDir, os.path.basename(run)) | |||
__writeStats(stats, runOutputDir) | |||
def __collectStats(comparisonDir): | |||
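    """Tally result agreement across all *.cmp files of one run.

    Each .cmp file is assumed to hold JSON with at least the boolean keys
    "minisat_satisfiable" and "qubo_satisfiable".
    """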
files = glob.glob(os.path.join(comparisonDir, "*.cmp")) | |||
stats = {} | |||
stats["match"] = 0 | |||
stats["false_positive"] = 0 | |||
stats["false_negative"] = 0 | |||
stats["unsat"] = 0 | |||
for path in files: | |||
comparison = __readComparison(path) | |||
minisat_satisfiable = comparison["minisat_satisfiable"] | |||
qubo_satisfiable = comparison["qubo_satisfiable"] | |||
if minisat_satisfiable == qubo_satisfiable: | |||
stats["match"] += 1 | |||
        elif not minisat_satisfiable and qubo_satisfiable:
            stats["false_positive"] += 1
        elif minisat_satisfiable and not qubo_satisfiable:
            stats["false_negative"] += 1
if not minisat_satisfiable: | |||
stats["unsat"] += 1 | |||
return stats | |||
def __readComparison(path):
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir): | |||
if not os.path.exists(outputDir): | |||
os.makedirs(outputDir) | |||
fig = plt.figure() | |||
ax = fig.add_subplot(111) | |||
numInstances = stats["match"] + stats["false_negative"] + stats["false_positive"] | |||
matchBar = ax.bar(x=0, height=stats["match"]) | |||
falsePositiveBar = ax.bar(x=1, height=stats["false_positive"]) | |||
falseNegativeBar = ax.bar(x=1, | |||
height=stats["false_negative"], | |||
bottom=stats["false_positive"]) | |||
ax.axhline(y=stats["match"], linestyle="--", color="gray") | |||
ax.axhline(y=stats["false_negative"], linestyle="--", color="gray") | |||
plt.ylabel("SAT Instanzen") | |||
plt.title("Verlgeich Minisat / WMIS qubo mit qbsolv") | |||
plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis")) | |||
plt.yticks([0, stats["match"], stats["false_negative"], numInstances]) | |||
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Same result",
                "False Positive",
                "False Negative"))
plt.savefig(os.path.join(outputDir, "stats.png")) | |||
if __name__ == "__main__": | |||
main() |
@@ -1,40 +0,0 @@
#!/usr/bin/env python3 | |||
from util import script as scriptUtils | |||
from util import compare | |||
import glob | |||
import os | |||
def __main(): | |||
args = __parseArguments() | |||
__compareRuns(args) | |||
def __parseArguments(): | |||
parser = scriptUtils.ArgParser() | |||
parser.addInstanceDirArg() | |||
return parser.parse() | |||
def __compareRuns(args): | |||
    instancePaths = glob.glob(os.path.join(args["dataset_dir"],
                                           args["instance_dir"],
                                           "*.dimacs"))
    runDirs = glob.glob(os.path.join(args["dataset_dir"],
                                     args["wmis_result_dir"],
                                     "run*"))
for path in instancePaths: | |||
__compareRunsOfInstance(path, runDirs) | |||
def __compareRunsOfInstance(instancePath, runDirs): | |||
instanceName = os.path.basename(instancePath) | |||
    with open(instancePath) as instanceFile:
        # NOTE: the body of this function was missing at this revision; the
        # placeholder keeps the script runnable. Presumably instanceFile
        # would be compared against each run in runDirs via util.compare.
        pass
if __name__ == "__main__": | |||
__main() |
@@ -1,141 +0,0 @@
#!/usr/bin/env python3 | |||
import os
import glob
import json
import matplotlib.pyplot as plt

import util.script as scriptUtils
def main(): | |||
args = __parseArguments() | |||
__stats(args["comparisonDir"], args["outputDir"]) | |||
def __parseArguments(): | |||
parser = scriptUtils.ArgParser() | |||
parser.addInstanceDirArg() | |||
parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir", | |||
help="the direcotry with all comparison files", type=str) | |||
parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir", | |||
help="Directory to store the stats", type=str) | |||
arguments = parser.parse() | |||
arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"]) | |||
arguments["comparisonDir"] = os.path.join(arguments["datasetDir"], | |||
arguments["comparisonDir"]) | |||
arguments["outputDir"] = os.path.join(arguments["datasetDir"], | |||
arguments["outputDir"]) | |||
return arguments | |||
def __stats(comparisonDir, outputDir): | |||
runs = glob.glob(os.path.join(comparisonDir, "run*")) | |||
for run in runs: | |||
stats = __collectStats(run) | |||
print(stats) | |||
runOutputDir = os.path.join(outputDir, os.path.basename(run)) | |||
__writeStats(stats, runOutputDir) | |||
def __collectStats(comparisonDir): | |||
files = glob.glob(os.path.join(comparisonDir, "*.cmp")) | |||
    stats = {outcome: {"count": 0, "instances": []}
             for outcome in ("match", "false_positive", "false_negative", "unsat")}
for path in files: | |||
comparison = __readComparison(path) | |||
minisat_satisfiable = comparison["minisat_satisfiable"] | |||
qubo_satisfiable = comparison["qubo_satisfiable"] | |||
instanceName = str(os.path.basename(path)).split(".")[0] | |||
if minisat_satisfiable == qubo_satisfiable: | |||
stats["match"]["count"] += 1 | |||
stats["match"]["instances"].append(instanceName) | |||
        elif not minisat_satisfiable and qubo_satisfiable:
stats["false_positive"]["count"] += 1 | |||
stats["false_positive"]["instances"].append(instanceName) | |||
        elif minisat_satisfiable and not qubo_satisfiable:
stats["false_negative"]["count"] += 1 | |||
stats["false_negative"]["instances"].append(instanceName) | |||
if not minisat_satisfiable: | |||
stats["unsat"]["count"] += 1 | |||
stats["unsat"]["instances"].append(instanceName) | |||
return stats | |||
def __readComparison(path):
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir): | |||
if not os.path.exists(outputDir): | |||
os.makedirs(outputDir) | |||
    with open(os.path.join(outputDir, "statusCollection"), "w") as statusFile:
        json.dump(stats, statusFile)
fig = plt.figure() | |||
ax = fig.add_subplot(111) | |||
matchCount = stats["match"]["count"] | |||
falseNegativeCount = stats["false_negative"]["count"] | |||
falsePositiveCount = stats["false_positive"]["count"] | |||
numInstances = matchCount + falseNegativeCount + falsePositiveCount | |||
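    # Bar 0 shows the matching results; bar 1 stacks the mismatches, with
    # false positives at the bottom and false negatives on top, so its
    # total height is the number of disagreements.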
matchBar = ax.bar(x=0, height=matchCount) | |||
falsePositiveBar = ax.bar(x=1, height=falsePositiveCount) | |||
falseNegativeBar = ax.bar(x=1, | |||
height=falseNegativeCount, | |||
bottom=falsePositiveCount) | |||
ax.axhline(y=matchCount, linestyle="--", color="gray") | |||
ax.axhline(y=falseNegativeCount, linestyle="--", color="gray") | |||
plt.ylabel("SAT Instanzen") | |||
plt.title("Verlgeich Minisat / WMIS qubo mit qbsolv") | |||
plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis")) | |||
plt.yticks([0, matchCount, falseNegativeCount, numInstances]) | |||
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Same result",
                "False Positive",
                "False Negative"))
plt.savefig(os.path.join(outputDir, "stats.png")) | |||
if __name__ == "__main__": | |||
main() |
@@ -1,51 +0,0 @@
#!/usr/bin/env python3 | |||
import os | |||
import configparser | |||
import argparse | |||
def main(): | |||
    args = __parseArguments()
config = configparser.ConfigParser() | |||
dirs = {"INSTANCE_DIR": "instances", | |||
"MINISAT_RESULT_DIR": "minisatResults", | |||
"WMIS_RESULT_DIR": "wmisResults", | |||
"COMPARISON_DIR": "comparison"} | |||
dirs["COMPARISON_STATS_DIR"] = os.path.join(dirs["COMPARISON_DIR"], | |||
"stats") | |||
config["STRUCTURE"] = dirs | |||
os.mkdir(args["dir"]) | |||
os.mkdir(os.path.join(args["dir"], dirs["INSTANCE_DIR"])) | |||
os.mkdir(os.path.join(args["dir"], dirs["MINISAT_RESULT_DIR"])) | |||
os.mkdir(os.path.join(args["dir"], dirs["WMIS_RESULT_DIR"])) | |||
os.mkdir(os.path.join(args["dir"], dirs["COMPARISON_DIR"])) | |||
os.mkdir(os.path.join(args["dir"], dirs["COMPARISON_STATS_DIR"])) | |||
with open(os.path.join(args["dir"], "dataset.config"), "w") as configfile: | |||
config.write(configfile) | |||
configfile.close() | |||
def __parseArguments(): | |||
parser = argparse.ArgumentParser() | |||
parser.add_argument("-d", "--directory", help="the direcotry for the new dataset", type=str) | |||
args = parser.parse_args() | |||
arguments = {} | |||
print(args) | |||
arguments["dir"] = args.directory | |||
if arguments["dir"] == None: | |||
arguments["dir"] = str(input("Directory: ")) | |||
arguments["dir"] = os.path.abspath(arguments["dir"]) | |||
return arguments | |||
if __name__ == "__main__": | |||
main() |
@@ -1,61 +0,0 @@
#!/usr/bin/env python3 | |||
from util import randomSAT | |||
from util import kSAT | |||
import argparse | |||
import configparser | |||
import os | |||
def main(): | |||
parser = argparse.ArgumentParser() | |||
parser.add_argument("-d", "--base_directory", help="the base directorey of the new dataset; should contain a dataset.config file", type=str) | |||
parser.add_argument("-i", "--instances", help="number of random kSAT instances", type=int) | |||
parser.add_argument("-v", "--variables", help="number of variables in ksat instances", type=int) | |||
parser.add_argument("-c", "--clauses", help="number of clauses in ksat instances", type=int) | |||
parser.add_argument("--variables_per_clause", help="variables per clause in ksat instances", type=int, default=3) | |||
parser.add_argument("-o", "--output", help="output directory", type=str) | |||
args = parser.parse_args() | |||
    baseDir = args.base_directory
    config = configparser.ConfigParser()
    if baseDir is not None:
        config = __readConfig(os.path.join(baseDir, "dataset.config"))
    numberOfVariables = args.variables
    if numberOfVariables is None:
        numberOfVariables = int(input("Number of variables per instance: "))
    numberOfClauses = args.clauses
    if numberOfClauses is None:
        numberOfClauses = int(input("Number of clauses per instance: "))
    numberOfInstances = args.instances
    if numberOfInstances is None:
        numberOfInstances = int(input("Number of instances: "))
    instanceDir = None
    if "STRUCTURE" in config and "instance_dir" in config["STRUCTURE"]:
        instanceDir = os.path.join(baseDir, config["STRUCTURE"]["instance_dir"])
    elif args.output is not None:
        instanceDir = args.output
    else:
        instanceDir = str(input("output directory: "))
for i in range(numberOfInstances): | |||
ksatInstance = randomSAT.generateRandomKSAT(numberOfClauses, | |||
numberOfVariables, | |||
args.variables_per_clause) | |||
instanceFilePath = os.path.join(instanceDir, "instance_%d.dimacs" % (i)) | |||
ksatInstance.writeDIMACS(instanceFilePath) | |||
def __readConfig(configFilePath): | |||
config = configparser.ConfigParser() | |||
if os.path.isfile(configFilePath): | |||
config.read(configFilePath) | |||
return config | |||
if __name__ == "__main__": | |||
main() |
@@ -1,32 +0,0 @@
#!/usr/bin/env python3 | |||
from util import script as scrUt
import os
def main(): | |||
    args = __parseArguments()
print(args) | |||
def __parseArguments(): | |||
parser = scrUt.ArgParser() | |||
parser.addInstanceDirArg() | |||
parser.addArg(alias="instanceDir", shortFlag="i", longFlag="instance_dir", | |||
help="the directory with all instance files", type=str) | |||
parser.addArg(alias="outputDir", shortFlag="o", longFlag="output_dir", | |||
help="the directory to store the minisat results for each instance", | |||
type=str) | |||
parser.addArg(alias="configFile", shortFlag="c", longFlag="config", | |||
help="config file (default: ./satlab.config)", | |||
type=str, default=os.path.join(".", "satlab.config")) | |||
    arguments = parser.parse()
    return arguments
if __name__ == "__main__": | |||
main() |
@@ -1,115 +0,0 @@
#!/usr/bin/env python3 | |||
from util import kSAT | |||
from util import SAT2QUBO | |||
from util.SATquboResult import SATquboResult | |||
from dwave_qbsolv import QBSolv
import os
import json
from tqdm import tqdm
from util import script as scriptUtils | |||
def main(): | |||
arguments = __parseArguments() | |||
satInstance = kSAT.kSAT() | |||
print("reading ksat...") | |||
satInstance.readDIMACS(arguments["instancePath"]) | |||
print() | |||
result = __runWMISquboOnSatInstance(satInstance) | |||
    resultPath = os.path.join(arguments["resultDir"],
                              "run%d" % arguments["run"],
                              "%s.out" % arguments["instanceFileName"])
print() | |||
print("writing results to file...") | |||
__writeResult(result, resultPath) | |||
def __parseArguments(): | |||
parser = scriptUtils.ArgParser() | |||
parser.addInstanceDirArg() | |||
parser.addArg(alias="instancePath", shortFlag="i", longFlag="instance", | |||
help="instance file, has to be in DIMACS format", type=str) | |||
parser.addArg(alias="resultDir", shortFlag="o", longFlag="wmis_result_dir", | |||
help="the wmis result directory", type=str, | |||
ignoreDatabaseConfig=False) | |||
parser.addArg(alias="run", shortFlag="r", longFlag="run", | |||
help="results will get saved unter [instance]_[run].out", type=int) | |||
arguments = parser.parse() | |||
arguments["instanceFileName"] = os.path.basename(arguments["instancePath"]) | |||
return arguments | |||
def __runWMISquboOnSatInstance(satInstance): | |||
print("generating wmis qubo...") | |||
qubo = SAT2QUBO.WMISdictQUBO(satInstance) | |||
print() | |||
print("running gbsolv...") | |||
qbresult = QBSolv().sample_qubo(Q=qubo, find_max=True) | |||
print() | |||
print("packing results...") | |||
results = __packResults(satInstance, qbresult) | |||
return results | |||
def __packResults(satInstance, qbresult): | |||
results = [] | |||
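    # The qbsolv response exposes samples() as one {variable: 0/1} dict per
    # distinct solution and data_vectors["num_occurrences"] as the count of
    # how often each solution was found.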
samples = list(qbresult.samples()) | |||
occurrences = qbresult.data_vectors["num_occurrences"] | |||
for i in tqdm(range(len(samples))): | |||
quboResult = __satQuboResultFromSample(samples[i]) | |||
quboResult.setOccurrences(occurrences[i]) | |||
quboResult.setSatisfiesInstance(satInstance) | |||
results.append(quboResult) | |||
return results | |||
def __writeResult(results, resultPath):
    resultDir = os.path.dirname(resultPath)
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
    with open(resultPath, "w") as resultFile:
        for result in tqdm(results):
            resultFile.write(json.dumps(result.toPrimitive()))
            resultFile.write("\n\n")
def __satQuboResultFromSample(sample):
    result = SATquboResult()
    for binding in sample:
        isActive = sample[binding] == 1
        result.addBinding(binding, isActive)
    return result
if __name__ == "__main__": | |||
main() |
@@ -1,290 +0,0 @@
#!/usr/bin/env python3 | |||
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt

from util import script as scriptUtils
def main(): | |||
args = __parseArguments() | |||
print(args) | |||
__stats(args["comparisonDir"], args["outputDir"]) | |||
def __parseArguments(): | |||
argParser = scriptUtils.ArgParser() | |||
    argParser.addInstanceDirArg()
argParser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir", | |||
help="the direcotry with all comparison files", type=str) | |||
argParser.addArg(alias="outputDir", shortFlag="o", longFlag="comparison_stats_dir", | |||
help="Directory to store the stats", type=str) | |||
return argParser.parse() | |||
def __stats(comparisonDir, outputDir): | |||
stats = __collectStats(comparisonDir) | |||
__writeStats(stats, outputDir) | |||
def __collectStats(comparisonDir): | |||
files = glob.glob(os.path.join(comparisonDir, "*.cmp")) | |||
stats = [] | |||
for path in files: | |||
comparison = __readComparison(path) | |||
stats.append(__processSingleInstance(comparison)) | |||
return stats | |||
def __processSingleInstance(comparison): | |||
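    """Reduce one comparison dict to per-instance conflict statistics.

    The comparison JSON is expected to contain "conflicts_per_variable",
    "degrees_of_variables", "minisat_satisfiable" and "qubo_satisfiable".
    """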
instanceStats = {} | |||
conflicts = comparison["conflicts_per_variable"] | |||
conflictArr = np.array(list(conflicts.values())) | |||
instanceStats["conflicts_per_variable_mean"] = conflictArr.mean() | |||
instanceStats["conflicts_per_variable_median"] = np.median(conflictArr) | |||
instanceStats["conflicts_per_variable_std_dev"] = np.std(conflictArr) | |||
instanceStats["conflicts_per_variable_max"] = conflictArr.max() | |||
instanceStats["conflicts_per_variable_min"] = conflictArr.min() | |||
instanceStats["conflicts_per_instance"] = np.sum(conflictArr) | |||
instanceStats["raw_conflicts"] = list(conflictArr) | |||
instanceStats["conflicts_to_degree_per_variable"] = __calcConflictsToDegree(conflicts, | |||
comparison["degrees_of_variables"]) | |||
if comparison["minisat_satisfiable"]: | |||
if __instanceIsFalseNegative(comparison): | |||
instanceStats["result"] = "false_negative" | |||
else: | |||
instanceStats["result"] = "satisfiable" | |||
else: | |||
instanceStats["result"] = "unsatisfiable" | |||
return instanceStats | |||
def __calcConflictsToDegree(degreesPerVariable, conflictsPerVariable):
    # For each variable, normalize its conflict count by (degree / 2)**2;
    # only ratios of at most 1 are kept.
conflictsToDegreePerVariable = [] | |||
for varLabel, degree in degreesPerVariable.items(): | |||
conflicts = conflictsPerVariable[varLabel] | |||
cnflToDeg = conflicts / (float(degree) / 2.0)**2 | |||
if cnflToDeg <= 1: | |||
conflictsToDegreePerVariable.append(cnflToDeg) | |||
return conflictsToDegreePerVariable | |||
def __instanceIsFalseNegative(comparison):
    return (comparison["minisat_satisfiable"] and
            not comparison["qubo_satisfiable"])
def __readComparison(path):
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir): | |||
data = __seperateMatchesAndFalseNegatives(stats) | |||
overviewFig = __createOverviewFig(data) | |||
meanFig = __createSingleStatFig(data["mean"], "Conflicts per variable mean") | |||
medianFig = __createSingleStatFig(data["median"], "Conflicts per variable median") | |||
maxFig = __createSingleStatFig(data["max"], "Conflicts per variable max") | |||
minFig = __createSingleStatFig(data["min"], "Conflicts per variable min") | |||
stdDevFig = __createSingleStatFig(data["std_dev"], "Conflicts per variable\nstandard deviation") | |||
cnflPerInstFig = __createSingleStatFig(data["cnfl_per_inst"], "Conflicts per instance") | |||
    cnflDegFig1 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=False)
    cnflDegFig2 = __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree", showfliers=True)
    histFig = __createHistogramFig(data, "raw", "Conflict per variable")
__setBatchXticks(figures=[overviewFig, | |||
meanFig, | |||
medianFig, | |||
maxFig, | |||
minFig, | |||
stdDevFig, | |||
cnflPerInstFig, | |||
cnflDegFig1, | |||
cnflDegFig2], | |||
ticks=[1, 2, 3], | |||
labels=["satisfiable", | |||
"false negative", | |||
"unsatisfiable"]) | |||
__setBatchXtickLabelRotation(figures=[overviewFig, | |||
meanFig, | |||
medianFig, | |||
maxFig, | |||
minFig, | |||
stdDevFig, | |||
cnflPerInstFig, | |||
cnflDegFig1, | |||
cnflDegFig2], | |||
rotation=30) | |||
overviewFig.savefig(os.path.join(outputDir, "conflicts_overview.png")) | |||
meanFig.savefig(os.path.join(outputDir, "conflicts_mean.png")) | |||
medianFig.savefig(os.path.join(outputDir, "conflicts_median.png")) | |||
maxFig.savefig(os.path.join(outputDir, "conflicts_max.png")) | |||
minFig.savefig(os.path.join(outputDir, "conflicts_min.png")) | |||
stdDevFig.savefig(os.path.join(outputDir, "conflicts_std_dev.png")) | |||
cnflPerInstFig.savefig(os.path.join(outputDir, "conflicts_per_instance.png")) | |||
histFig.savefig(os.path.join(outputDir, "conflicts_per_var_hist.png")) | |||
cnflDegFig1.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_1.png")) | |||
cnflDegFig2.savefig(os.path.join(outputDir, "conflicts_in_relation_to_degree_2.png")) | |||
def __createOverviewFig(data): | |||
fig = plt.figure() | |||
ax0 = fig.add_subplot(141,) | |||
ax0.boxplot([data["mean"]["satisfiable"], | |||
data["mean"]["false_negative"], | |||
data["mean"]["unsatisfiable"]]) | |||
ax0.set_title("mean") | |||
ax1 = fig.add_subplot(142, sharey=ax0) | |||
ax1.boxplot([data["median"]["satisfiable"], | |||
data["median"]["false_negative"], | |||
data["median"]["unsatisfiable"]]) | |||
ax1.set_title("median") | |||
ax2 = fig.add_subplot(143, sharey=ax0) | |||
ax2.boxplot([data["max"]["satisfiable"], | |||
data["max"]["false_negative"], | |||
data["max"]["unsatisfiable"]]) | |||
ax2.set_title("max degree") | |||
ax3 = fig.add_subplot(144, sharey=ax0) | |||
ax3.boxplot([data["min"]["satisfiable"], | |||
data["min"]["false_negative"], | |||
data["min"]["unsatisfiable"]]) | |||
ax3.set_title("min degree") | |||
fig.set_size_inches(12, 8) | |||
fig.suptitle("Conflicts per variable overview", fontsize=16) | |||
return fig | |||
def __createHistogramFig(data, subDataSet, title): | |||
fig = plt.figure() | |||
bins = int(max(data[subDataSet]["satisfiable"]) / 5) | |||
ax0 = fig.add_subplot(321) | |||
ax0.hist(data[subDataSet]["satisfiable"], bins=bins) | |||
ax0_2 = fig.add_subplot(322) | |||
ax0_2.boxplot(data[subDataSet]["satisfiable"], vert=False) | |||
ax1 = fig.add_subplot(323, sharex=ax0) | |||
ax1.hist(data[subDataSet]["false_negative"], bins=bins) | |||
ax1_2 = fig.add_subplot(324, sharex=ax0_2) | |||
ax1_2.boxplot(data[subDataSet]["false_negative"], vert=False) | |||
ax2 = fig.add_subplot(325, sharex=ax0) | |||
ax2.hist(data[subDataSet]["unsatisfiable"], bins=bins) | |||
ax2_2 = fig.add_subplot(326, sharex=ax0_2) | |||
ax2_2.boxplot(data[subDataSet]["unsatisfiable"], vert=False) | |||
fig.set_size_inches(14, 10) | |||
fig.suptitle(title, fontsize=16) | |||
return fig | |||
def __createSingleStatFig(subDataset, title, showfliers=True): | |||
fig = plt.figure() | |||
ax = fig.add_subplot(111) | |||
ax.boxplot([subDataset["satisfiable"], | |||
subDataset["false_negative"], | |||
subDataset["unsatisfiable"]], showfliers=showfliers) | |||
fig.set_size_inches(3.5, 8) | |||
fig.suptitle(title, fontsize=16) | |||
return fig | |||
def __setBatchXticks(figures, ticks, labels): | |||
for fig in figures: | |||
plt.setp(fig.get_axes(), xticks=ticks, xticklabels=labels) | |||
def __setBatchXtickLabelRotation(figures, rotation): | |||
for fig in figures: | |||
for ax in fig.get_axes(): | |||
plt.setp(ax.get_xticklabels(), rotation=rotation) | |||
def __seperateMatchesAndFalseNegatives(stats):
    statKeys = ["mean", "median", "std_dev", "max", "min",
                "cnfl_per_inst", "raw", "cnflDeg"]
    data = {key: {"false_negative": [],
                  "satisfiable": [],
                  "unsatisfiable": []}
            for key in statKeys}
for instance in stats: | |||
target = instance["result"] | |||
data["mean"][target].append(instance["conflicts_per_variable_mean"]) | |||
data["median"][target].append(instance["conflicts_per_variable_median"]) | |||
data["std_dev"][target].append(instance["conflicts_per_variable_std_dev"]) | |||
data["max"][target].append(instance["conflicts_per_variable_max"]) | |||
data["min"][target].append(instance["conflicts_per_variable_min"]) | |||
data["cnfl_per_inst"][target].append(instance["conflicts_per_instance"]) | |||
data["raw"][target].extend(instance["raw_conflicts"]) | |||
data["cnflDeg"][target].extend(instance["conflicts_to_degree_per_variable"]) | |||
return data | |||
if __name__ == "__main__": | |||
main() |
@@ -1,256 +0,0 @@
#!/usr/bin/env python3 | |||
import argparse | |||
import os | |||
import glob | |||
import json | |||
import numpy as np | |||
import matplotlib.pyplot as plt | |||
import collections | |||
def main(): | |||
args = __parseArguments() | |||
__stats(args["comparisonDir"], args["outputDir"]) | |||
def __parseArguments(): | |||
parser = argparse.ArgumentParser() | |||
parser.add_argument("-d", "--directory", help="the direcotry with all comparison files", type=str) | |||
parser.add_argument("-o", "--output", help="Directory to store the stats", type=str) | |||
args = parser.parse_args() | |||
arguments = {} | |||
print(args) | |||
arguments["comparisonDir"] = args.directory | |||
if arguments["comparisonDir"] == None: | |||
arguments["comparisonDir"] = str(input("Comparison directory: ")) | |||
arguments["comparisonDir"] = os.path.abspath(arguments["comparisonDir"]) | |||
arguments["outputDir"] = args.output | |||
if arguments["outputDir"] == None: | |||
arguments["outputDir"] = str(input("Output directory: ")) | |||
arguments["outputDir"] = os.path.abspath(arguments["outputDir"]) | |||
return arguments | |||
def __stats(comparisonDir, outputDir): | |||
stats = __collectStats(comparisonDir) | |||
__writeStats(stats, outputDir) | |||
def __collectStats(comparisonDir): | |||
files = glob.glob(os.path.join(comparisonDir, "*.cmp")) | |||
stats = [] | |||
for path in files: | |||
comparison = __readComparison(path) | |||
stats.append(__processSingleInstance(comparison)) | |||
return stats | |||
def __processSingleInstance(comparison): | |||
instanceStats = {} | |||
degrees = comparison["degrees_of_variables"] | |||
degreeArr = np.array(list(degrees.values())) | |||
instanceStats["degree_of_variables_mean"] = degreeArr.mean() | |||
instanceStats["degree_of_variables_median"] = np.median(degreeArr) | |||
instanceStats["degree_of_variables_std_dev"] = np.std(degreeArr) | |||
instanceStats["degree_of_variables_max"] = degreeArr.max() | |||
instanceStats["degree_of_variables_min"] = degreeArr.min() | |||
instanceStats["variables_per_degree"] = __getVarsPerDegree(degreeArr) | |||
if comparison["minisat_satisfiable"]: | |||
if __instanceIsFalseNegative(comparison): | |||
instanceStats["result"] = "false_negative" | |||
else: | |||
instanceStats["result"] = "satisfiable" | |||
else: | |||
instanceStats["result"] = "unsatisfiable" | |||
return instanceStats | |||
def __instanceIsFalseNegative(comparison):
    return (comparison["minisat_satisfiable"] and
            not comparison["qubo_satisfiable"])
def __getVarsPerDegree(degreeArr):
    # Count how many variables occur for each degree value.
    return dict(collections.Counter(degreeArr))
def __readComparison(path):
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir): | |||
fig1 = plt.figure() | |||
data = __seperateMatchesAndFalseNegatives(stats) | |||
ax0 = fig1.add_subplot(141,) | |||
ax0.boxplot([data["mean"]["satisfiable"], | |||
data["mean"]["false_negative"], | |||
data["mean"]["unsatisfiable"]]) | |||
ax0.set_title("mean") | |||
ax1 = fig1.add_subplot(142, sharey=ax0) | |||
ax1.boxplot([data["median"]["satisfiable"], | |||
data["median"]["false_negative"], | |||
data["median"]["unsatisfiable"]]) | |||
ax1.set_title("median") | |||
ax2 = fig1.add_subplot(143, sharey=ax0) | |||
ax2.boxplot([data["max"]["satisfiable"], | |||
data["max"]["false_negative"], | |||
data["max"]["unsatisfiable"]]) | |||
ax2.set_title("max degree") | |||
ax3 = fig1.add_subplot(144, sharey=ax0) | |||
ax3.boxplot([data["min"]["satisfiable"], | |||
data["min"]["false_negative"], | |||
data["min"]["unsatisfiable"]]) | |||
ax3.set_title("min degree") | |||
fig2 = plt.figure() | |||
ax4 = fig2.add_subplot(111) | |||
ax4.boxplot([data["std_dev"]["satisfiable"], | |||
data["std_dev"]["false_negative"], | |||
data["std_dev"]["unsatisfiable"]]) | |||
ax4.set_title("standard deviation") | |||
_BINS_ = 23 | |||
fig3 = plt.figure() | |||
ax5 = fig3.add_subplot(311) | |||
varsPerDegreeSat = __accumulateVarsPerDegree(data["vars_per_degree"]["satisfiable"]) | |||
ax5.hist(varsPerDegreeSat, density=True, bins=_BINS_) | |||
ax6 = fig3.add_subplot(312, sharex=ax5) | |||
varsPerDegreeFP = __accumulateVarsPerDegree(data["vars_per_degree"]["false_negative"]) | |||
ax6.hist(varsPerDegreeFP, density=True, bins=_BINS_) | |||
ax7 = fig3.add_subplot(313, sharex=ax6) | |||
varsPerDegreeUnsat = __accumulateVarsPerDegree(data["vars_per_degree"]["unsatisfiable"]) | |||
ax7.hist(varsPerDegreeUnsat, density=True, bins=_BINS_) | |||
plt.setp([ax0, ax1, ax2, ax3, ax4], xticks=[1, 2, 3], xticklabels=["satisfiable", | |||
"false negative", | |||
"unsatisfiable"]) | |||
plt.setp(ax0.get_xticklabels(), rotation=45) | |||
plt.setp(ax1.get_xticklabels(), rotation=45) | |||
plt.setp(ax2.get_xticklabels(), rotation=45) | |||
plt.setp(ax3.get_xticklabels(), rotation=45) | |||
plt.setp(ax4.get_xticklabels(), rotation=45) | |||
fig1.set_size_inches(12, 8) | |||
fig1.suptitle("Degrees of variables", fontsize=16) | |||
fig2.set_size_inches(4, 8) | |||
fig3.set_size_inches(5, 12) | |||
fig1.savefig(os.path.join(outputDir, "degrees1.png")) | |||
fig2.savefig(os.path.join(outputDir, "degrees2.png")) | |||
fig3.savefig(os.path.join(outputDir, "degrees3.png")) | |||
plt.show() | |||
def __accumulateVarsPerDegree(listOfVarsPerDegreeDicts): | |||
accumulated = [] | |||
for instance in listOfVarsPerDegreeDicts: | |||
for degree in instance: | |||
accumulated += [degree] * instance[degree] | |||
return accumulated | |||
def __compressVarsPerDegree(listOfVarsPerDegreeDicts): | |||
compressed = {} | |||
countOfVars = 0 | |||
for instance in listOfVarsPerDegreeDicts: | |||
for degree in instance: | |||
if degree in compressed: | |||
compressed[degree] += float(instance[degree]) | |||
else: | |||
compressed[degree] = float(instance[degree]) | |||
countOfVars += instance[degree] | |||
check = 0 | |||
for degree in compressed: | |||
compressed[degree] /= countOfVars | |||
check += compressed[degree] | |||
print("check: ", check) | |||
return compressed | |||
def __seperateMatchesAndFalseNegatives(stats):
    statKeys = ["mean", "median", "std_dev", "max", "min", "vars_per_degree"]
    data = {key: {"false_negative": [],
                  "satisfiable": [],
                  "unsatisfiable": []}
            for key in statKeys}
for instance in stats: | |||
target = instance["result"] | |||
data["mean"][target].append(instance["degree_of_variables_mean"]) | |||
data["median"][target].append(instance["degree_of_variables_median"]) | |||
data["std_dev"][target].append(instance["degree_of_variables_std_dev"]) | |||
data["max"][target].append(instance["degree_of_variables_max"]) | |||
data["min"][target].append(instance["degree_of_variables_min"]) | |||
data["vars_per_degree"][target].append(instance["variables_per_degree"]) | |||
return data | |||
if __name__ == "__main__": | |||
main() |