@ -0,0 +1 @@ | |||||
@ -0,0 +1,121 @@ | |||||
#!/usr/bin/env python3 | |||||
import argparse | |||||
import os | |||||
import glob | |||||
import json | |||||
import numpy as np | |||||
import matplotlib.pyplot as plt | |||||
import configparser | |||||
import scriptUtils | |||||
def main():
    """Entry point: collect comparison statistics per run and plot them."""
    arguments = __parseArguments()
    __stats(arguments["comparisonDir"], arguments["outputDir"])
def __parseArguments():
    """Parse dataset, comparison and stats-output directory arguments.

    Returns a dict with ``datasetDir`` (absolute) plus ``comparisonDir`` and
    ``outputDir`` resolved to paths inside the dataset directory.
    """
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    # fixed typo in help text: "direcotry" -> "directory"
    parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                  help="the directory with all comparison files", type=str)
    parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir",
                  help="Directory to store the stats", type=str)
    arguments = parser.parse()
    arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"])
    # comparison and output dirs are given relative to the dataset dir
    arguments["comparisonDir"] = os.path.join(arguments["datasetDir"],
                                              arguments["comparisonDir"])
    arguments["outputDir"] = os.path.join(arguments["datasetDir"],
                                          arguments["outputDir"])
    return arguments
def __stats(comparisonDir, outputDir):
    """Collect and plot statistics for every run* directory in comparisonDir."""
    for runDir in glob.glob(os.path.join(comparisonDir, "run*")):
        runStats = __collectStats(runDir)
        __writeStats(runStats, os.path.join(outputDir, os.path.basename(runDir)))
def __collectStats(comparisonDir):
    """Tally match / false-positive / false-negative / unsat counts.

    Reads every ``*.cmp`` JSON file in *comparisonDir* and classifies it by
    comparing the minisat verdict with the qubo verdict.  ``unsat`` counts
    every instance minisat found unsatisfiable, independent of the match
    classification.
    """
    stats = {"match": 0, "false_positive": 0, "false_negative": 0, "unsat": 0}
    for path in glob.glob(os.path.join(comparisonDir, "*.cmp")):
        comparison = __readComparison(path)
        minisatSat = comparison["minisat_satisfiable"]
        quboSat = comparison["qubo_satisfiable"]
        if minisatSat == quboSat:
            stats["match"] += 1
        elif not minisatSat and quboSat:
            stats["false_positive"] += 1
        elif minisatSat and not quboSat:
            stats["false_negative"] += 1
        if not minisatSat:
            stats["unsat"] += 1
    return stats
def __readComparison(path):
    """Load one comparison (.cmp) file, which holds a single JSON object."""
    # context manager instead of manual open/close: the file is closed even
    # if json.load raises
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Render the match/false-positive/false-negative counts as a stacked bar
    chart and save it as ``stats.png`` in *outputDir* (created if missing)."""
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    numInstances = stats["match"] + stats["false_negative"] + stats["false_positive"]
    matchBar = ax.bar(x=0, height=stats["match"])
    falsePositiveBar = ax.bar(x=1, height=stats["false_positive"])
    # false negatives are stacked on top of the false positives in column 1
    falseNegativeBar = ax.bar(x=1,
                              height=stats["false_negative"],
                              bottom=stats["false_positive"])
    ax.axhline(y=stats["match"], linestyle="--", color="gray")
    ax.axhline(y=stats["false_negative"], linestyle="--", color="gray")
    plt.ylabel("SAT Instanzen")
    plt.title("Verlgeich Minisat / WMIS qubo mit qbsolv")
    plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis"))
    plt.yticks([0, stats["match"], stats["false_negative"], numInstances])
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Gleiches Ergebnis",
                "False Positive",
                "False Negative"))
    plt.savefig(os.path.join(outputDir, "stats.png"))
    # close the figure: this function runs once per run directory and the
    # figures would otherwise accumulate in pyplot's global state
    plt.close(fig)
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,112 @@ | |||||
#!/usr/bin/env python3 | |||||
import SATquboResult | |||||
import argparse | |||||
from kSAT import kSAT | |||||
import minisatUtils as mSatU | |||||
import SATquboResult | |||||
import os | |||||
import json | |||||
def main():
    """Compare one minisat result with one qubo result and store the outcome
    as ``<instance>.cmp`` in the output directory."""
    args = __parseArguments()
    comparison = __compare(args["instancePath"],
                           args["minisatResult"],
                           args["quboResult"])
    cmpFileName = args["instanceFileName"] + ".cmp"
    __writeComparison(comparison, os.path.join(args["outputDir"], cmpFileName))
def __parseArguments():
    """Parse command-line arguments, prompting interactively for any value
    that was not supplied.

    Returns a dict with ``instancePath``, ``instanceFileName``,
    ``minisatResult``, ``quboResult`` and ``outputDir`` (absolute path).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--instance", help="instance file, has to be in DIMACS format", type=str)
    parser.add_argument("-m", "--minisat_result", help="Minisat result file", type=str)
    parser.add_argument("-q", "--qubo_result", help="Qubo result file", type=str)
    parser.add_argument("-o", "--output", help="Directory to store the comparison (optional)", type=str)
    args = parser.parse_args()

    arguments = {}
    arguments["instancePath"] = args.instance
    if arguments["instancePath"] is None:  # "is None" instead of "== None"
        arguments["instancePath"] = str(input("Instance file: "))
    arguments["instanceFileName"] = os.path.basename(arguments["instancePath"])
    arguments["minisatResult"] = args.minisat_result
    if arguments["minisatResult"] is None:
        arguments["minisatResult"] = str(input("Minisat result file: "))
    arguments["quboResult"] = args.qubo_result
    if arguments["quboResult"] is None:
        arguments["quboResult"] = str(input("Qubo result file: "))
    arguments["outputDir"] = args.output
    if arguments["outputDir"] is None:
        arguments["outputDir"] = str(input("Output directory: "))
    arguments["outputDir"] = os.path.abspath(arguments["outputDir"])
    return arguments
def __compare(instancePath, msatResultFile, quboResultFile):
    """Build the comparison record for one SAT instance: both solver verdicts
    plus variable-degree and conflict statistics of the instance itself."""
    sat = kSAT()
    sat.readDIMACS(instancePath)
    return {
        "instance": os.path.basename(instancePath),
        "minisat_satisfiable": __getMinisatSatisfiability(msatResultFile),
        "qubo_satisfiable": __getQuboSatisfiability(quboResultFile),
        "degrees_of_variables": sat.getDegreesOfVariables(),
        "conflicts_per_variable": __countConflicts(sat.getConflicts()),
    }
def __getMinisatSatisfiability(resultFile):
    """Return the satisfiability verdict stored in a minisat result file."""
    return mSatU.readMinisatResult(resultFile)["satisfiable"]
def __getQuboSatisfiability(resultFile):
    """Return True if any sample in the qubo result file satisfies the
    instance, False otherwise."""
    results = SATquboResult.readResultsFromFile(resultFile)
    # any() with a generator replaces the manual early-return loop
    return any(result.satisfiesInstance() for result in results)
def __writeComparison(comparison, outputFilePath):
    """Serialize the comparison dict as one JSON line.

    A ``None`` path means "don't persist" and is a silent no-op.
    """
    if outputFilePath is None:
        return
    # context manager guarantees the file is closed even on write errors
    with open(outputFilePath, "w+") as outputFile:
        outputFile.write(json.dumps(comparison) + "\n")
def __countConflicts(conflicts):
    """Count per variable how many conflicts it is involved in.

    Each conflict entry's first element is a pair whose second item is the
    signed variable label; the sign is stripped so both polarities count
    towards the same variable.  Returns ``{variable label: count}``.
    """
    conflictsPerVariable = {}
    for conflict in conflicts:
        varLabel = abs(conflict[0][1])
        # dict.get with default replaces the explicit membership test
        conflictsPerVariable[varLabel] = conflictsPerVariable.get(varLabel, 0) + 1
    return conflictsPerVariable
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,40 @@ | |||||
#!/usr/bin/env python3 | |||||
import scriptUtils | |||||
import compare | |||||
import glob | |||||
import os | |||||
def __main():
    """Entry point: compare all runs for every instance of the dataset."""
    __compareRuns(__parseArguments())
def __parseArguments():
    """Parse the standard dataset-directory arguments."""
    argParser = scriptUtils.ArgParser()
    argParser.addInstanceDirArg()
    return argParser.parse()
def __compareRuns(args):
    """Compare every *.dimacs instance against every run* result directory."""
    # os.path.join accepts multiple components, so the nested joins collapse
    instanceGlob = os.path.join(args["dataset_dir"], args["instance_dir"],
                                "*.dimacs")
    runGlob = os.path.join(args["dataset_dir"], args["wmis_result_dir"],
                           "run*")
    runDirs = glob.glob(runGlob)
    for instancePath in glob.glob(instanceGlob):
        __compareRunsOfInstance(instancePath, runDirs)
def __compareRunsOfInstance(instancePath, runDirs): | |||||
instanceName = os.path.basename(instancePath) | |||||
with open(instancePath) as instanceFile: | |||||
if __name__ == "__main__": | |||||
__main() |
@ -0,0 +1,141 @@ | |||||
#!/usr/bin/env python3 | |||||
import argparse | |||||
import os | |||||
import glob | |||||
import json | |||||
import numpy as np | |||||
import matplotlib.pyplot as plt | |||||
import configparser | |||||
import scriptUtils | |||||
def main():
    """Entry point: collect per-run comparison statistics and plot them."""
    arguments = __parseArguments()
    __stats(arguments["comparisonDir"], arguments["outputDir"])
def __parseArguments():
    """Parse dataset, comparison and stats-output directory arguments.

    Returns a dict with ``datasetDir`` (absolute) plus ``comparisonDir`` and
    ``outputDir`` resolved to paths inside the dataset directory.
    """
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    # fixed typo in help text: "direcotry" -> "directory"
    parser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                  help="the directory with all comparison files", type=str)
    parser.addArg(alias="outputDir", shortFlag="s", longFlag="comparison_stats_dir",
                  help="Directory to store the stats", type=str)
    arguments = parser.parse()
    arguments["datasetDir"] = os.path.abspath(arguments["datasetDir"])
    # comparison and output dirs are given relative to the dataset dir
    arguments["comparisonDir"] = os.path.join(arguments["datasetDir"],
                                              arguments["comparisonDir"])
    arguments["outputDir"] = os.path.join(arguments["datasetDir"],
                                          arguments["outputDir"])
    return arguments
def __stats(comparisonDir, outputDir):
    """Collect, print and plot statistics for every run* directory."""
    for runDir in glob.glob(os.path.join(comparisonDir, "run*")):
        runStats = __collectStats(runDir)
        print(runStats)
        __writeStats(runStats, os.path.join(outputDir, os.path.basename(runDir)))
def __collectStats(comparisonDir):
    """Classify every .cmp file as match / false_positive / false_negative,
    also recording every minisat-unsatisfiable instance under "unsat".

    Returns {category: {"count": int, "instances": [name, ...]}}.
    """
    stats = {category: {"count": 0, "instances": []}
             for category in ("match", "false_positive",
                              "false_negative", "unsat")}

    def record(category, instanceName):
        # bump the counter and remember which instance it was
        stats[category]["count"] += 1
        stats[category]["instances"].append(instanceName)

    for path in glob.glob(os.path.join(comparisonDir, "*.cmp")):
        comparison = __readComparison(path)
        minisatSat = comparison["minisat_satisfiable"]
        quboSat = comparison["qubo_satisfiable"]
        instanceName = str(os.path.basename(path)).split(".")[0]
        if minisatSat == quboSat:
            record("match", instanceName)
        elif not minisatSat and quboSat:
            record("false_positive", instanceName)
        elif minisatSat and not quboSat:
            record("false_negative", instanceName)
        if not minisatSat:
            record("unsat", instanceName)
    return stats
def __readComparison(path):
    """Load one comparison (.cmp) file, which holds a single JSON object."""
    # context manager instead of manual open/close: the file is closed even
    # if json.load raises
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Dump the raw stats as JSON ("statusCollection") and save a stacked bar
    chart as ``stats.png`` in *outputDir* (created if missing)."""
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    with open(os.path.join(outputDir,"statusCollection"), "w+") as statusFile:
        statusFile.write(json.dumps(stats))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    matchCount = stats["match"]["count"]
    falseNegativeCount = stats["false_negative"]["count"]
    falsePositiveCount = stats["false_positive"]["count"]
    numInstances = matchCount + falseNegativeCount + falsePositiveCount
    matchBar = ax.bar(x=0, height=matchCount)
    falsePositiveBar = ax.bar(x=1, height=falsePositiveCount)
    # false negatives are stacked on top of the false positives in column 1
    falseNegativeBar = ax.bar(x=1,
                              height=falseNegativeCount,
                              bottom=falsePositiveCount)
    ax.axhline(y=matchCount, linestyle="--", color="gray")
    ax.axhline(y=falseNegativeCount, linestyle="--", color="gray")
    plt.ylabel("SAT Instanzen")
    plt.title("Verlgeich Minisat / WMIS qubo mit qbsolv")
    plt.xticks([0, 1], ("Gleiches Ergebnis", "Unterschiedliches Ergebnis"))
    plt.yticks([0, matchCount, falseNegativeCount, numInstances])
    plt.legend((matchBar, falsePositiveBar, falseNegativeBar),
               ("Gleiches Ergebnis",
                "False Positive",
                "False Negative"))
    plt.savefig(os.path.join(outputDir, "stats.png"))
    # close the figure: this function runs once per run directory and the
    # figures would otherwise accumulate in pyplot's global state
    plt.close(fig)
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,51 @@ | |||||
#!/usr/bin/env python3 | |||||
import os | |||||
import configparser | |||||
import argparse | |||||
def main():
    """Create the directory skeleton for a new dataset and write its
    ``dataset.config`` describing the layout.

    ``os.mkdir`` (not makedirs with exist_ok) is used deliberately: creating
    a dataset over an existing directory should fail loudly.
    """
    args = __parseArguments()
    dirs = {"INSTANCE_DIR": "instances",
            "MINISAT_RESULT_DIR": "minisatResults",
            "WMIS_RESULT_DIR": "wmisResults",
            "COMPARISON_DIR": "comparison"}
    dirs["COMPARISON_STATS_DIR"] = os.path.join(dirs["COMPARISON_DIR"],
                                                "stats")
    config = configparser.ConfigParser()
    config["STRUCTURE"] = dirs
    os.mkdir(args["dir"])
    # stats dir last: it is nested inside the comparison dir
    for key in ("INSTANCE_DIR", "MINISAT_RESULT_DIR", "WMIS_RESULT_DIR",
                "COMPARISON_DIR", "COMPARISON_STATS_DIR"):
        os.mkdir(os.path.join(args["dir"], dirs[key]))
    # the with-statement closes the file; the old explicit close() inside
    # the with-block was redundant
    with open(os.path.join(args["dir"], "dataset.config"), "w") as configfile:
        config.write(configfile)
def __parseArguments():
    """Parse the target directory argument, prompting if absent.

    Returns ``{"dir": <absolute path>}``.
    """
    parser = argparse.ArgumentParser()
    # fixed typo in help text: "direcotry" -> "directory"
    parser.add_argument("-d", "--directory", help="the directory for the new dataset", type=str)
    args = parser.parse_args()
    # removed leftover debug print(args)
    directory = args.directory
    if directory is None:
        directory = str(input("Directory: "))
    return {"dir": os.path.abspath(directory)}
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,61 @@ | |||||
#!/usr/bin/env python3 | |||||
import randomSAT | |||||
import kSAT | |||||
import argparse | |||||
import configparser | |||||
import os | |||||
def main():
    """Generate random kSAT instances in DIMACS format.

    Values missing from the command line are prompted for interactively.
    The output directory is taken from the dataset config when available,
    then from -o/--output, then prompted.
    """
    parser = argparse.ArgumentParser()
    # fixed typo in help text: "directorey" -> "directory"
    parser.add_argument("-d", "--base_directory", help="the base directory of the new dataset; should contain a dataset.config file", type=str)
    parser.add_argument("-i", "--instances", help="number of random kSAT instances", type=int)
    parser.add_argument("-v", "--variables", help="number of variables in ksat instances", type=int)
    parser.add_argument("-c", "--clauses", help="number of clauses in ksat instances", type=int)
    parser.add_argument("--variables_per_clause", help="variables per clause in ksat instances", type=int, default=3)
    parser.add_argument("-o", "--output", help="output directory", type=str)
    args = parser.parse_args()

    baseDir = args.base_directory
    # Fall back to an empty config when no base directory was given;
    # previously `config` was left unbound in that case and the lookup
    # below raised NameError.
    config = configparser.ConfigParser()
    if baseDir is not None:
        config = __readConfig(os.path.join(baseDir, "dataset.config"))

    numberOfVariables = args.variables
    if numberOfVariables is None:
        numberOfVariables = int(input("Number of variables per instance: "))
    numberOfClauses = args.clauses
    if numberOfClauses is None:
        numberOfClauses = int(input("Number of clauses per instance: "))
    numberOfInstances = args.instances
    if numberOfInstances is None:
        numberOfInstances = int(input("Number of instances: "))

    # has_option is False when the STRUCTURE section is missing entirely,
    # so this is safe for an empty config (the old
    # `"instance_dir" in config["STRUCTURE"]` raised KeyError).
    if config.has_option("STRUCTURE", "instance_dir"):
        instanceDir = os.path.join(baseDir, config["STRUCTURE"]["instance_dir"])
    elif args.output is not None:
        instanceDir = args.output
    else:
        # final fallback so instanceDir can never stay undefined
        instanceDir = str(input("output directory: "))

    for i in range(numberOfInstances):
        ksatInstance = randomSAT.generateRandomKSAT(numberOfClauses,
                                                    numberOfVariables,
                                                    args.variables_per_clause)
        instanceFilePath = os.path.join(instanceDir, "instance_%d.dimacs" % (i))
        ksatInstance.writeDIMACS(instanceFilePath)
def __readConfig(configFilePath):
    """Load *configFilePath* if it exists; otherwise return an empty config."""
    parsedConfig = configparser.ConfigParser()
    if os.path.isfile(configFilePath):
        parsedConfig.read(configFilePath)
    return parsedConfig
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,115 @@ | |||||
#!/usr/bin/env python3 | |||||
import kSAT | |||||
import SAT2QUBO | |||||
from SATquboResult import SATquboResult | |||||
import argparse | |||||
from dwave_qbsolv import QBSolv | |||||
import os | |||||
import collections | |||||
import json | |||||
from tqdm import tqdm | |||||
import scriptUtils | |||||
def main():
    """Solve one SAT instance via the WMIS qubo and store the qbsolv samples
    under ``<resultDir>/run<N>/<instance>.out``."""
    arguments = __parseArguments()
    satInstance = kSAT.kSAT()
    print("reading ksat...")
    satInstance.readDIMACS(arguments["instancePath"])
    print()
    result = __runWMISquboOnSatInstance(satInstance)
    runDir = os.path.join(arguments["resultDir"], "run%d" % arguments["run"])
    resultPath = os.path.join(runDir, "%s.out" % arguments["instanceFileName"])
    print()
    print("writing results to file...")
    __writeResult(result, resultPath)
def __parseArguments():
    """Parse instance / result-dir / run arguments and derive the
    instance file name from the instance path."""
    parser = scriptUtils.ArgParser()
    parser.addInstanceDirArg()
    parser.addArg(alias="instancePath", shortFlag="i", longFlag="instance",
                  help="instance file, has to be in DIMACS format", type=str)
    parser.addArg(alias="resultDir", shortFlag="o", longFlag="wmis_result_dir",
                  help="the wmis result directory", type=str,
                  ignoreDatabaseConfig=False)
    parser.addArg(alias="run", shortFlag="r", longFlag="run",
                  help="results will get saved unter [instance]_[run].out", type=int)
    parsed = parser.parse()
    parsed["instanceFileName"] = os.path.basename(parsed["instancePath"])
    return parsed
def __runWMISquboOnSatInstance(satInstance):
    """Translate the instance to a WMIS qubo, run qbsolv on it and pack the
    samples into result objects."""
    print("generating wmis qubo...")
    qubo = SAT2QUBO.WMISdictQUBO(satInstance)
    print()
    print("running qbsolv...")  # fixed typo: message said "gbsolv"
    qbresult = QBSolv().sample_qubo(Q=qubo, find_max=True)
    print()
    print("packing results...")
    return __packResults(satInstance, qbresult)
def __packResults(satInstance, qbresult):
    """Convert qbsolv samples into SATquboResult objects, attaching each
    sample's occurrence count and its satisfiability verdict."""
    samples = list(qbresult.samples())
    occurrences = qbresult.data_vectors["num_occurrences"]
    packed = []
    # zip pairs each sample with its occurrence count; tqdm shows progress
    for sample, occurrence in zip(tqdm(samples), occurrences):
        quboResult = __satQuboResultFromSample(sample)
        quboResult.setOccurrences(occurrence)
        quboResult.setSatisfiesInstance(satInstance)
        packed.append(quboResult)
    return packed
def __writeResult(results, resultPath):
    """Write each result as a JSON line (blank-line separated) to
    *resultPath*, creating the parent directory if needed."""
    resultDir = os.path.dirname(resultPath)
    if not os.path.exists(resultDir):
        os.makedirs(resultDir)
    # context manager instead of manual open/close: the file is closed even
    # if serialization fails midway
    with open(resultPath, "w+") as resultFile:
        for result in tqdm(results):
            resultFile.write(json.dumps(result.toPrimitive()))
            resultFile.write("\n\n")
def __satQuboResultFromSample(sample):
    """Build a SATquboResult from one qbsolv sample ({binding: 0/1})."""
    result = SATquboResult()
    for binding in sample:
        # a binding is active exactly when its qubo bit is 1; the comparison
        # already yields a bool (old code wrapped it in a redundant ternary)
        result.addBinding(binding, sample[binding] == 1)
    return result
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,290 @@ | |||||
#!/usr/bin/env python3 | |||||
import argparse | |||||
import os | |||||
import glob | |||||
import json | |||||
import numpy as np | |||||
import matplotlib.pyplot as plt | |||||
import collections | |||||
import scriptUtils | |||||
def main():
    """Entry point: collect conflict statistics from the comparison files
    and render all plots."""
    arguments = __parseArguments()
    print(arguments)
    __stats(arguments["comparisonDir"], arguments["outputDir"])
def __parseArguments():
    """Parse dataset, comparison-dir and output-dir arguments."""
    argParser = scriptUtils.ArgParser()
    argParser.addInstanceDirArg()
    # fixed typo in help text: "direcotry" -> "directory"
    argParser.addArg(alias="comparisonDir", shortFlag="c", longFlag="comparison_dir",
                     help="the directory with all comparison files", type=str)
    argParser.addArg(alias="outputDir", shortFlag="o", longFlag="comparison_stats_dir",
                     help="Directory to store the stats", type=str)
    return argParser.parse()
def __stats(comparisonDir, outputDir):
    """Collect per-instance stats and render all plots into outputDir."""
    __writeStats(__collectStats(comparisonDir), outputDir)
def __collectStats(comparisonDir):
    """Turn every .cmp file in comparisonDir into a per-instance stats dict."""
    paths = glob.glob(os.path.join(comparisonDir, "*.cmp"))
    return [__processSingleInstance(__readComparison(path)) for path in paths]
def __processSingleInstance(comparison):
    """Compute conflict statistics for one comparison record.

    The ``result`` field classifies the instance as ``satisfiable``,
    ``false_negative`` or ``unsatisfiable``.
    """
    conflicts = comparison["conflicts_per_variable"]
    conflictArr = np.array(list(conflicts.values()))
    instanceStats = {
        "conflicts_per_variable_mean": conflictArr.mean(),
        "conflicts_per_variable_median": np.median(conflictArr),
        "conflicts_per_variable_std_dev": np.std(conflictArr),
        "conflicts_per_variable_max": conflictArr.max(),
        "conflicts_per_variable_min": conflictArr.min(),
        "conflicts_per_instance": np.sum(conflictArr),
        "raw_conflicts": list(conflictArr),
        "conflicts_to_degree_per_variable": __calcConflictsToDegree(
            conflicts, comparison["degrees_of_variables"]),
    }
    if not comparison["minisat_satisfiable"]:
        instanceStats["result"] = "unsatisfiable"
    elif __instanceIsFalseNegative(comparison):
        instanceStats["result"] = "false_negative"
    else:
        instanceStats["result"] = "satisfiable"
    return instanceStats
def __calcConflictsToDegree(conflictsPerVariable, degreesPerVariable):
    """Relate each variable's conflict count to its degree.

    For every variable computes ``conflicts / (degree / 2)**2`` and keeps
    only values <= 1 (larger values are treated as outliers and dropped).

    BUG FIX: the parameter names used to be swapped relative to the only
    call site (which passes conflicts first, degrees second), so conflicts
    and degree were interchanged in the formula. The signature now matches
    the positional call order and the formula uses the right operands.
    """
    conflictsToDegreePerVariable = []
    for varLabel, conflicts in conflictsPerVariable.items():
        degree = degreesPerVariable[varLabel]
        cnflToDeg = conflicts / (float(degree) / 2.0)**2
        if cnflToDeg <= 1:
            conflictsToDegreePerVariable.append(cnflToDeg)
    return conflictsToDegreePerVariable
def __instanceIsFalseNegative(comparison):
    """True when minisat found the instance satisfiable but the qubo did not."""
    minisatSat = comparison["minisat_satisfiable"]
    quboSat = comparison["qubo_satisfiable"]
    return minisatSat == True and quboSat == False
def __readComparison(path):
    """Load one comparison (.cmp) file, which holds a single JSON object."""
    # context manager instead of manual open/close: the file is closed even
    # if json.load raises
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Render every conflict plot and save them as PNGs in *outputDir*."""
    data = __seperateMatchesAndFalseNegatives(stats)
    # (filename, figure) pairs for all boxplot-style figures
    boxFigs = [
        ("conflicts_overview.png", __createOverviewFig(data)),
        ("conflicts_mean.png",
         __createSingleStatFig(data["mean"], "Conflicts per variable mean")),
        ("conflicts_median.png",
         __createSingleStatFig(data["median"], "Conflicts per variable median")),
        ("conflicts_max.png",
         __createSingleStatFig(data["max"], "Conflicts per variable max")),
        ("conflicts_min.png",
         __createSingleStatFig(data["min"], "Conflicts per variable min")),
        ("conflicts_std_dev.png",
         __createSingleStatFig(data["std_dev"],
                               "Conflicts per variable\nstandard deviation")),
        ("conflicts_per_instance.png",
         __createSingleStatFig(data["cnfl_per_inst"], "Conflicts per instance")),
        ("conflicts_in_relation_to_degree_1.png",
         __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree",
                               showfliers=False)),
        ("conflicts_in_relation_to_degree_2.png",
         __createSingleStatFig(data["cnflDeg"], "Conflicts in relation to degree",
                               showfliers=True)),
    ]
    histFig = __createHistogramFig(data, "raw", "Conflict per variable")
    # only the boxplot figures get the three category ticks; the histogram
    # figure keeps its numeric axes
    tickFigs = [fig for _, fig in boxFigs]
    __setBatchXticks(figures=tickFigs,
                     ticks=[1, 2, 3],
                     labels=["satisfiable",
                             "false negative",
                             "unsatisfiable"])
    __setBatchXtickLabelRotation(figures=tickFigs, rotation=30)
    for fileName, fig in boxFigs:
        fig.savefig(os.path.join(outputDir, fileName))
    histFig.savefig(os.path.join(outputDir, "conflicts_per_var_hist.png"))
def __createOverviewFig(data):
    """Four-panel boxplot overview (mean / median / max / min), panels
    sharing the y axis of the first one."""
    fig = plt.figure()
    panels = [("mean", "mean"),
              ("median", "median"),
              ("max", "max degree"),
              ("min", "min degree")]
    firstAx = None
    for index, (key, panelTitle) in enumerate(panels):
        ax = fig.add_subplot(1, 4, index + 1, sharey=firstAx)
        ax.boxplot([data[key]["satisfiable"],
                    data[key]["false_negative"],
                    data[key]["unsatisfiable"]])
        ax.set_title(panelTitle)
        if firstAx is None:
            firstAx = ax
    fig.set_size_inches(12, 8)
    fig.suptitle("Conflicts per variable overview", fontsize=16)
    return fig
def __createHistogramFig(data, subDataSet, title):
    """Histogram + horizontal boxplot per result category (three rows).

    The bin count is derived from the satisfiable series; all histogram
    axes share x with the first histogram, all boxplots with the first
    boxplot.
    """
    fig = plt.figure()
    bins = int(max(data[subDataSet]["satisfiable"]) / 5)
    firstHistAx = None
    firstBoxAx = None
    for row, category in enumerate(("satisfiable", "false_negative",
                                    "unsatisfiable")):
        histAx = fig.add_subplot(3, 2, 2 * row + 1, sharex=firstHistAx)
        histAx.hist(data[subDataSet][category], bins=bins)
        boxAx = fig.add_subplot(3, 2, 2 * row + 2, sharex=firstBoxAx)
        boxAx.boxplot(data[subDataSet][category], vert=False)
        if firstHistAx is None:
            firstHistAx, firstBoxAx = histAx, boxAx
    fig.set_size_inches(14, 10)
    fig.suptitle(title, fontsize=16)
    return fig
def __createSingleStatFig(subDataset, title, showfliers=True):
    """One boxplot figure with satisfiable / false-negative / unsatisfiable
    columns for a single statistic."""
    series = [subDataset["satisfiable"],
              subDataset["false_negative"],
              subDataset["unsatisfiable"]]
    fig = plt.figure()
    fig.add_subplot(111).boxplot(series, showfliers=showfliers)
    fig.set_size_inches(3.5, 8)
    fig.suptitle(title, fontsize=16)
    return fig
def __setBatchXticks(figures, ticks, labels):
    """Apply the same x ticks and labels to every axis of every figure."""
    for figure in figures:
        plt.setp(figure.get_axes(), xticks=ticks, xticklabels=labels)
def __setBatchXtickLabelRotation(figures, rotation):
    """Rotate the x tick labels on every axis of every given figure."""
    for figure in figures:
        for axis in figure.get_axes():
            plt.setp(axis.get_xticklabels(), rotation=rotation)
def __seperateMatchesAndFalseNegatives(stats):
    """Regroup per-instance stats into per-metric series split by result.

    Returns ``{metric: {"false_negative": [...], "satisfiable": [...],
    "unsatisfiable": [...]}}``.  Scalar metrics contribute one value per
    instance; "raw" and "cnflDeg" are flattened across instances.
    """
    categories = ("false_negative", "satisfiable", "unsatisfiable")
    metrics = ("mean", "median", "std_dev", "max", "min",
               "cnfl_per_inst", "raw", "cnflDeg")
    data = {metric: {category: [] for category in categories}
            for metric in metrics}
    # metric name -> key of the scalar value in each instance's stats
    scalarKeys = (("mean", "conflicts_per_variable_mean"),
                  ("median", "conflicts_per_variable_median"),
                  ("std_dev", "conflicts_per_variable_std_dev"),
                  ("max", "conflicts_per_variable_max"),
                  ("min", "conflicts_per_variable_min"),
                  ("cnfl_per_inst", "conflicts_per_instance"))
    for instance in stats:
        target = instance["result"]
        for metric, statKey in scalarKeys:
            data[metric][target].append(instance[statKey])
        data["raw"][target].extend(instance["raw_conflicts"])
        data["cnflDeg"][target].extend(instance["conflicts_to_degree_per_variable"])
    return data
# Run main() only when executed as a script (not on import).
if __name__ == "__main__":
    main()
@ -0,0 +1,256 @@ | |||||
#!/usr/bin/env python3 | |||||
import argparse | |||||
import os | |||||
import glob | |||||
import json | |||||
import numpy as np | |||||
import matplotlib.pyplot as plt | |||||
import collections | |||||
def main():
    """Entry point: collect degree statistics from the comparison files
    and plot them."""
    arguments = __parseArguments()
    __stats(arguments["comparisonDir"], arguments["outputDir"])
def __parseArguments():
    """Parse comparison-dir and output-dir arguments, prompting when missing.

    Both paths are returned as absolute paths.
    """
    parser = argparse.ArgumentParser()
    # fixed typo in help text: "direcotry" -> "directory"
    parser.add_argument("-d", "--directory", help="the directory with all comparison files", type=str)
    parser.add_argument("-o", "--output", help="Directory to store the stats", type=str)
    args = parser.parse_args()
    # removed leftover debug print(args); use "is None" for None checks
    comparisonDir = args.directory
    if comparisonDir is None:
        comparisonDir = str(input("Comparison directory: "))
    outputDir = args.output
    if outputDir is None:
        outputDir = str(input("Output directory: "))
    return {"comparisonDir": os.path.abspath(comparisonDir),
            "outputDir": os.path.abspath(outputDir)}
def __stats(comparisonDir, outputDir):
    """Collect per-instance degree stats and render the plots."""
    __writeStats(__collectStats(comparisonDir), outputDir)
def __collectStats(comparisonDir):
    """Turn every .cmp file in comparisonDir into a per-instance stats dict."""
    paths = glob.glob(os.path.join(comparisonDir, "*.cmp"))
    return [__processSingleInstance(__readComparison(path)) for path in paths]
def __processSingleInstance(comparison):
    """Compute degree statistics for one comparison record.

    The ``result`` field classifies the instance as ``satisfiable``,
    ``false_negative`` or ``unsatisfiable``.
    """
    degrees = comparison["degrees_of_variables"]
    degreeArr = np.array(list(degrees.values()))
    instanceStats = {
        "degree_of_variables_mean": degreeArr.mean(),
        "degree_of_variables_median": np.median(degreeArr),
        "degree_of_variables_std_dev": np.std(degreeArr),
        "degree_of_variables_max": degreeArr.max(),
        "degree_of_variables_min": degreeArr.min(),
        "variables_per_degree": __getVarsPerDegree(degreeArr),
    }
    if not comparison["minisat_satisfiable"]:
        instanceStats["result"] = "unsatisfiable"
    elif __instanceIsFalseNegative(comparison):
        instanceStats["result"] = "false_negative"
    else:
        instanceStats["result"] = "satisfiable"
    return instanceStats
def __instanceIsFalseNegative(comparison):
    """A false negative: minisat says satisfiable, the qubo result says not."""
    minisatVerdict = comparison["minisat_satisfiable"]
    quboVerdict = comparison["qubo_satisfiable"]
    return minisatVerdict == True and quboVerdict == False
def __getVarsPerDegree(degreeArr):
    """Histogram of degrees: maps degree -> number of variables with that degree.

    Rewritten without collections.Counter: `collections` is not imported
    anywhere visible in this module, so the old code raised NameError at
    runtime — TODO confirm against the full import block. The returned
    plain dict is equal to what the Counter-backed version produced.
    """
    varsPerDegree = {}
    for degree in degreeArr:
        varsPerDegree[degree] = varsPerDegree.get(degree, 0) + 1
    return varsPerDegree
def __readComparison(path):
    """Load a single *.cmp comparison file (JSON) into a dict."""
    with open(path, "r") as cmpFile:
        return json.load(cmpFile)
def __writeStats(stats, outputDir):
    """Plot the collected per-instance stats and save three PNGs to outputDir.

    fig1: boxplots of mean/median/max/min degree per result category.
    fig2: boxplot of the degree standard deviation per category.
    fig3: degree histograms (one subplot per category).
    NOTE(review): assumes outputDir already exists — savefig does not create it.
    """
    # Figure 1: four degree metrics side by side, sharing one y axis.
    fig1 = plt.figure()
    data = __seperateMatchesAndFalseNegatives(stats)
    ax0 = fig1.add_subplot(141,)
    ax0.boxplot([data["mean"]["satisfiable"],
                 data["mean"]["false_negative"],
                 data["mean"]["unsatisfiable"]])
    ax0.set_title("mean")
    ax1 = fig1.add_subplot(142, sharey=ax0)
    ax1.boxplot([data["median"]["satisfiable"],
                 data["median"]["false_negative"],
                 data["median"]["unsatisfiable"]])
    ax1.set_title("median")
    ax2 = fig1.add_subplot(143, sharey=ax0)
    ax2.boxplot([data["max"]["satisfiable"],
                 data["max"]["false_negative"],
                 data["max"]["unsatisfiable"]])
    ax2.set_title("max degree")
    ax3 = fig1.add_subplot(144, sharey=ax0)
    ax3.boxplot([data["min"]["satisfiable"],
                 data["min"]["false_negative"],
                 data["min"]["unsatisfiable"]])
    ax3.set_title("min degree")
    # Figure 2: standard deviation gets its own figure/scale.
    fig2 = plt.figure()
    ax4 = fig2.add_subplot(111)
    ax4.boxplot([data["std_dev"]["satisfiable"],
                 data["std_dev"]["false_negative"],
                 data["std_dev"]["unsatisfiable"]])
    ax4.set_title("standard deviation")
    # Figure 3: normalized degree histograms, one subplot per category.
    _BINS_ = 23
    fig3 = plt.figure()
    ax5 = fig3.add_subplot(311)
    varsPerDegreeSat = __accumulateVarsPerDegree(data["vars_per_degree"]["satisfiable"])
    ax5.hist(varsPerDegreeSat, density=True, bins=_BINS_)
    ax6 = fig3.add_subplot(312, sharex=ax5)
    varsPerDegreeFP = __accumulateVarsPerDegree(data["vars_per_degree"]["false_negative"])
    ax6.hist(varsPerDegreeFP, density=True, bins=_BINS_)
    # NOTE(review): shares x with ax6 while the others share with ax5 —
    # transitively equivalent, but likely meant to be sharex=ax5.
    ax7 = fig3.add_subplot(313, sharex=ax6)
    varsPerDegreeUnsat = __accumulateVarsPerDegree(data["vars_per_degree"]["unsatisfiable"])
    ax7.hist(varsPerDegreeUnsat, density=True, bins=_BINS_)
    # Common category labels for all boxplot axes.
    plt.setp([ax0, ax1, ax2, ax3, ax4], xticks=[1, 2, 3], xticklabels=["satisfiable",
                                                                       "false negative",
                                                                       "unsatisfiable"])
    plt.setp(ax0.get_xticklabels(), rotation=45)
    plt.setp(ax1.get_xticklabels(), rotation=45)
    plt.setp(ax2.get_xticklabels(), rotation=45)
    plt.setp(ax3.get_xticklabels(), rotation=45)
    plt.setp(ax4.get_xticklabels(), rotation=45)
    fig1.set_size_inches(12, 8)
    fig1.suptitle("Degrees of variables", fontsize=16)
    fig2.set_size_inches(4, 8)
    fig3.set_size_inches(5, 12)
    fig1.savefig(os.path.join(outputDir, "degrees1.png"))
    fig2.savefig(os.path.join(outputDir, "degrees2.png"))
    fig3.savefig(os.path.join(outputDir, "degrees3.png"))
    plt.show()
def __accumulateVarsPerDegree(listOfVarsPerDegreeDicts):
    """Flatten {degree: count} dicts into a list with each degree repeated count times."""
    accumulated = []
    for varsPerDegree in listOfVarsPerDegreeDicts:
        for degree, count in varsPerDegree.items():
            accumulated.extend([degree] * count)
    return accumulated
def __compressVarsPerDegree(listOfVarsPerDegreeDicts):
    """Merge {degree: count} dicts and normalize counts to fractions of all variables."""
    compressed = {}
    countOfVars = 0
    for varsPerDegree in listOfVarsPerDegreeDicts:
        for degree, count in varsPerDegree.items():
            compressed[degree] = compressed.get(degree, 0.0) + float(count)
            countOfVars += count
    # Sanity value: the normalized fractions should sum to ~1.0.
    check = 0
    for degree in compressed:
        compressed[degree] /= countOfVars
        check += compressed[degree]
    print("check: ", check)
    return compressed
def __seperateMatchesAndFalseNegatives(stats):
    """Group per-instance stats by result category, one list per metric.

    Returns {metric: {"false_negative": [...], "satisfiable": [...],
    "unsatisfiable": [...]}} for the six degree metrics.
    """
    metricToKey = {
        "mean": "degree_of_variables_mean",
        "median": "degree_of_variables_median",
        "std_dev": "degree_of_variables_std_dev",
        "max": "degree_of_variables_max",
        "min": "degree_of_variables_min",
        "vars_per_degree": "variables_per_degree",
    }
    data = {metric: {"false_negative": [],
                     "satisfiable": [],
                     "unsatisfiable": []}
            for metric in metricToKey}
    for instance in stats:
        target = instance["result"]
        for metric, key in metricToKey.items():
            data[metric][target].append(instance[key])
    return data
# Script entry point.
if __name__ == "__main__":
    main()
@ -0,0 +1,47 @@ | |||||
#!/usr/bin/env python3 | |||||
import kSAT | |||||
import minisatUtils as mSatU | |||||
import argparse | |||||
def main():
    """Check whether a minisat model actually satisfies its DIMACS instance."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--instance", help="instance file, has to be in DIMACS format", type=str)
    parser.add_argument("-r", "--result", help="minisat result file", type=str)
    args = parser.parse_args()
    # Fall back to interactive prompts for any flag not given on the CLI.
    instancePath = args.instance if args.instance is not None else str(input("Instance file: "))
    minisatResultFile = args.result if args.result is not None else str(input("Minisat result file: "))
    print("Checking...")
    print("Instance:\t%s" % instancePath)
    print("Minisat result:\t%s" % minisatResultFile)
    checkStr = __checkResult(instancePath, minisatResultFile)
    print("check:\t\t%s" % checkStr)
def __checkResult(instancePath, minisatResultFile):
    """Return a human-readable verdict on whether minisat's model satisfies the instance."""
    sat = kSAT.kSAT()
    sat.readDIMACS(instancePath)
    minisatResult = mSatU.readMinisatResult(minisatResultFile)
    if not minisatResult["satisfiable"]:
        return "unsatisfiable"
    if sat.checkAssignment(minisatResult["assignments"]):
        return "results do match"
    return "results do NOT match"
# Script entry point.
if __name__ == "__main__":
    main()
@ -0,0 +1,22 @@ | |||||
#!/usr/bin/env python3
# Smoke test: generate a random k-SAT instance and re-verify each conflict
# it reports by inspecting the clauses involved.
import randomSAT as rdSAT
kSATInstance = rdSAT.generateRandomKSAT(5, 4, 3)
print(kSATInstance.toString())
print("conflicts:")
# A conflict is ((clauseIdx1, binding1), (clauseIdx2, binding2)); it is
# confirmed when clause2 contains the negation of binding1.
for conflict in kSATInstance.getConflicts():
    conflictVerified = False
    clause1 = kSATInstance.getClause(conflict[0][0])  # NOTE(review): unused
    clause2 = kSATInstance.getClause(conflict[1][0])
    for binding in clause2:
        if binding == conflict[0][1] * -1:
            conflictVerified = True
    print(conflict[0], conflict[1], conflictVerified)
@ -0,0 +1,13 @@ | |||||
#!/usr/bin/env python3
# Smoke test: convert a small random k-SAT instance into a WMIS QUBO and
# dump every coupler with its weight.
import SAT2QUBO as s2q
import randomSAT as rs
ksatInstance = rs.generateRandomKSAT(2, 4, 3)
qubo = s2q.WMISdictQUBO(ksatInstance)
print(ksatInstance.toString())
for label in qubo:
    print(label, qubo[label])
@ -0,0 +1,105 @@ | |||||
#!/usr/bin/env python3 | |||||
import numpy as np | |||||
import kSAT | |||||
from tqdm import tqdm | |||||
import math | |||||
__VERTEX_WEIGHT__ = 1 | |||||
__EDGE_WEIGHT__ = -2 | |||||
def WMISdictQUBO(kSATInstance):
    """Build a WMIS-style QUBO dict for a k-SAT instance.

    Every (clause, literal) pair becomes a node weighted __VERTEX_WEIGHT__;
    literals of the same clause are pairwise coupled (a "triangle" for
    3-SAT), and conflicting literals across clauses are coupled, both with
    __EDGE_WEIGHT__. Keys are (label, label) tuples.
    """
    quboInstance = {}
    for clauseIndex in tqdm(range(kSATInstance.getNumberOfClauses())):
        clause = kSATInstance.getClause(clauseIndex)
        # build triangles
        for varIndexInClause in range(len(clause)):
            label = kSATInstance.getLableOfBinding(clauseIndex,
                                                   clause[varIndexInClause])
            quboInstance[(label, label)] = __VERTEX_WEIGHT__
            for i in range(varIndexInClause + 1, len(clause)):
                targetLabel = kSATInstance.getLableOfBinding(clauseIndex,
                                                             clause[i])
                quboInstance[(label, targetLabel)] = __EDGE_WEIGHT__
    # connect conflicts
    for conflict in kSATInstance.getConflicts():
        quboInstance[(conflict[0], conflict[1])] = __EDGE_WEIGHT__
    return quboInstance
class QuboWriter:
    """Serializes a QUBO dict ({(label1, label2): weight}) to a qbsolv-style
    text file, mapping each distinct label to a consecutive node index.

    Fixes: removed a leftover debug print of the internal label/index dict
    from __init__, and write() now uses a context manager so the file is
    closed even if a write fails.
    """
    def __init__(self, qubo):
        self.__labelIndexDict = {}
        self.__qubo = qubo
        # Assign node indices up front so node and coupler lines agree.
        self.__initLabelIndexDict(self.__qubo)

    def __initLabelIndexDict(self, qubo):
        # Indices are handed out in first-seen coupler order.
        indexCount = 0
        for coupler in qubo:
            label1 = coupler[0]
            label2 = coupler[1]
            if label1 not in self.__labelIndexDict:
                self.__labelIndexDict[label1] = indexCount
                indexCount += 1
            if label2 not in self.__labelIndexDict:
                self.__labelIndexDict[label2] = indexCount
                indexCount += 1

    def write(self, filePath):
        """Write header, diagonal (node) entries, then off-diagonal couplers."""
        with open(filePath, "w") as quboFile:
            self.__writeQuboFileHeader(quboFile)
            self.__writeQuboFileNodes(quboFile)
            self.__writeQuboFileCouplers(quboFile)

    def __writeQuboFileHeader(self, quboFile):
        # Assumes every node has a diagonal entry in the qubo dict, so the
        # coupler count is simply the remainder.
        numberOfNodes = len(self.__labelIndexDict)
        numberOfCouplers = len(self.__qubo) - numberOfNodes
        quboFile.write("c\n")
        quboFile.write("c this is a generated qubo file\n")
        quboFile.write("c\n")
        quboFile.write("p qubo 0 %d %d %d\n" %(numberOfNodes,
                                               numberOfNodes,
                                               numberOfCouplers))

    def __writeQuboFileNodes(self, quboFile):
        for label in self.__labelIndexDict:
            self.__writeCoupler(quboFile, (label, label))

    def __writeCoupler(self, quboFile, coupler):
        indices = self.__getNodeIndicesFromCoupler(coupler)
        quboFile.write("%d %d %d\n" % (indices[0],
                                       indices[1],
                                       self.__qubo[coupler]))

    def __getNodeIndicesFromCoupler(self, coupler):
        # Emit the smaller index first, as the file format expects.
        index1 = self.__labelIndexDict[coupler[0]]
        index2 = self.__labelIndexDict[coupler[1]]
        if index1 <= index2:
            return [index1, index2]
        else:
            return [index2, index1]

    def __writeQuboFileCouplers(self, quboFile):
        for coupler in self.__qubo:
            if coupler[0] != coupler[1]:
                self.__writeCoupler(quboFile, coupler)
@ -0,0 +1,168 @@ | |||||
import json | |||||
from kSAT import kSAT | |||||
class SATquboResult:
    """One sample from a QUBO solver mapped back onto SAT variables.

    Tracks, per variable label, which (clause, sign) bindings were active,
    whether any variable carries both signs (a conflict), and whether the
    derived assignment satisfies the original SAT instance. The signed
    assignment set is recomputed lazily after mutations.
    """
    def __init__(self):
        self.__assignments = set()          # cached signed labels (+v true, -v false)
        self.__activeBindings = {}          # varLabel -> list of (clause, sign)
        self.__hasConflicts = False         # set once a var is bound with both signs
        self.__occurrences = 1              # how often the solver returned this sample
        self.__satisfiesInstance = False
        self.__assignmentsUpToDate = True   # lazy-cache flag for __assignments
    def satisfiesInstance(self):
        return self.__satisfiesInstance
    def setSatisfiesInstance(self, satInstance):
        """Evaluate this result's assignment against *satInstance* and cache the verdict."""
        evaluation = satInstance.checkAssignment(self.getAssignmentsWithoutPositionIndex())
        self.__satisfiesInstance = evaluation
    def addBinding(self, binding, isActive):
        """Record one (clause, signed label) binding.

        Inactive bindings only register the variable (so it later defaults
        to a negative assignment); active ones store the sign and refresh
        the conflict flag.
        """
        self.__assignmentsUpToDate = False
        clause = binding[0]
        assignment = binding[1]
        varLabel = abs(assignment)
        if varLabel not in self.__activeBindings:
            self.__activeBindings[varLabel] = []
        if not isActive:
            return
        bindingSign = -1 if assignment < 0 else 1
        self.__activeBindings[varLabel].append((clause, bindingSign))
        self.__checkForConflictAtVar(varLabel)
    def __checkForConflictAtVar(self, varLabel):
        # Flag the whole result as conflicted if varLabel is bound with both signs.
        if varLabel not in self.__activeBindings:
            return
        sign = None
        for assignment in self.__activeBindings[varLabel]:
            if sign != None:
                if sign * assignment[1] < 0:
                    self.__hasConflicts = True
                    return
            sign = assignment[1]
    def getAssignments(self):
        """Set of signed labels; recomputed lazily after addBinding calls."""
        if not self.__assignmentsUpToDate:
            self.__updateAssignments()
        return self.__assignments
    def __updateAssignments(self):
        self.__assignments = set()
        for varLabel in self.__activeBindings:
            bindings = self.__activeBindings[varLabel]
            if not bindings:
                # Variable never actively bound: default to "false".
                self.__assignments.add(-1 * varLabel)
            for binding in bindings:
                sign = binding[1]
                self.__assignments.add(sign * varLabel)
        self.__assignmentsUpToDate = True
    def getAssignmentsSortedByVarIndex(self):
        return sorted(self.getAssignments(), key=abs)
    def getAssignmentsWithoutPositionIndex(self):
        """Assignment as a 0/1 list, index i holding variable i+1.

        NOTE(review): assumes every label 1..n occurs exactly once in the
        sorted list; a conflicted variable appears twice and would shift
        the positions — verify at call sites.
        """
        assignments = self.getAssignmentsSortedByVarIndex()
        for i in range(len(assignments)):
            assignments[i] = 0 if assignments[i] < 0 else 1
        return assignments
    def getActiveBindings(self):
        return self.__activeBindings
    def hasConflicts(self):
        return self.__hasConflicts
    def setOccurrences(self, occurrences):
        self.__occurrences = occurrences
    def getOccurrences(self):
        return self.__occurrences
    def toPrimitive(self):
        """JSON-serializable snapshot of this result."""
        primitive = {}
        primitive["assignments"] = self.getAssignmentsSortedByVarIndex()
        primitive["hasConflicts"] = self.__hasConflicts
        primitive["activeBindings"] = self.getActiveBindings()
        primitive["satisfiesInstance"] = self.__satisfiesInstance
        return primitive
    def fromPrimitive(self, primitive):
        """Rebuild state from toPrimitive() output (JSON turns int keys into strings)."""
        for varLabel in primitive["activeBindings"]:
            bindings = primitive["activeBindings"][varLabel]
            if not bindings:
                self.addBinding((-1, int(varLabel)), False)
            for binding in bindings:
                clause = binding[0]
                sign = binding[1]
                self.addBinding((clause, sign * int(varLabel)), True)
        self.__satisfiesInstance = primitive["satisfiesInstance"]
def readAssignmentsFromFile(resultFilePath):
    """Parse one assignment list per non-blank JSON line of a result file."""
    with open(resultFilePath, "r") as resultFile:
        return [__parseAssignmentFromLine(line)
                for line in resultFile
                if line.strip()]
def __parseAssignmentFromLine(line):
    """Convert one JSON result line into a 0/1 list (index i = variable i+1)."""
    result = json.loads(line)
    signedVars = result["assignments"]
    numVars = abs(max(signedVars, key=abs))
    assignment = [None] * numVars
    for signedVar in signedVars:
        assignment[abs(signedVar) - 1] = 1 if signedVar > 0 else 0
    return assignment
def readResultsFromFile(resultFilePath):
    """Parse one SATquboResult per non-blank JSON line of a result file.

    Fix: the previous version never closed the file handle on any path; a
    context manager now guarantees it is closed.
    """
    results = []
    with open(resultFilePath, "r") as resultFile:
        for line in resultFile:
            if line.strip():
                result = SATquboResult()
                result.fromPrimitive(json.loads(line))
                results.append(result)
    return results
@ -0,0 +1,23 @@ | |||||
# Outcome codes for a minisat-vs-qubo comparison.
SATISFIABLE = 0
UNSATISFIABLE = 1
FALSE_NEGATIVE = 2
FALSE_POSITIVE = 3
def getComparisonStatus(comparison):
    """Classify a comparison dict by its two satisfiability verdicts."""
    minisatSat = comparison["minisat_satisfiable"]
    quboSat = comparison["qubo_satisfiable"]
    if minisatSat and quboSat:
        return SATISFIABLE
    if not minisatSat and not quboSat:
        return UNSATISFIABLE
    if minisatSat:
        return FALSE_NEGATIVE
    return FALSE_POSITIVE
@ -0,0 +1,186 @@ | |||||
#!/usr/bin/env python3 | |||||
import numpy as np | |||||
class kSAT:
    """A k-SAT formula in CNF: clauses are lists of signed, 1-based variable
    labels; a negative label is a negated literal."""
    def __init__(self):
        self.__clauses = []        # list of clauses (lists of signed labels)
        self.__numVariables = 0    # largest variable label seen so far
    def getBindings(self):
        """Return the raw clause list (each clause a list of signed labels)."""
        return self.__clauses
    def getNumberOfClauses(self):
        return len(self.__clauses)
    def getClause(self, clauseIndex):
        return self.__clauses[clauseIndex]
    def addClause(self, clause):
        """Append *clause* and grow the variable count to the largest label seen."""
        #counting variables (assuming variables are assigned
        # consistently from 1 to numTotalVars)
        for varBinding in clause:
            if np.absolute(varBinding) > self.__numVariables:
                self.__numVariables = np.absolute(varBinding)
        self.__clauses.append(clause)
    def getDegreesOfVariables(self):
        """Map each variable label to its number of literal occurrences."""
        degrees = {}
        for clause in self.__clauses:
            for binding in clause:
                varLabel = abs(binding)
                if varLabel not in degrees:
                    degrees[varLabel] = 1
                else:
                    degrees[varLabel] += 1
        return degrees
    # NOTE(review): the first parameter acts as `self`; callers use this as
    # a bound method: sat.evaluate(assignments).
    def evaluate(satInstance, assignments):
        """Check every assignment in *assignments*; returns a list of booleans."""
        evaluations = []
        for assignment in assignments:
            evaluations.append(satInstance.checkAssignment(assignment))
        return evaluations
    def checkAssignment(self, assignment):
        """True iff *assignment* (0/1 or bools, index = label - 1) satisfies
        every clause; incomplete assignments print a warning and return False."""
        if not self.__assignmentIsComplete(assignment):
            print("wrong number of variables in assignment")
            return False
        for clause in self.__clauses:
            clauseResult = False
            for binding in clause:
                varIndex = np.absolute(binding) - 1
                if ((binding > 0 and assignment[varIndex] == True) or
                    (binding < 0 and assignment[varIndex] == False)):
                    clauseResult = True
            if clauseResult == False:
                return False
        return True
    def __assignmentIsComplete(self, assignment):
        # Complete = right length and no unassigned (None) slots.
        if (len(assignment) != self.getNumberOfVariables() or
            None in assignment):
            return False
        return True
    def getNumberOfVariables(self):
        return self.__numVariables
    def toString(self):
        """Render the formula as a product of clause strings, e.g. (x1 + !x2) * (...)."""
        kSATString = ""
        for clauseIndex in range(len(self.__clauses) - 1):
            kSATString += self.__kSATClauseToString(clauseIndex)
            kSATString += " * "
        kSATString += self.__kSATClauseToString(len(self.__clauses) - 1)
        return kSATString
    def getLableOfBinding(self, clauseIndex, binding):
        """Unique label of one literal occurrence: (clause index, signed label)."""
        return (clauseIndex, binding)
    def getConflicts(self):
        """List pairs of conflicting literal occurrences: a binding in one
        clause vs. the opposite-signed binding in the same or a later clause."""
        conflicts = []
        for clauseIndex in range(len(self.__clauses)):
            clause = self.__clauses[clauseIndex]
            for binding in clause:
                clauseToCheckIndex = 0  # NOTE(review): dead assignment, overwritten below
                #search for conflict with binding
                for clauseToCheckIndex in range(clauseIndex, len(self.__clauses)):
                    for bindingToCheck in self.__clauses[clauseToCheckIndex]:
                        if binding == bindingToCheck * -1:
                            conflLable1 = self.getLableOfBinding(clauseIndex,
                                                                 binding)
                            conflLable2 = self.getLableOfBinding(clauseToCheckIndex,
                                                                 bindingToCheck)
                            conflicts.append((conflLable1, conflLable2))
        return conflicts
    def writeDIMACS(self, path):
        """Write the instance in DIMACS CNF format to *path*."""
        outputFile = open(path, "w")
        outputFile.write("c A SAT instance\n")
        outputFile.write("p cnf %d %d \n" % (self.getNumberOfVariables(),
                                             self.getNumberOfClauses()))
        for clause in self.getBindings():
            for binding in clause:
                outputFile.write("%d " % binding)
            outputFile.write("0\n")
        outputFile.close()
    def readDIMACS(self, path):
        """Reset this instance and load clauses from a DIMACS CNF file."""
        inputFile = open(path, "r")
        line = inputFile.readline()
        self.reset()
        while line != "":
            # Skip comment ("c") and problem ("p") lines; drop the trailing 0.
            if line[0] != "c" and line[0] != "p":
                bindings = [int(binding) for binding in line.split()]
                self.addClause(bindings[:len(bindings) -1])
            line = inputFile.readline()
        inputFile.close()
    def reset(self):
        """Drop all clauses and the variable count."""
        self.__clauses = []
        self.__numVariables = 0
    def __kSATClauseToString(self, clauseIndex):
        # Render one clause as "(lit + lit + ...)".
        clause = self.__clauses[clauseIndex]
        varCount = 0
        isFirstVar = True;
        clauseString = "(";
        for weight in clause:
            varCount += 1
            if not isFirstVar:
                clauseString += " + "
            clauseString += self.__kSATVariableToString(weight)
            isFirstVar = False
        clauseString += ")"
        return clauseString
    def __kSATVariableToString(self, weight):
        # "x<label>", with a "!" prefix for negated literals.
        name = "x%d" % np.absolute(weight)
        if weight < 0:
            return "!" + name
        else:
            return name
@ -0,0 +1,34 @@ | |||||
def readMinisatResult(path):
    """Read a minisat output file: a 'SAT'/'UNSAT' line, then (if SAT) the model line.

    Returns {"satisfiable": bool, "assignments": [bool, ...]}.
    """
    result = {"assignments": [], "satisfiable": False}
    with open(path) as resultFile:
        firstLine = resultFile.readline()
        if firstLine.strip() == "SAT":
            result["satisfiable"] = True
            result["assignments"] = __parseVarAssignments(resultFile.readline())
    return result
def __parseVarAssignments(line):
    """Parse minisat's space-separated signed literals (terminated by "0")
    into a list of booleans (True = positive literal).

    Returns [] and prints a warning when the terminating "0" is missing.
    """
    assignmentStrings = line.split()
    trailer = assignmentStrings.pop()
    assignments = []
    if trailer == "0":
        for assignmentStr in assignmentStrings:
            assignments.append(int(assignmentStr) > 0)
    else:
        # Fixed: the old code did print("...%s", line) — passing the format
        # string and the line as two separate arguments instead of
        # interpolating the line into the message.
        print("Bad format of assignment string:\n %s" % line)
    return assignments
@ -0,0 +1,66 @@ | |||||
#!/usr/bin/env python3 | |||||
import numpy as np | |||||
import random | |||||
import kSAT | |||||
def generateRandomKSAT(numberOfClauses,
                       numberOfVariables,
                       numberOfVariablesPerClause):
    """Generate a random k-SAT instance.

    Every variable is first placed into at least one clause, then each
    clause is filled up to numberOfVariablesPerClause distinct variables
    and random signs are applied; duplicate clauses are re-rolled.

    NOTE(review): assumes numberOfClauses * numberOfVariablesPerClause >=
    numberOfVariables, otherwise the placement loop cannot terminate —
    confirm at call sites.
    """
    instance = kSAT.kSAT()
    clauses = [[] for i in range(numberOfClauses)]
    #make sure every variable is bound to at least one clause
    for varIndex in range(numberOfVariables):
        clauseIndex = random.choice(range(numberOfClauses))
        # Re-roll while the chosen clause is full or already holds this variable.
        while (len(clauses[clauseIndex]) >= numberOfVariablesPerClause or
               varIndex + 1 in clauses[clauseIndex]):
            clauseIndex = random.choice(range(numberOfClauses))
        clauses[clauseIndex].append(varIndex + 1)
    #fill in the missing bindings
    for clause in clauses:
        tmpClause = []
        clauseIsUnique = False
        while not clauseIsUnique:
            tmpClause = clause.copy()
            numRemainingBindings = numberOfVariablesPerClause - len(tmpClause)
            variablesNotYetInClause = __getVariablesNotYetInClause(tmpClause,
                                                                   numberOfVariables)
            remainingBindings = random.sample(variablesNotYetInClause,
                                              numRemainingBindings)
            tmpClause += remainingBindings
            # Apply a random sign to every literal, then accept only if the
            # signed clause does not duplicate an existing one.
            for i in range(len(tmpClause)):
                tmpClause[i] *= random.choice([-1, 1])
            if tmpClause not in clauses:
                clauseIsUnique = True
        instance.addClause(tmpClause)
    return instance
def __getVariablesNotYetInClause(clause, numberOfTotalVars):
    """Return the variable labels 1..numberOfTotalVars missing from *clause*.

    Assumes *clause* holds positive labels in ascending order (true at the
    call site, where signs are applied only afterwards).
    """
    missingVariables = []
    lowerBound = 1
    for boundVar in clause:
        # Everything between the previous clause member and this one is missing.
        missingVariables.extend(range(lowerBound, boundVar))
        lowerBound = boundVar + 1
    missingVariables.extend(range(lowerBound, numberOfTotalVars + 1))
    return missingVariables
@ -0,0 +1,86 @@ | |||||
import configparser | |||||
import os | |||||
import argparse | |||||
def readConfig(configFilePath):
    """Load an INI config file; returns an empty ConfigParser when the file is missing."""
    parsedConfig = configparser.ConfigParser()
    if os.path.isfile(configFilePath):
        parsedConfig.read(configFilePath)
    return parsedConfig
class ArgParser:
    """argparse wrapper that can fill arguments from a dataset config file.

    Per-flag precedence in parse(): command-line value > dataset-config
    value > interactive prompt.

    Fix: __processFlag used `tmpValue == input(...)` — a no-op comparison —
    so the interactive answer was discarded and None returned; it is now an
    assignment.
    """
    def __init__(self):
        self.__flags = {}              # alias -> flag metadata
        self.__parser = argparse.ArgumentParser()
        self.__instanceDirArgSet = False
        self.__config = None           # dataset config, only alive during parse()
        self.__parsedArgs = {}

    def addArg(self, alias,
               shortFlag,
               longFlag,
               help,
               type,
               default=None,
               ignoreDatabaseConfig=False):
        """Register a flag; *alias* is the key under which parse() returns its value."""
        self.__flags[alias] = {"longFlag": longFlag,
                               "hasDefault": False,
                               "ignoreDatabaseConfig": ignoreDatabaseConfig}
        if default != None:
            self.__flags[alias]["hasDefault"] = True
        self.__parser.add_argument("-%s" % shortFlag,
                                   "--%s" % longFlag,
                                   help=help,
                                   type=type,
                                   default=default)

    def addInstanceDirArg(self):
        """Register the dataset-directory flag; when present at parse time the
        other flags can be pre-filled from <dataset_dir>/dataset.config."""
        self.__instanceDirArgSet = True
        self.addArg(alias="datasetDir", shortFlag="d", longFlag="dataset_dir",
                    help="the base direcotry of the dataset; if this flag is given the others can be omitted",
                    type=str, ignoreDatabaseConfig=True)

    def parse(self):
        """Parse argv and return {alias: value}, applying the precedence above."""
        self.__parsedArgs = {}
        args = vars(self.__parser.parse_args())
        if self.__instanceDirArgSet:
            self.__config = readConfig(os.path.join(args["dataset_dir"],
                                                    "dataset.config"))
            self.__parseDatasetConfig()
        for alias, flag in self.__flags.items():
            self.__parsedArgs[alias] = self.__processFlag(args, flag)
        self.__config = None
        return self.__parsedArgs

    def __parseDatasetConfig(self):
        # Seed parsed args with the [STRUCTURE] section, keyed by long flag name.
        for flag, value in self.__config["STRUCTURE"].items():
            self.__parsedArgs[flag] = value

    def __processFlag(self, args, flag):
        """Resolve one flag's value; prompts interactively as a last resort."""
        longFlag = flag["longFlag"]
        tmpValue = self.__parsedArgs[longFlag] if longFlag in self.__parsedArgs else None
        if flag["ignoreDatabaseConfig"] == True:
            tmpValue = None
        if args[longFlag]:
            tmpValue = args[longFlag]
        if tmpValue == None:
            # Fixed: was `tmpValue == input(...)`, which compared and threw
            # the answer away.
            tmpValue = input("pass arguement %s: " % longFlag)
        return tmpValue
@ -0,0 +1,38 @@ | |||||
#!/usr/bin/env python3 | |||||
from kSAT import kSAT | |||||
import SATquboResult | |||||
import argparse | |||||
def main():
    """Evaluate a WMIS qubo result file against its DIMACS SAT instance."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--instance", help="instance file, has to be in DIMACS format", type=str)
    parser.add_argument("-r", "--result", help="WMIS qubo result file", type=str)
    args = parser.parse_args()
    # Fall back to interactive prompts for any flag not given on the CLI.
    instancePath = args.instance if args.instance is not None else str(input("Instance file: "))
    resultFile = args.result if args.result is not None else str(input("WMIS qubo result file: "))
    __verify(instancePath, resultFile)
def __verify(instancePath, resultFile):
    """Print each qubo assignment next to whether it satisfies the SAT instance."""
    sat = kSAT()
    sat.readDIMACS(instancePath)
    assignments = SATquboResult.readAssignmentsFromFile(resultFile)
    evaluations = sat.evaluate(assignments)
    # evaluate() yields one verdict per assignment, so the lists line up.
    for assignment, evaluation in zip(assignments, evaluations):
        print(assignment, evaluation)
# Script entry point.
if __name__ == "__main__":
    main()