Random scattered analysis

This commit is contained in:
LSaldyt
2017-10-19 20:37:40 -07:00
parent 176e6cd4e2
commit 397d49cc58
5 changed files with 20 additions and 13 deletions

View File

@ -89,6 +89,7 @@ class Copycat(object):
d['avgtemp'] = d.pop('sumtemp') / d['count']
d['avgtime'] = d.pop('sumtime') / d['count']
print('The formula {} provided:'.format(formula))
print('Average difference: {}'.format(self.temperature.getAverageDifference()))
pprint(answers)
return answers

View File

@ -30,7 +30,7 @@ class Rule(WorkspaceStructure):
return
averageDepth = (self.descriptor.conceptualDepth +
self.relation.conceptualDepth) / 2.0
averageDepth **= 1.1
averageDepth **= 1.1 # LSaldyt: This value (1.1) seems 100% contrived.
# see if the object corresponds to an object
# if so, see if the descriptor is present (modulo slippages) in the
# corresponding object
@ -45,15 +45,15 @@ class Rule(WorkspaceStructure):
self.internalStrength = 0.0
return
sharedDescriptorTerm = 100.0
conceptual_height = (100.0 - self.descriptor.conceptualDepth) / 10.0
sharedDescriptorWeight = conceptual_height ** 1.4
conceptual_height = (100.0 - self.descriptor.conceptualDepth) / 10.0 # LSaldyt: 10?
sharedDescriptorWeight = conceptual_height ** 1.4 # LSaldyt: 1.4 is also seemingly contrived
depthDifference = 100.0 - abs(self.descriptor.conceptualDepth -
self.relation.conceptualDepth)
weights = ((depthDifference, 12),
(averageDepth, 18),
(sharedDescriptorTerm, sharedDescriptorWeight))
weights = ((depthDifference, 12), # LSaldyt: ???
(averageDepth, 18), # ????
(sharedDescriptorTerm, sharedDescriptorWeight)) # 12 and 18 can be reduced to 2 and 3, depending on sharedDescriptorWeight
self.internalStrength = formulas.weightedAverage(weights)
if self.internalStrength > 100.0:
if self.internalStrength > 100.0: # LSaldyt: A better formula wouldn't need to do this.
self.internalStrength = 100.0
def ruleEqual(self, other):

View File

@ -58,7 +58,7 @@ def _averaged_alt(temp, prob):
def _working_best(temp, prob):
    """Return *prob* adjusted for *temp* via the 'best' weighting formula.

    The probability is sharpened (raised to power ``r``) below 0.5 and
    flattened (power ``1/r``) above it, then blended with the raw value
    by ``_weighted`` using convergence factor ``s``.
    """
    s = .5  # convergence
    # Removed dead store `r = 2`: it was immediately overwritten and never read.
    r = 1.05  # power
    u = prob ** r if prob < .5 else prob ** (1/r)
    return _weighted(temp, prob, s, u)
@ -76,6 +76,8 @@ class Temperature(object):
'alt_fifty' : _alt_fifty,
'average_alt' : _averaged_alt,
'best' : _working_best}
self.diffs = 0
self.ndiffs = 0
def reset(self):
self.actual_value = 100.0
@ -108,7 +110,14 @@ class Temperature(object):
def getAdjustedProbability(self, value):
temp = self.value()
prob = value
return self._adjustmentFormulas[self.adjustmentType](temp, prob)
adjusted = self._adjustmentFormulas[self.adjustmentType](temp, prob)
self.diffs += abs(adjusted - prob)
self.ndiffs += 1
return adjusted
def getAverageDifference(self):
return self.diffs / self.ndiffs
def useAdj(self, adj):
print('Changing to adjustment formula {}'.format(adj))

View File

@ -101,8 +101,6 @@ class Workspace(object):
def getUpdatedTemperature(self):
'''
Calculation of global tolerance towards irrelevance
temp = weightedAverage(totalUnhappiness(.8), ruleWeakness(.2))
'''
self.calculateIntraStringUnhappiness()
self.calculateInterStringUnhappiness()

View File

@ -25,7 +25,6 @@ class TestCopycat(unittest.TestCase):
self.longMessage = True # new in Python 2.7
def assertProbabilitiesLookRoughlyLike(self, actual, expected, iterations):
answerKeys = set(list(actual.keys()) + list(expected.keys()))
degreesFreedom = len(answerKeys)
chiSquared = 0
@ -41,7 +40,7 @@ class TestCopycat(unittest.TestCase):
chiSquared += (O - E) ** 2 / E
if chiSquared >= _chiSquared_table[degreesFreedom]:
self.fail('Significant different between expected and actual answer distributions: \n' +
self.fail('Significant difference between expected and actual answer distributions: \n' +
'Chi2 value: {} with {} degrees of freedom'.format(chiSquared, degreesFreedom))
def run_testcase(self, initial, modified, target, iterations, expected):