Merge branch 'feature-temperature-improvements' into develop

Author: LSaldyt
Date:   2017-10-09 13:01:42 -06:00
19 changed files with 149 additions and 204 deletions

.gitignore

@ -27,3 +27,6 @@ pip-log.txt
# Editors
.*.swp
# Output
output/*

.travis.yml (new file)

@ -0,0 +1,5 @@
language: python
python:
- "3.6"
script:
- python3 tests.py


@ -1 +1,3 @@
from .copycat import Copycat, Reporter # noqa
from .plot import plot_answers
from .io import save_answers


@ -72,6 +72,7 @@ def __structureVsStructure(structure1, weight1, structure2, weight2):
"""Return true if the first structure comes out stronger than the second."""
ctx = structure1.ctx
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
structure1.updateStrength()
structure2.updateStrength()
@ -114,7 +115,6 @@ def __slippability(ctx, conceptMappings):
temperature = ctx.temperature
for mapping in conceptMappings:
slippiness = mapping.slippability() / 100.0
# TODO: use entropy
probabilityOfSlippage = temperature.getAdjustedProbability(slippiness)
if random.coinFlip(probabilityOfSlippage):
return True
@ -123,10 +123,18 @@ def __slippability(ctx, conceptMappings):
@codelet('breaker')
def breaker(ctx, codelet):
# From the original LISP:
'''
First decides probabilistically whether or not to fizzle, based on
temperature. Chooses a structure at random and decides probabilistically
whether or not to break it as a function of its total weakness.
If the structure is a bond in a group, have to break the group in
order to break the bond.
'''
random = ctx.random
temperature = ctx.temperature
workspace = ctx.workspace
# TODO: use entropy
probabilityOfFizzle = (100.0 - temperature.value()) / 100.0
if random.coinFlip(probabilityOfFizzle):
return
@ -142,8 +150,9 @@ def breaker(ctx, codelet):
if structure.source.group == structure.destination.group:
breakObjects += [structure.source.group]
# Break all the objects or none of them; this matches the Java implementation.
# "All objects" here means a bond and its group, if it has one.
for structure in breakObjects:
# TODO: use entropy
breakProbability = temperature.getAdjustedProbability(
structure.totalStrength / 100.0)
if random.coinFlip(breakProbability):
@ -161,6 +170,7 @@ def chooseRelevantDescriptionByActivation(ctx, workspaceObject):
def similarPropertyLinks(ctx, slip_node):
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
result = []
for slip_link in slip_node.propertyLinks:
@ -216,6 +226,7 @@ def top_down_description_scout(ctx, codelet):
def description_strength_tester(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
description = codelet.arguments[0]
description.descriptor.buffer = 100.0
@ -301,6 +312,7 @@ def rule_scout(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
slipnet = ctx.slipnet
# TODO: use entropy
temperature = ctx.temperature
workspace = ctx.workspace
assert workspace.numberOfUnreplacedObjects() == 0
@ -339,6 +351,7 @@ def rule_scout(ctx, codelet):
# "union of this and distinguishing descriptors"
assert objectList
# use conceptual depth to choose a description
# TODO: use entropy
weights = [
temperature.getAdjustedValue(node.conceptualDepth)
for node in objectList
@ -350,6 +363,7 @@ def rule_scout(ctx, codelet):
objectList += [changed.replacement.relation]
objectList += [changed.replacement.objectFromModified.getDescriptor(
slipnet.letterCategory)]
# TODO: use entropy
# use conceptual depth to choose a relation
weights = [
temperature.getAdjustedValue(node.conceptualDepth)
@ -364,6 +378,7 @@ def rule_scout(ctx, codelet):
def rule_strength_tester(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
rule = codelet.arguments[0]
rule.updateStrength()
@ -465,6 +480,7 @@ def top_down_bond_scout__direction(ctx, codelet):
def bond_strength_tester(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
bond = codelet.arguments[0]
__showWhichStringObjectIsFrom(bond)
@ -747,6 +763,7 @@ def group_strength_tester(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
slipnet = ctx.slipnet
# TODO: use entropy
temperature = ctx.temperature
# update strength value of the group
group = codelet.arguments[0]
@ -869,6 +886,7 @@ def __getCutoffWeights(bondDensity):
def rule_translator(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
workspace = ctx.workspace
assert workspace.rule
@ -946,6 +964,7 @@ def important_object_correspondence_scout(ctx, codelet):
assert objectFromInitial is not None
descriptors = objectFromInitial.relevantDistinguishingDescriptors()
# choose descriptor by conceptual depth
# TODO: use entropy
weights = [temperature.getAdjustedValue(n.conceptualDepth) for n in descriptors]
slipnode = random.weighted_choice(descriptors, weights)
assert slipnode
@ -997,6 +1016,7 @@ def important_object_correspondence_scout(ctx, codelet):
def correspondence_strength_tester(ctx, codelet):
coderack = ctx.coderack
random = ctx.random
# TODO: use entropy
temperature = ctx.temperature
workspace = ctx.workspace
correspondence = codelet.arguments[0]
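The recurring "# TODO: use entropy" markers above all sit next to the same idiom: a raw 0-100 score is scaled to a probability, passed through temperature.getAdjustedProbability, and coin-flipped. A self-contained sketch of that idiom follows; the stub class and helper names (StubTemperature, coin_flip, fizzles, any_mapping_slips) are illustrative stand-ins for ctx.temperature and ctx.random, not part of the codebase.

import math
import random


class StubTemperature:
    """Illustrative stand-in for ctx.temperature; only what the sketch needs."""

    def __init__(self, value=50.0):
        self._value = value

    def value(self):
        return self._value

    def getAdjustedProbability(self, prob):
        # The 'original' adjustment curve from temperature.py: probabilities
        # above 0.5 get a small temperature-dependent boost, floored at 0.5;
        # values below 0.5 are handled by mirroring.
        if prob == 0 or prob == 0.5 or self.value() == 0:
            return prob
        if prob < 0.5:
            return 1.0 - self.getAdjustedProbability(1.0 - prob)
        coldness = 100.0 - self.value()
        c = (10 - math.sqrt(coldness)) / 100
        return max((c + 1) * prob, 0.5)


def coin_flip(probability):
    """Stand-in for ctx.random.coinFlip."""
    return random.random() < probability


def fizzles(temperature):
    """The fizzle test used by breaker: a cold, settled workspace makes the
    codelet fizzle most of the time; a hot one rarely does."""
    return coin_flip((100.0 - temperature.value()) / 100.0)


def any_mapping_slips(slippabilities, temperature):
    """The __slippability pattern: scale each 0-100 score to a probability,
    adjust it by temperature, and coin-flip until one mapping slips."""
    for slippiness in slippabilities:
        if coin_flip(temperature.getAdjustedProbability(slippiness / 100.0)):
            return True
    return False


if __name__ == '__main__':
    hot, cold = StubTemperature(90.0), StubTemperature(10.0)
    print(fizzles(hot), fizzles(cold))
    print(any_mapping_slips([30.0, 60.0], hot))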


@ -68,6 +68,7 @@ class Coderack(object):
self.postBottomUpCodelets()
def probabilityOfPosting(self, codeletName):
# TODO: use entropy
temperature = self.ctx.temperature
workspace = self.ctx.workspace
if codeletName == 'breaker':
@ -156,6 +157,7 @@ class Coderack(object):
def __postBottomUpCodelets(self, codeletName):
random = self.ctx.random
# TODO: use entropy
temperature = self.ctx.temperature
probability = self.probabilityOfPosting(codeletName)
howMany = self.howManyToPost(codeletName)
@ -288,6 +290,7 @@ class Coderack(object):
def chooseCodeletToRun(self):
random = self.ctx.random
# TODO: use entropy
temperature = self.ctx.temperature
assert self.codelets


@ -4,7 +4,6 @@ from .slipnet import Slipnet
from .temperature import Temperature
from .workspace import Workspace
class Reporter(object):
"""Do-nothing base class for defining new reporter types"""
def report_answer(self, answer):
@ -16,7 +15,7 @@ class Reporter(object):
def report_slipnet(self, slipnet):
pass
def report_temperature(self, temperature):
def report_temperature(self, temperature): #TODO: use entropy
pass
def report_workspace(self, workspace):
@ -28,19 +27,19 @@ class Copycat(object):
self.coderack = Coderack(self)
self.random = Randomness(rng_seed)
self.slipnet = Slipnet()
self.temperature = Temperature()
self.temperature = Temperature() # TODO: use entropy
self.workspace = Workspace(self)
self.reporter = reporter or Reporter()
def mainLoop(self, lastUpdate):
currentTime = self.coderack.codeletsRun
self.temperature.tryUnclamp(currentTime)
self.temperature.tryUnclamp(currentTime) # TODO: use entropy
# Every 15 codelets, we update the workspace.
if currentTime >= lastUpdate + 15:
self.workspace.updateEverything()
self.coderack.updateCodelets()
self.slipnet.update(self.random)
self.temperature.update(self.workspace.getUpdatedTemperature())
self.temperature.update(self.workspace.getUpdatedTemperature()) # TODO: use entropy
lastUpdate = currentTime
self.reporter.report_slipnet(self.slipnet)
self.coderack.chooseAndRunCodelet()
@ -53,14 +52,14 @@ class Copycat(object):
"""Run a trial of the copycat algorithm"""
self.coderack.reset()
self.slipnet.reset()
self.temperature.reset()
self.temperature.reset() # TODO: use entropy
self.workspace.reset()
lastUpdate = float('-inf')
while self.workspace.finalAnswer is None:
lastUpdate = self.mainLoop(lastUpdate)
answer = {
'answer': self.workspace.finalAnswer,
'temp': self.temperature.last_unclamped_value,
'temp': self.temperature.last_unclamped_value, # TODO: use entropy
'time': self.coderack.codeletsRun,
}
self.reporter.report_answer(answer)
@ -68,16 +67,20 @@ class Copycat(object):
def run(self, initial, modified, target, iterations):
self.workspace.resetWithStrings(initial, modified, target)
#self.temperature.useAdj('original')
#self.temperature.useAdj('entropy')
self.temperature.useAdj('inverse') # 100 weight
answers = {}
for i in range(iterations):
answer = self.runTrial()
d = answers.setdefault(answer['answer'], {
'count': 0,
'sumtemp': 0,
'sumtemp': 0, # TODO: use entropy
'sumtime': 0
})
d['count'] += 1
d['sumtemp'] += answer['temp']
d['sumtemp'] += answer['temp'] # TODO: use entropy
d['sumtime'] += answer['time']
for answer, d in answers.items():
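The tail of run() is cut off by the hunk, but main.py and plot.py below read d['avgtime'] and d['avgtemp'] from the dict it returns, so the final loop presumably turns the accumulated sums into averages. A hedged sketch of that aggregation step, not the verbatim source:

def summarize(answers):
    """Convert the per-answer accumulators built in Copycat.run
    (count / sumtemp / sumtime) into the avgtemp / avgtime fields
    that main.py and plot.py consume."""
    for answer, d in answers.items():
        d['avgtemp'] = d['sumtemp'] / d['count']
        d['avgtime'] = d['sumtime'] / d['count']
    return answers


if __name__ == '__main__':
    raw = {'ijl': {'count': 2, 'sumtemp': 40.0, 'sumtime': 2200},
           'ijd': {'count': 1, 'sumtemp': 35.0, 'sumtime': 1500}}
    print(summarize(raw))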


@ -63,7 +63,7 @@ class CursesReporter(Reporter):
coderackHeight = height - upperHeight - answersHeight
self.focusOnSlipnet = focus_on_slipnet
self.fpsGoal = fps_goal
self.temperatureWindow = SafeSubwindow(window, height, 5, 0, 0)
self.temperatureWindow = SafeSubwindow(window, height, 5, 0, 0) # TODO: use entropy (entropyWindow)
self.upperWindow = SafeSubwindow(window, upperHeight, width-5, 0, 5)
self.coderackWindow = SafeSubwindow(window, coderackHeight, width-5, upperHeight, 5)
self.answersWindow = SafeSubwindow(window, answersHeight, width-5, upperHeight + coderackHeight, 5)

copycat/io.py (new file)

@ -0,0 +1,9 @@
def save_answers(answers, filename):
answers = sorted(answers.items(), key=lambda kv : kv[1]['count'])
keys = [k for k, v in answers]
counts = [str(v['count']) for k, v in answers]
with open(filename, 'w') as outfile:
outfile.write(','.join(keys))
outfile.write('\n')
outfile.write(','.join(counts))
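save_answers writes two CSV rows, keys then counts, sorted by ascending count. A minimal usage sketch with made-up data (it assumes the output/ directory added by this commit exists):

from copycat.io import save_answers

# Hypothetical answer distribution, shaped like the dict Copycat.run builds.
answers = {
    'ijl': {'count': 7, 'sumtemp': 150.0, 'sumtime': 9000},
    'ijd': {'count': 3, 'sumtemp': 90.0, 'sumtime': 4500},
}
save_answers(answers, 'output/answers.csv')
# output/answers.csv now contains:
#   ijd,ijl
#   3,7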

copycat/plot.py (new file)

@ -0,0 +1,20 @@
import numpy as np
import matplotlib.pyplot as plt
plt.rcdefaults()
def plot_answers(answers, show=True, save=True, filename='distribution.png'):
answers = sorted(answers.items(), key=lambda kv : kv[1]['count'])
objects = [t[0] + ' (temp:{})'.format(round(t[1]['avgtemp'], 2)) for t in answers]
yvalues = [t[1]['count'] for t in answers]
y_pos = np.arange(len(objects))
plt.bar(y_pos, yvalues, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Count')
plt.title('Answers')
if show:
plt.show()
if save:
plt.savefig('output/{}'.format(filename))
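plot_answers expects the post-aggregation dict (it reads 'avgtemp' as well as 'count') and saves under output/ by default. A minimal usage sketch with made-up numbers:

from copycat import plot_answers  # re-exported by the package __init__

answers = {
    'xyd': {'count': 98, 'avgtemp': 17.5},
    'wyz': {'count': 2, 'avgtemp': 14.9},
}
plot_answers(answers, show=False, save=True, filename='xyz_distribution.png')
# Writes output/xyz_distribution.png; the output/ directory is added
# elsewhere in this commit.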


@ -1,9 +1,40 @@
import math
# Alternate formulas for getAdjustedProbability
def _original(temp, prob):
if prob == 0 or prob == 0.5 or temp == 0:
return prob
if prob < 0.5:
return 1.0 - _original(temp, 1.0 - prob)
coldness = 100.0 - temp
a = math.sqrt(coldness)
c = (10 - a) / 100
f = (c + 1) * prob
return max(f, 0.5)
def _entropy(temp, prob):
if prob == 0 or prob == 0.5 or temp == 0:
return prob
if prob < 0.5:
return 1.0 - _original(temp, 1.0 - prob)
coldness = 100.0 - temp
a = math.sqrt(coldness)
c = (10 - a) / 100
f = (c + 1) * prob
return -f * math.log2(f)
def _inverse_prob(temp, prob):
iprob = 1 - prob
return (temp / 100) * iprob + ((100 - temp) / 100) * prob
class Temperature(object):
def __init__(self):
self.reset()
self.adjustmentType = 'inverse'
self._adjustmentFormulas = {
'original' : _original,
'entropy' : _entropy,
'inverse' : _inverse_prob}
def reset(self):
self.actual_value = 100.0
@ -33,191 +64,14 @@ class Temperature(object):
def getAdjustedValue(self, value):
return value ** (((100.0 - self.value()) / 30.0) + 0.5)
"""
def getAdjustedProbability(self, value):
if value == 0 or value == 0.5 or self.value() == 0:
return value
if value < 0.5:
return 1.0 - self.getAdjustedProbability(1.0 - value)
coldness = 100.0 - self.value()
a = math.sqrt(coldness)
c = (10 - a) / 100
f = (c + 1) * value
return max(f, 0.5)
"""
temp = self.value()
prob = value
return self._adjustmentFormulas[self.adjustmentType](temp, prob)
def useAdj(self, adj):
print('Changing to adjustment formula {}'.format(adj))
self.adjustmentType = adj
def getAdjustedProbability(self, value):
"""
This function returns the probability for a decision.
Copied above.
Please look at the last line of it. Strangely, it was
return max(f, 0.5). Does that make sense? Let's compare
some results. Where it was (0.5), we obtained, for example:
iiijjjlll: 670 (avg time 1108.5, avg temp 23.6)
iiijjjd: 2 (avg time 1156.0, avg temp 35.0)
iiijjjkkl: 315 (avg time 1194.4, avg temp 35.5)
iiijjjkll: 8 (avg time 2096.8, avg temp 44.1)
iiijjjkkd: 5 (avg time 837.2, avg temp 48.0)
wyz: 5 (avg time 2275.2, avg temp 14.9)
xyd: 982 (avg time 2794.4, avg temp 17.5)
yyz: 7 (avg time 2731.9, avg temp 25.1)
dyz: 2 (avg time 3320.0, avg temp 27.1)
xyy: 2 (avg time 4084.5, avg temp 31.1)
xyz: 2 (avg time 1873.5, avg temp 52.1)
Now, let's see what return max(f, 0.0000) does:
wyz: 7 (avg time 3192.9, avg temp 13.1)
xyd: 985 (avg time 2849.1, avg temp 17.5)
yyz: 6 (avg time 3836.7, avg temp 18.6)
xyy: 1 (avg time 1421.0, avg temp 19.5)
xyz: 1 (avg time 7350.0, avg temp 48.3)
They *seem* better (in the strict sense that we've obtained both
lower T and more times of wyz.) But they're *not* statistically
significant (for 1000 runs).
Now... looking at the code... it seems to be a mess... what does
function f() even mean in intuitive terms?
Work it does, but dude... quite a hack.
Another run, with return f @line89:
wyz: 8 (avg time 4140.5, avg temp 13.3)
yyz: 6 (avg time 2905.2, avg temp 14.5)
xyd: 982 (avg time 3025.4, avg temp 17.6)
dyz: 4 (avg time 4265.0, avg temp 17.7)
Does it even matter? Another (quick) run, I think with return (0.5):
dyz: 1 (avg time 5198.0, avg temp 15.3)
wyz: 3 (avg time 4043.7, avg temp 17.1)
yyz: 9 (avg time 3373.6, avg temp 21.0)
xyd: 84 (avg time 5011.1, avg temp 23.3)
xyy: 3 (avg time 4752.0, avg temp 27.9)
Compared to return(0.99):
xyd: 1000 (avg time 1625.2, avg temp 17.3)
Comparing to return f --> Statistically significant.
Comparing to return(0.5) --> same, so this return value does something.
Now running return(0.0):
xyz: 3 (avg time 3996.7, avg temp 81.1)
dyz: 46 (avg time 5931.7, avg temp 82.6)
xd: 17 (avg time 6090.3, avg temp 83.8)
xyd: 934 (avg time 7699.8, avg temp 88.1)
It's bad overall, but at least it's statistically significant!
return (-f * (math.log2(f))) # Entropy test #1 (global).
wyz: 123 (avg time 5933.1, avg temp 16.5)
xyy: 200 (avg time 6486.7, avg temp 27.8)
yyz: 330 (avg time 6310.2, avg temp 38.5)
dyz: 75 (avg time 6393.3, avg temp 39.6)
yzz: 5 (avg time 4965.0, avg temp 59.3)
xyz: 160 (avg time 6886.2, avg temp 60.2)
xd: 4 (avg time 2841.0, avg temp 61.8)
dz: 3 (avg time 3721.0, avg temp 62.1)
xyd: 100 (avg time 5853.1, avg temp 67.5)
Here we get an intuitive result: entropy/uncertainty seems better at
exploring a whole range of possible solutions. It even seems, at least
to me, better than the distribution obtained by the original copycat.
instead of log2, trying ln --> return (-f * math.log(f)):
wyz: 78 (avg time 7793.7, avg temp 16.6)
xyy: 202 (avg time 9168.5, avg temp 27.5)
wxz: 1 (avg time 3154.0, avg temp 33.4)
dyz: 63 (avg time 7950.3, avg temp 41.7)
yyz: 217 (avg time 8147.4, avg temp 41.7)
xyz: 201 (avg time 7579.7, avg temp 62.5)
xxy: 1 (avg time 7994.0, avg temp 64.8)
yzz: 8 (avg time 4672.6, avg temp 65.7)
xd: 9 (avg time 9215.2, avg temp 68.1)
xyd: 217 (avg time 7677.9, avg temp 73.8)
dz: 3 (avg time 20379.0, avg temp 77.3)
(quickly) trying out (1-this_entropy_function):
xyd: 100 (avg time 2984.3, avg temp 18.2)
And that's beautiful! One wants an inverse function that punishes
exploration and creativity, that takes all the fluidity off
the system.
But somehow this completely messes up with abc abd iijjkk:
jijjkk: 66 (avg time 3200.1, avg temp 61.3)
iijjkk: 114 (avg time 5017.2, avg temp 63.5)
dijjkk: 23 (avg time 2209.0, avg temp 67.3)
iijjkl: 748 (avg time 3262.8, avg temp 70.0)
iijjkd: 49 (avg time 2315.9, avg temp 76.3)
Which leads me to suspect that someone may have overfitted the
model for either xyz or iijjkk or some other problem, and one
improvement there means disaster here.
Something tells me to invert again to 1-entropy... and bingo!
iijjll: 59 (avg time 797.4, avg temp 19.8)
iijjkl: 41 (avg time 696.1, avg temp 28.5)
My guess is that some code is preferring to find groups in the
opposite form from the way it likes finding the "symmetry/opposite"
concepts of the xyz problem.
Should compare & contrast the unhappiness and relevance of both
the opposite/symmetry codelets and the grouping/chunking codelets.
My hunch is the sameness group code: something there that
interacts with Temperature is wicked, and it should be relatively
easy to find the error.
Here's why: the following run was done on (1-entropy(f)):
mrrlll: 77 (avg time 2195.7, avg temp 41.4)
mrrd: 2 (avg time 1698.0, avg temp 42.6)
mrrkkl: 20 (avg time 1317.8, avg temp 46.6)
mrrkkd: 1 (avg time 1835.0, avg temp 48.6)
If (1-entropy(f)) binds the system into a tight corridor of possibilities,
then why does it easily get the samenessGroup right? If this is right,
then running just entropy(f) should have big trouble with samenessGroup.
Let's see:
nrrkkk: 11 (avg time 3637.8, avg temp 64.6)
drrkkk: 3 (avg time 5921.3, avg temp 66.2)
mrrkkd: 7 (avg time 6771.3, avg temp 74.6)
mrrkkl: 79 (avg time 3723.0, avg temp 74.9)
So there we are: the system is unable to find "change samenessGroup
to next letterCategory", so there ought to be something very different
in the code that:
* Interacts with Temperature (things like unhappiness, relevance, depth,
urgency, and whatever else interacts with T)
* something very close to samenessGroup... sameGroup, sameness,
sameNeighbors, etc... is encoded in a form that is *directly opposite*
to other concepts/categories/codelets, etc.
Need to play with this more... and WTF is f anyways?
"""
if value == 0 or value == 0.5 or self.value() == 0:
return value
if value < 0.5:
return 1.0 - self.getAdjustedProbability(1.0 - value)
coldness = 100.0 - self.value()
a = math.sqrt(coldness)
c = (10 - a) / 100
f = (c + 1) * value
return (0 + (-f * math.log2(f))) # max(f, 0.0000)
def adj_formulas(self):
return self._adjustmentFormulas.keys()
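To make the trade-offs discussed in the long comment above concrete, here is a small tabulation sketch (not part of the commit) that calls the three module-level formulas; it assumes the module is importable as copycat.temperature, matching the "from .temperature import Temperature" import elsewhere in this diff.

from copycat.temperature import _original, _entropy, _inverse_prob


def tabulate(prob=0.8):
    print('temp   original   entropy   inverse')
    for temp in (100.0, 50.0, 10.0):
        print('%5.1f  %9.3f  %8.3f  %8.3f' % (
            temp,
            _original(temp, prob),
            _entropy(temp, prob),
            _inverse_prob(temp, prob)))


tabulate()
# _original multiplies prob by (1 + c), where c shrinks from 0.10 at temp 100
# to 0 at temp 0, and floors the result at 0.5.
# _inverse_prob blends prob with 1 - prob, flipping the preference entirely
# at temp 100 and leaving it untouched at temp 0.
# _entropy returns -f * log2(f), which heads toward 0 as f approaches 1 and
# goes negative once f exceeds 1, so it is not a probability in general.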


@ -39,7 +39,7 @@ class Workspace(object):
self.changedObject = None
self.objects = []
self.structures = []
self.rule = None
self.rule = None # Only one rule? : LSaldyt
self.initial = WorkspaceString(self.ctx, self.initialString)
self.modified = WorkspaceString(self.ctx, self.modifiedString)
self.target = WorkspaceString(self.ctx, self.targetString)
@ -99,6 +99,11 @@ class Workspace(object):
# TODO: use entropy
def getUpdatedTemperature(self):
'''
Calculation of global tolerance towards irrelevance
temp = weightedAverage(totalUnhappiness(.8), ruleWeakness(.2))
'''
self.calculateIntraStringUnhappiness()
self.calculateInterStringUnhappiness()
self.calculateTotalUnhappiness()
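getUpdatedTemperature's docstring pins down the arithmetic, but the weightedAverage helper (imported from .formulas elsewhere in this diff) is not shown. A hedged sketch of the calculation, assuming a list-of-(value, weight)-pairs signature:

def weighted_average(value_weight_pairs):
    """Assumed shape of formulas.weightedAverage: (value, weight) pairs
    whose weights sum to 1."""
    return sum(value * weight for value, weight in value_weight_pairs)


def updated_temperature(total_unhappiness, rule_weakness):
    """The docstring's formula: 80% workspace unhappiness, 20% rule
    weakness, both on a 0-100 scale."""
    return weighted_average([(total_unhappiness, 0.8), (rule_weakness, 0.2)])


print(updated_temperature(total_unhappiness=60.0, rule_weakness=20.0))  # 52.0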


@ -1,5 +1,6 @@
def __chooseObjectFromList(ctx, objects, attribute):
# TODO: use entropy
random = ctx.random
temperature = ctx.temperature
weights = [


@ -2,7 +2,6 @@ from .description import Description
from .formulas import weightedAverage
from .workspaceStructure import WorkspaceStructure
class WorkspaceObject(WorkspaceStructure):
# pylint: disable=too-many-instance-attributes
def __init__(self, workspaceString):

input/.placeholder (new, empty file)

input/problems.csv (new file)

@ -0,0 +1,9 @@
abc,abd,ijk
aabc,aabd,ijkk
abc,abd,kji
abc,abd,mrrjjj
abc,abd,rssttt
abc,abd,xyz
abc,abd,ijjkkk
rst,rsu,xyz
abc,abd,xyyzzz
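Nothing shown in this diff reads these CSVs yet; below is a hedged sketch of how a file in this format (initial,modified,target per row) could drive batch runs, reusing the Copycat().run(...) call that tests.py uses. The iteration count and output paths are arbitrary.

import csv

from copycat import Copycat, save_answers


def run_problem_file(path='input/problems.csv', iterations=10):
    """Hypothetical batch driver: one Copycat run per CSV row."""
    with open(path) as infile:
        for initial, modified, target in csv.reader(infile):
            answers = Copycat().run(initial, modified, target, iterations)
            save_answers(answers, 'output/%s-%s-%s.csv' % (initial, modified, target))


if __name__ == '__main__':
    run_problem_file()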


@ -0,0 +1,4 @@
abc,abd,ijk
aabc,aabd,ijkk
abc,abd,xyz
abc,abd,ijjkkk

main.py

@ -35,8 +35,7 @@ final temperature of the workspace; lower means "more elegant".
import argparse
import logging
from copycat import Copycat, Reporter
from copycat import Copycat, Reporter, plot_answers, save_answers
class SimpleReporter(Reporter):
"""Reports results from a single run."""
@ -50,11 +49,13 @@ class SimpleReporter(Reporter):
def main():
"""Program's main entrance point. Self-explanatory code."""
logging.basicConfig(level=logging.INFO, format='%(message)s', filename='./copycat.log', filemode='w')
logging.basicConfig(level=logging.INFO, format='%(message)s', filename='./output/copycat.log', filemode='w')
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=None, help='Provide a deterministic seed for the RNG.')
parser.add_argument('--iterations', type=int, default=1, help='Run the given case this many times.')
parser.add_argument('--plot', action='store_true', help='Plot a bar graph of answer distribution')
parser.add_argument('--noshow', action='store_true', help='Don\'t display bar graph at end of run')
parser.add_argument('initial', type=str, help='A...')
parser.add_argument('modified', type=str, help='...is to B...')
parser.add_argument('target', type=str, help='...as C is to... what?')
@ -66,6 +67,10 @@ def main():
for answer, d in sorted(iter(answers.items()), key=lambda kv: kv[1]['avgtemp']):
print('%s: %d (avg time %.1f, avg temp %.1f)' % (answer, d['count'], d['avgtime'], d['avgtemp']))
if options.plot:
plot_answers(answers, show=not options.noshow)
save_answers(answers, 'output/answers.csv')
if __name__ == '__main__':
main()
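Roughly what "python3 main.py abc abd ijk --iterations 100 --plot" now does, minus the SimpleReporter and logging wiring; a sketch of the flow, not the actual main():

from copycat import Copycat, plot_answers, save_answers

answers = Copycat().run('abc', 'abd', 'ijk', iterations=100)
for answer, d in sorted(answers.items(), key=lambda kv: kv[1]['avgtemp']):
    print('%s: %d (avg time %.1f, avg temp %.1f)'
          % (answer, d['count'], d['avgtime'], d['avgtemp']))
plot_answers(answers, show=False)            # writes output/distribution.png
save_answers(answers, 'output/answers.csv')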

output/.placeholder (new, empty file)


@ -1,7 +1,9 @@
import unittest
from pprint import pprint
from .copycat import Copycat
from copycat import Copycat
# TODO: update test cases to use entropy
def pnormaldist(p):
table = {
@ -67,6 +69,7 @@ class TestCopycat(unittest.TestCase):
self.fail('No instances of expected key %s were produced! %r != %r' % (k, actual, expected))
def run_testcase(self, initial, modified, target, iterations, expected):
pprint(expected)
actual = Copycat().run(initial, modified, target, iterations)
self.assertEqual(sum(a['count'] for a in list(actual.values())), iterations)
self.assertProbabilitiesLookRoughlyLike(actual, expected)
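assertProbabilitiesLookRoughlyLike itself is not shown in this hunk; one plausible use of the pnormaldist z-value lookup is a Wilson score lower bound on each answer's observed frequency. The sketch below is an assumption about the test's statistics, not the file's actual code:

import math


def pnormaldist_stub(confidence):
    """Tiny stand-in for the z-value table in tests.py."""
    return {0.90: 1.644854, 0.95: 1.959964, 0.99: 2.575829}[confidence]


def wilson_lower_bound(successes, trials, confidence=0.95):
    """Wilson score lower bound for an observed proportion; a common way to
    assert that answer frequencies look 'roughly like' the expected ones."""
    if trials == 0:
        return 0.0
    z = pnormaldist_stub(confidence)
    phat = successes / trials
    return ((phat + z * z / (2 * trials)
             - z * math.sqrt((phat * (1 - phat) + z * z / (4 * trials)) / trials))
            / (1 + z * z / trials))


# e.g. seeing 'ijl' in 620 of 1000 runs:
print(round(wilson_lower_bound(620, 1000), 2))  # ~0.59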