Let's start to explain the basics of my model.
|x> => 3|a> + |b> # context.learn("","x",ket("a",3) + ket("b"))
O |x> => |y> # context.learn("O","x","y")
O |x> => |y> + 2.3|z> # context.learn("O","x",ket("y") + ket("z",2.3))
O |x> += |y> # context.add_learn("O","x","y")
O |x> += |a> + 7|y> + |z> # context.add_learn("O","x",ket("a") + ket("y",7) + ket("z"))
# newly added, stored rules.
# some examples:
O |x> #=> |y> # context.learn("O","x",stored_rule("|y>"))
O |x> #=> foo |y> + bah|z> # context.learn("O","x",stored_rule("foo |y> + bah|z>"))
# new and as yet unimplemented:
O |x> -=> |y> + |z> # context.minus_learn("O","x",ket("y") + ket("z"))
# and notation for forget all knowledge of O applied to |x>
# and forget all knowledge about |x>
# Because we need some mechanism for "neuronal circuit pruning".
O |x> # ket("x").apply_op(context,"O")
O (|x> + |y>) # (ket("x") + ket("y")).apply_op(context,"O")
O (2|x> + 3|y> + |z>) # (ket("x",2) + ket("y",3) + ket("z")).apply_op(context,"O")
O2 O1 |x> # ket("x").merged_apply_op(context,"O2 O1")
O^k |x> # ket("x").apply_op_multi(context,"O",k)
---------------------------------------
Let's go through the 4 categories:
built-in
sigmoid
applied functions
general functions
# the following are in the_semantic_db_code.py
# the first column is the current python, the second is the language for the BKO scheme.
# kets:
x.display() # display |x>
x.transpose() #
x.apply_bra(bra) #
x.select_elt(k) # select-elt[k] |x>
x.pick_elt() # pick-elt |x> # randomly select an element from |x>. Makes more sense in superposition context.
# has some similarity with wave-fn collapse in QM.
x.find_index(elt) # find-index[elt] |x>
x.find_value(elt) # find-value[elt] |x>
x.normalize() # normalize |x> # normalize so sum of coeffs = 1
x.rescale() # rescale |x> # rescale so coeff of max element = 1
x.rescale(t) # rescale[t] |x> # rescale so coeff of max element = t
x.apply_sigmoid(sig) # sigmoid[sig] |x>, or sig |x> # apply a sigmoid to all coeffs.
x.apply_sigmoid(sig,t) # sigmoid[sig,t] |x>, or sig[t] |x>
x.apply_fn(fn) # function[fn] |x>, or fn |x> # apply a function to all kets.
x.apply_op(context,op) # op |x>, or apply[|op>] |x>
x.apply_op_multi(context,op,n) # op^n |x>
x.similar(context,op) # similar[op] |x>
x.merged_apply_op(context,ops) # op3 op2 op1 |x>
x.the_label() # # returns the label of the ket.
x.the_value() # # returns the value of the ket.
x.type() # for a ket, it returns "ket", for a superposition: (ket + ket + ket + ket)
# for a nested superposition (which we don't want yet), ((ket + ket) + (ket + ket + ket + ket))
x.discrimination() # returns the difference between largest coeff, and second largest coeff.
# Kind of boring for kets (just returns the ket value), but useful for sp.
# stuff new in superposition:
# though most should also be in kets, but I haven't needed them in kets just yet.
x.apply_projection(bra) # |bra>
x.apply_fn_collapse(fn) # collapse-function[fn] |x>, or fn |x>
x.collapse() # collapse |x> # if there are repeated elements in a superposition, add them up.
x.count() # #count |x> # number of elements in |x>, expressed as an int/float
x.count_sum() # #count-sum |x> # add up the coeffs of the elements in |x>, expressed as an int/float
x.number_count() # count |x> # number of elements in |x>, in |number: x> format
x.number_count_sum() # count-sum |x> # add up the coeffs of the elements in |x>, in |number: x> format
# sum |x> # alias for count-sum
x.number_product() # product |x> # multiply the coeffs of the elements in |x>, in |number: x> format
x.drop() # drop |x> # drop elements with coeff <= 0 NB: in this model coeffs are almost always >= 0
x.drop_below(t) # drop-below[t] |x> # drop elements below t
x.drop_above(t) # drop-above[t] |x> # drop elements above t
x.select_range(a,b) # select-range[a,b] |x> # keep elements with index in range [a,b]
x.select_range(a,b) # select[a,b] |x> # shorter name of the above.
x.delete_elt(k) # delete-elt[k] |x> # delete k'th element from superposition. NB: index starts at 1, not 0.
x.reverse() # reverse |x> # reverse the list
x.shuffle() # shuffle |x> # shuffle the list
x.coeff_sort() # coeff-sort |x> # sort superposition by the coefficients of the kets
x.ket_sort()			# ket-sort |x>			# sort by the lowercase of the labels of the kets.
x.find_max_elt() # max-elt |x> # return the first ket found with the max coeff
x.find_min_elt()		# min-elt |x>			# return the first ket found with the min coeff
x.find_max() # max |x> # return the kets with the max coeff
x.find_min() # min |x> # return the kets with the min coeff
x.find_max_coeff() # return the max coeff as int/float
x.find_min_coeff() # return the min coeff as int/float
x.number_find_max_coeff() # max-coeff |x> # return the max coeff in |number: x> format
x.number_find_min_coeff() # min-coeff |x> # return the min coeff in |number: x> format
x.multiply(t) # mult[t] # multiplies all coeffs by t
x.the_label() # # returns the label of the first ket in the sp. If empty, then "".
x.the_value() # # returns the value of the first ket in the sp. If empty, then 0.
x.absolute_noise(t) # absolute-noise[t] |x> # add noise to the ket/sp in range [0,t]
x.relative_noise(t) # relative-noise[t] |x> # add noise to the ket/sp in range [0,t*max_coeff]
# some common sigmoids:
clean(x) # clean |x> # sets all coeffs above 0 to 1.
threshold_filter(x,t) # threshold-filter[t] |x># sets everything below t to 0, else x
not_threshold_filter(x,t) # not-threshold-filter[t] |x># sets everything below t to x, else 0
binary_filter(x) # binary-filter |x> # sets everything below 0.96 to 0, else 1
not_binary_filter(x) # not-binary-filter |x> # sets everything below 0.96 to 1, else 0
pos(x) # pos |x> # sets everything below 0 to 0, else x
NOT(x) # NOT |x> # sets everything below 0.04 to 1, else 0
xor_filter(x) # xor-filter |x> # sets everything in range [0.96,1.04] to 1, else 0
#mult(x,t) # mult[t] |x> # multiplies all coeffs by t. # shifted to ket/sp.
in_range(x,a,b) # in-range[a,b] |x> # if coeff in [a,b] return coeff, else 0.
invert(x) # invert |x> # maps coeff from x to 1/x. if x == 0, then don't change it.
set_to(x,t) # set-to[t] |x> # sets all coeffs, including 0'd ones, to t
subtraction_invert(x,t) # subtraction-invert[t] |x> # sets all coeffs to t - x
# the following are in the_semantic_db_functions.py
# some functions (as in x.apply_fn(fn))
# mostly they map ket -> ket.
# Though if you use apply_fn_collapse() ket -> superposition is also valid.
apply_value(ket) # value |price: _x> => _x |_self>
extract_category(ket) # extract-category |animal: fish> => |animal>
extract_value(ket) # extract-value |animal: fish> => |fish>
# even though they are ket -> superposition, these have a different use case than the above 3.
# I expect in the future there will be more of this type, or perhaps a general purpose version.
# Their general effect is to map from a higher order down to a lower order.
# text to words, words to letters, numbers to primes.
read_text(ket) # read |text: "some text here"> # returns a superposition
spell_word(ket) # spell |word: fish> # returns a superposition
factor_number(ket) # factor |number: 3281> => |number: 17> + |number: 193> # returns a superposition, obviously.
# this is a different type than the above 3.
is_prime(ket) # is-prime |number: 32051> => |yes>, is-prime |number: 15> => |no>
# Just a weird toy. I don't know why, just wanted to write it.
near_number(ket) # near-number |number: 40> => |number: 50> + |number: 30> + |number: 41> + |number: 49>
# another weird toy.
strange_int(ket) # strange-int |number: 1825> => |number: 83>
strange_int_prime(ket) # strange-int-prime |number: 1098349871> => |number: 7>
strange_int_depth(ket) # strange-int-depth |number: 1098349871> => |number: 6>
strange_int_delta(ket) # strange-int-delta |number: 1972> => |number: 1922>
strange_int_list(ket) # strange-int-list |number: 10978349078> => |number: 10978349078> + |number: 66134718> + |number: 1071> + |number: 30> + |number: 10> + |number: 7>
# general functions (usually acting on superpositions)
show_range(start,finish,step=1) # eg: show-range(|year: 1982>, |year: 1985>)
# should spit out: |year: 1982> + |year: 1983> + |year: 1984> + |year: 1985>
arithmetic(x,operator,y) # eg: arithmetic(|number: 3>,|symbol: +>,|number: 8>)
intersection_fn(foo,one,two) # code for a generalized intersection. Actual result depends on choice of foo().
# a generalization of set intersection.
# if given coeffs in {0,1} works as standard intersection.
# but also neatly handles other cases.
def intersection(one, two):
    """Generalized set intersection of two superpositions.

    Delegates to intersection_fn with min as the pairwise merge
    function; when coefficients are in {0,1} this reduces to
    ordinary Boolean set intersection.
    """
    merge = min
    return intersection_fn(merge, one, two)
# similar to the above. A generalization of set union.
# Note how both intn, and union use the same intersection_fn(), one uses min(), the other max()
def union(one, two):
    """Generalized set union of two superpositions.

    Same machinery as intersection, but merges coefficients with
    max instead of min; for {0,1} coefficients this is ordinary
    Boolean set union.
    """
    merge = max
    return intersection_fn(merge, one, two)
# the complement variable function:
def comp_fn(x, y):
    """Pairwise merge function for set complement.

    Returns the coefficient that is present when exactly one of the
    two inputs is non-zero; returns 0 when both are zero or both are
    non-zero (i.e. keep elements in one set but not the other).
    """
    # True when both are zero or both are non-zero -> not in the complement.
    if (x == 0) == (y == 0):
        return 0
    # Exactly one is non-zero; `or` picks the non-zero coefficient.
    return x or y
# now for complement:
# Finds the set which is the complement of the sets one and two.
# ie, only keep set elements that are in one set, but not the other.
def complement(one, two):
    """Generalized set complement of the two superpositions.

    Keeps only elements present in exactly one of the two sets, by
    running intersection_fn with comp_fn as the pairwise merge.
    """
    merge = comp_fn
    return intersection_fn(merge, one, two)
# the delete function:
def del_fn(x, y):
    """Pairwise merge function for delete.

    Zeroes out y wherever x is present (non-zero); otherwise passes
    y through unchanged. (A possible variant is "return y - x".)
    """
    return y if x == 0 else 0
# deletes elements in one (that have non-zero coeff) from elements in two.
def delete(one, two):
    """Delete from `two` every element that has a non-zero coefficient in `one`.

    Uses intersection_fn with del_fn as the merge, then .drop() to
    discard the elements that were zeroed out. NB: the .drop().
    """
    kept = intersection_fn(del_fn, one, two)
    return kept.drop()
# in practice you want intersection() and union() and complement() to append .drop() too.
# I left it out for now because of testing purposes.
# Alternatively, put .drop() in intersection_fn().
# output from a test of Boolean set union, intersection, complement, and delete:
$ ./test_intn.py
X: |a> + |c> + |d> + |e>
Y: |b> + |e>
union: |a> + |b> + |c> + |d> + |e>
intersection: |e>
complement: |a> + |b> + |c> + |d>
del X, Y: |b>
del Y, X: |a> + |c> + |d>
# test for set membership of |x> in |X>
# is >= t ?
# this is simple enough, that we probably don't even need this function. Just do it inline.
# Probably a little clearer to do it inline anyway, instead of the one step of indirection.
def set_mbr(x, X, t=1):
    """Test membership of ket x in superposition X.

    Membership holds when the coefficient of x within X (via
    apply_bra) is at least the threshold t (default 1).
    """
    coeff = X.apply_bra(x)
    return coeff >= t
# NB: x.find_index(elt) is an alternate method to determine set membership.
# It gives different answers though, because of the labels_match(label1,label2) vs label1 == label2 difference.
# a quiet version of simm:
# see: the_semantic_db_functions.py for more discussion of simm()
def silent_simm(A, B):
    """Quiet similarity measure between two superpositions.

    Normalizes both inputs (so coefficients each sum to 1), takes
    their generalized intersection, and returns the sum of the
    resulting coefficients as the similarity score.
    """
    a = A.normalize()
    b = B.normalize()
    return intersection(a, b).count_sum()
# makes heavy use of supported-ops |X>
# indeed, it was the initial reason I added supported-ops to my model.
# another potential name is "stream of consciousness"
# it emulates the idea of drifting from one thought to the next, and then the next, and on and on.
train_of_thought(context,x,n)