from ca.nengo.model.impl import *
from ca.nengo.model import *
from ca.nengo.model.nef.impl import *
from ca.nengo.model.neuron.impl import *
from ca.nengo.model.neuron import *
from ca.nengo.math.impl import *
from ca.nengo.model.plasticity.impl import *
from ca.nengo.util import *
from ca.nengo.math import *
from learning_rule import *

# This script learns the connection weights between the A and B populations,
# given known weights from the Error population, which computes a nonlinear
# function of the input in a vector space.  The learning rule itself lives in
# a separate file (learning_rule.py) so it can be loaded in Nengo.

N = 50  # number of neurons in each ensemble
D = 1   # dimensionality of the input and of the function to learn
        # (for D=2, the decoded terminations/origins below need 2-D
        #  transform matrices, e.g. [[1, 0], [0, 1]])

PDFTools.setSeed(10)  # fix the RNG so runs are reproducible


def getWeights(ensemble, termination):
    """Return one row of synaptic weights per neuron in `ensemble` for the
    named `termination` — handy for inspecting learning progress."""
    return [n.synapticIntegrator.getTermination(termination).weights
            for n in ensemble.nodes]


def make_weights_init(network, source, origin, target, termination, pstc):
    """Connect `source` to `target` with RANDOM synaptic weights as a
    starting point for learning.

    The random weights are drawn from a Gaussian whose parameters are taken
    from the statistics of the ideal NEF weights (encoders x decoders), so
    the initial connection is in a plausible range.

    network     -- the containing NetworkImpl
    source      -- name of the source ensemble in `network`
    origin      -- name of the decoded origin on `source` to take decoders from
    target      -- name of the target ensemble in `network`
    termination -- name for the new (non-modulatory) termination on `target`
    pstc        -- post-synaptic time constant for the termination
    """
    source = network.getNode(source)
    target = network.getNode(target)
    decoder = source.getOrigin(origin).decoders
    # Ideal NEF weights (gain applied elsewhere); used only to set the
    # statistics of the random initialization below.
    w = MU.prod(target.encoders, MU.transpose(decoder))
    mean = sum([sum(row) for row in w]) / (len(w) ** 2)
    # NOTE(review): the max weight is used as the Gaussian's spread
    # parameter — presumably a deliberate, generous initialization; confirm.
    spread = max([max(row) for row in w])
    normal = GaussianPDF(mean, spread)
    # NOTE(review): assumes both ensembles have N neurons; would need
    # source/target neuron counts to generalize.
    w = MU.random(N, N, normal)
    t = target.addTermination(termination, w, pstc, False)
    network.addProjection(source.getOrigin('AXON'), t)


def make_weights(network, source, origin, target, termination, pstc, isMod):
    """Connect `source` to `target` using the exact NEF synaptic weights
    (target encoders x source decoders; gain applied elsewhere).

    Parameters match make_weights_init, plus:
    isMod -- whether the new termination is modulatory (drives learning
             rather than activity).
    """
    source = network.getNode(source)
    target = network.getNode(target)
    decoder = source.getOrigin(origin).decoders
    w = MU.prod(target.encoders, MU.transpose(decoder))
    t = target.addTermination(termination, w, pstc, isMod)
    network.addProjection(source.getOrigin('AXON'), t)


# Delete the network if it already exists so the script can be re-run.
# The bare except is intentional: on the first run LearnNet is undefined
# (NameError), and under Jython `world.remove` may raise a Java exception,
# which `except Exception:` would NOT catch.
try:
    world.remove(LearnNet)
except:
    pass

# Create the network
LearnNet = NetworkImpl()
world.add(LearnNet)

# Ensemble factory: LIF neurons, max rates uniform in 200-400 Hz,
# intercepts uniform in [-1, 1].
ef = NEFEnsembleFactoryImpl()
maxRate = IndicatorPDF(200, 400)
intercept = IndicatorPDF(-1, 1)
nf = LIFNeuronFactory(tauRC=.020, tauRef=.001,
                      maxRate=maxRate, intercept=intercept)
ef.setNodeFactory(nf)

# Make the populations and add them to the network
Aensemble = ef.make("A", N, D)
Bensemble = ef.make("B", N, D)
ErrorEnsemble = ef.make("Error", N, D)
LearnNet.addNode(Aensemble)
LearnNet.addNode(Bensemble)
LearnNet.addNode(ErrorEnsemble)

# Terminations to project into (identity transforms for D=1).
# The Error population computes RightInput - BInput, i.e. the difference
# between the ideal function of A and B's current output.
Aensemble.addDecodedTermination("input", [[1]], .005, False)
ErrorEnsemble.addDecodedTermination("RightInput", [[1]], .005, False)
ErrorEnsemble.addDecodedTermination("BInput", [[-1]], .005, False)

# Decoded origin computing the nonlinear function to learn (x^2).
interpreter = DefaultFunctionInterpreter()
Aensemble.addDecodedOrigin("X_squared",
                           [interpreter.parse("x0*x0", D)], "AXON")

# Band-limited random input signal
input1 = FunctionInput('input1',
                       [FourierFunction(.1, 10, .5, 2, 2)], Units.UNK)
LearnNet.addNode(input1)

# Connect the network:
#   input -> A; A -> B starts from random weights (to be learned);
#   Error -> B is an exact, modulatory=False error projection.
LearnNet.addProjection(input1.getOrigin("origin"),
                       Aensemble.getTermination("input"))
make_weights_init(LearnNet, "A", "X", "B", "A_neurons", .005)
# What is learned is the target function minus the error signal.
make_weights(LearnNet, "Error", "X", "B", "error", .005, False)
LearnNet.addProjection(Aensemble.getOrigin("X_squared"),
                       ErrorEnsemble.getTermination("RightInput"))
LearnNet.addProjection(Bensemble.getOrigin("X"),
                       ErrorEnsemble.getTermination("BInput"))

# Probes for plotting
sim = LearnNet.getSimulator()
sim.addProbe("A", Aensemble.X, 1)
sim.addProbe("A", Aensemble, "X_squared", 1)
sim.addProbe("B", Bensemble.X, 1)
# FIX: the "Error" probe originally referenced Bensemble.X — a copy-paste
# slip.  (Behavior was presumably unchanged since NEFEnsemble.X is a shared
# static state-name constant — verify against the Nengo API — but the probe
# should reference the ensemble it actually records.)
sim.addProbe("Error", ErrorEnsemble.X, 1)

# Attach the learning rule (defined in learning_rule.py) to B's random
# A->B termination, driven by the modulatory "error" termination.
rule = RealPlasticityRule("error", 0, MyFunction(), 'X')
LearnNet.setMode(SimulationMode.RATE)
Bensemble.setPlasticityRule("A_neurons", rule)