From 4224e36ac664618a79a6e34f022eb58f8a1fa47c Mon Sep 17 00:00:00 2001
From: "Randall C. O'Reilly"
Date: Tue, 15 Oct 2024 23:26:15 -0700
Subject: [PATCH] hip looper functions

---
 leabra/enumgen.go | 50 ++++++++++++++++++-------------------
 leabra/hip.go     | 63 ++++++++++++++++++++++++++++++++++++++++++++---
 leabra/typegen.go |  4 ++-
 3 files changed, 88 insertions(+), 29 deletions(-)

diff --git a/leabra/enumgen.go b/leabra/enumgen.go
index ef5fef7..89d3ccb 100644
--- a/leabra/enumgen.go
+++ b/leabra/enumgen.go
@@ -6,16 +6,16 @@ import (
 	"cogentcore.org/core/enums"
 )
 
-var _ActNoiseTypeValues = []ActNoiseType{0, 1, 2, 3, 4}
+var _ActNoiseTypeValues = []ActNoiseType{0, 1, 2, 3, 4, 6}
 
 // ActNoiseTypeN is the highest valid value for type ActNoiseType, plus one.
-const ActNoiseTypeN ActNoiseType = 5
+const ActNoiseTypeN ActNoiseType = 7
 
-var _ActNoiseTypeValueMap = map[string]ActNoiseType{`NoNoise`: 0, `VmNoise`: 1, `GeNoise`: 2, `ActNoise`: 3, `GeMultNoise`: 4}
+var _ActNoiseTypeValueMap = map[string]ActNoiseType{`NoNoise`: 0, `VmNoise`: 1, `GeNoise`: 2, `ActNoise`: 3, `GeMultNoise`: 4, `ActNoiseTypeN`: 6}
 
-var _ActNoiseTypeDescMap = map[ActNoiseType]string{0: `NoNoise means no noise added`, 1: `VmNoise means noise is added to the membrane potential. IMPORTANT: this should NOT be used for rate-code (NXX1) activations, because they do not depend directly on the vm -- this then has no effect`, 2: `GeNoise means noise is added to the excitatory conductance (Ge). This should be used for rate coded activations (NXX1)`, 3: `ActNoise means noise is added to the final rate code activation`, 4: `GeMultNoise means that noise is multiplicative on the Ge excitatory conductance values`}
+var _ActNoiseTypeDescMap = map[ActNoiseType]string{0: `NoNoise means no noise added`, 1: `VmNoise means noise is added to the membrane potential. IMPORTANT: this should NOT be used for rate-code (NXX1) activations, because they do not depend directly on the vm -- this then has no effect`, 2: `GeNoise means noise is added to the excitatory conductance (Ge). This should be used for rate coded activations (NXX1)`, 3: `ActNoise means noise is added to the final rate code activation`, 4: `GeMultNoise means that noise is multiplicative on the Ge excitatory conductance values`, 6: ``}
 
-var _ActNoiseTypeMap = map[ActNoiseType]string{0: `NoNoise`, 1: `VmNoise`, 2: `GeNoise`, 3: `ActNoise`, 4: `GeMultNoise`}
+var _ActNoiseTypeMap = map[ActNoiseType]string{0: `NoNoise`, 1: `VmNoise`, 2: `GeNoise`, 3: `ActNoise`, 4: `GeMultNoise`, 6: `ActNoiseTypeN`}
 
 // String returns the string representation of this ActNoiseType value.
 func (i ActNoiseType) String() string { return enums.String(i, _ActNoiseTypeMap) }
@@ -49,16 +49,16 @@ func (i *ActNoiseType) UnmarshalText(text []byte) error {
 	return enums.UnmarshalText(i, text, "ActNoiseType")
 }
 
-var _QuartersValues = []Quarters{0, 1, 2, 3}
+var _QuartersValues = []Quarters{0, 1, 2, 3, 5}
 
 // QuartersN is the highest valid value for type Quarters, plus one.
-const QuartersN Quarters = 4
+const QuartersN Quarters = 6
 
-var _QuartersValueMap = map[string]Quarters{`Q1`: 0, `Q2`: 1, `Q3`: 2, `Q4`: 3}
+var _QuartersValueMap = map[string]Quarters{`Q1`: 0, `Q2`: 1, `Q3`: 2, `Q4`: 3, `QuartersN`: 5}
 
-var _QuartersDescMap = map[Quarters]string{0: `Q1 is the first quarter, which, due to 0-based indexing, shows up as Quarter = 0 in timer`, 1: ``, 2: ``, 3: ``}
+var _QuartersDescMap = map[Quarters]string{0: `Q1 is the first quarter, which, due to 0-based indexing, shows up as Quarter = 0 in timer`, 1: ``, 2: ``, 3: ``, 5: ``}
 
-var _QuartersMap = map[Quarters]string{0: `Q1`, 1: `Q2`, 2: `Q3`, 3: `Q4`}
+var _QuartersMap = map[Quarters]string{0: `Q1`, 1: `Q2`, 2: `Q3`, 3: `Q4`, 5: `QuartersN`}
 
 // String returns the string representation of this Quarters value.
 func (i Quarters) String() string { return enums.BitFlagString(i, _QuartersValues) }
@@ -106,16 +106,16 @@ func (i Quarters) MarshalText() ([]byte, error) { return []byte(i.String()), nil
 
 // UnmarshalText implements the [encoding.TextUnmarshaler] interface.
 func (i *Quarters) UnmarshalText(text []byte) error { return enums.UnmarshalText(i, text, "Quarters") }
 
-var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8}
+var _LayerTypesValues = []LayerTypes{0, 1, 2, 3, 4, 5, 6, 7, 8, 10}
 
 // LayerTypesN is the highest valid value for type LayerTypes, plus one.
-const LayerTypesN LayerTypes = 9
+const LayerTypesN LayerTypes = 11
 
-var _LayerTypesValueMap = map[string]LayerTypes{`SuperLayer`: 0, `InputLayer`: 1, `TargetLayer`: 2, `CompareLayer`: 3, `CTLayer`: 4, `PulvinarLayer`: 5, `TRNLayer`: 6, `PTMaintLayer`: 7, `PTPredLayer`: 8}
+var _LayerTypesValueMap = map[string]LayerTypes{`SuperLayer`: 0, `InputLayer`: 1, `TargetLayer`: 2, `CompareLayer`: 3, `CTLayer`: 4, `PulvinarLayer`: 5, `TRNLayer`: 6, `PTMaintLayer`: 7, `PTPredLayer`: 8, `LayerTypesN`: 10}
 
-var _LayerTypesDescMap = map[LayerTypes]string{0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in LayerTypes.`, 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`, 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`, 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`, 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`, 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT pathways, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`, 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`, 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`, 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`}
+var _LayerTypesDescMap = map[LayerTypes]string{0: `Super is a superficial cortical layer (lamina 2-3-4) which does not receive direct input or targets. In more generic models, it should be used as a Hidden layer, and maps onto the Hidden type in LayerTypes.`, 1: `Input is a layer that receives direct external input in its Ext inputs. Biologically, it can be a primary sensory layer, or a thalamic layer.`, 2: `Target is a layer that receives direct external target inputs used for driving plus-phase learning. Simple target layers are generally not used in more biological models, which instead use predictive learning via Pulvinar or related mechanisms.`, 3: `Compare is a layer that receives external comparison inputs, which drive statistics but do NOT drive activation or learning directly. It is rarely used in axon.`, 4: `CT are layer 6 corticothalamic projecting neurons, which drive "top down" predictions in Pulvinar layers. They maintain information over time via stronger NMDA channels and use maintained prior state information to generate predictions about current states forming on Super layers that then drive PT (5IB) bursting activity, which are the plus-phase drivers of Pulvinar activity.`, 5: `Pulvinar are thalamic relay cell neurons in the higher-order Pulvinar nucleus of the thalamus, and functionally isomorphic neurons in the MD thalamus, and potentially other areas. These cells alternately reflect predictions driven by CT pathways, and actual outcomes driven by 5IB Burst activity from corresponding PT or Super layer neurons that provide strong driving inputs.`, 6: `TRNLayer is thalamic reticular nucleus layer for inhibitory competition within the thalamus.`, 7: `PTMaintLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that exhibit robust, stable maintenance of activity over the duration of a goal engaged window, modulated by basal ganglia (BG) disinhibitory gating, supported by strong MaintNMDA channels and recurrent excitation. The lateral PTSelfMaint pathway uses MaintG to drive GMaintRaw input that feeds into the stronger, longer MaintNMDA channels, and the ThalToPT ModulatoryG pathway from BGThalamus multiplicatively modulates the strength of other inputs, such that only at the time of BG gating are these strong enough to drive sustained active maintenance. Use Act.Dend.ModGain to parameterize.`, 8: `PTPredLayer implements the subset of pyramidal tract (PT) layer 5 intrinsic bursting (5IB) deep neurons that combine modulatory input from PTMaintLayer sustained maintenance and CTLayer dynamic predictive learning that helps to predict state changes during the period of active goal maintenance. This layer provides the primary input to VSPatch US-timing prediction layers, and other layers that require predictive dynamic`, 10: ``}
 
-var _LayerTypesMap = map[LayerTypes]string{0: `SuperLayer`, 1: `InputLayer`, 2: `TargetLayer`, 3: `CompareLayer`, 4: `CTLayer`, 5: `PulvinarLayer`, 6: `TRNLayer`, 7: `PTMaintLayer`, 8: `PTPredLayer`}
+var _LayerTypesMap = map[LayerTypes]string{0: `SuperLayer`, 1: `InputLayer`, 2: `TargetLayer`, 3: `CompareLayer`, 4: `CTLayer`, 5: `PulvinarLayer`, 6: `TRNLayer`, 7: `PTMaintLayer`, 8: `PTPredLayer`, 10: `LayerTypesN`}
 
 // String returns the string representation of this LayerTypes value.
 func (i LayerTypes) String() string { return enums.String(i, _LayerTypesMap) }
@@ -149,16 +149,16 @@ func (i *LayerTypes) UnmarshalText(text []byte) error {
 	return enums.UnmarshalText(i, text, "LayerTypes")
 }
 
-var _NeurFlagsValues = []NeurFlags{0, 1, 2, 3}
+var _NeurFlagsValues = []NeurFlags{0, 1, 2, 3, 5}
 
 // NeurFlagsN is the highest valid value for type NeurFlags, plus one.
-const NeurFlagsN NeurFlags = 4
+const NeurFlagsN NeurFlags = 6
 
-var _NeurFlagsValueMap = map[string]NeurFlags{`NeurOff`: 0, `NeurHasExt`: 1, `NeurHasTarg`: 2, `NeurHasCmpr`: 3}
+var _NeurFlagsValueMap = map[string]NeurFlags{`NeurOff`: 0, `NeurHasExt`: 1, `NeurHasTarg`: 2, `NeurHasCmpr`: 3, `NeurFlagsN`: 5}
 
-var _NeurFlagsDescMap = map[NeurFlags]string{0: `NeurOff flag indicates that this neuron has been turned off (i.e., lesioned)`, 1: `NeurHasExt means the neuron has external input in its Ext field`, 2: `NeurHasTarg means the neuron has external target input in its Targ field`, 3: `NeurHasCmpr means the neuron has external comparison input in its Targ field -- used for computing comparison statistics but does not drive neural activity ever`}
+var _NeurFlagsDescMap = map[NeurFlags]string{0: `NeurOff flag indicates that this neuron has been turned off (i.e., lesioned)`, 1: `NeurHasExt means the neuron has external input in its Ext field`, 2: `NeurHasTarg means the neuron has external target input in its Targ field`, 3: `NeurHasCmpr means the neuron has external comparison input in its Targ field -- used for computing comparison statistics but does not drive neural activity ever`, 5: ``}
 
-var _NeurFlagsMap = map[NeurFlags]string{0: `NeurOff`, 1: `NeurHasExt`, 2: `NeurHasTarg`, 3: `NeurHasCmpr`}
+var _NeurFlagsMap = map[NeurFlags]string{0: `NeurOff`, 1: `NeurHasExt`, 2: `NeurHasTarg`, 3: `NeurHasCmpr`, 5: `NeurFlagsN`}
 
 // String returns the string representation of this NeurFlags value.
 func (i NeurFlags) String() string { return enums.BitFlagString(i, _NeurFlagsValues) }
@@ -208,16 +208,16 @@ func (i *NeurFlags) UnmarshalText(text []byte) error {
 	return enums.UnmarshalText(i, text, "NeurFlags")
 }
 
-var _PathTypesValues = []PathTypes{0, 1, 2, 3, 4}
+var _PathTypesValues = []PathTypes{0, 1, 2, 3, 4, 5, 6, 7}
 
 // PathTypesN is the highest valid value for type PathTypes, plus one.
-const PathTypesN PathTypes = 5
+const PathTypesN PathTypes = 8
 
-var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4}
+var _PathTypesValueMap = map[string]PathTypes{`ForwardPath`: 0, `BackPath`: 1, `LateralPath`: 2, `InhibPath`: 3, `CTCtxtPath`: 4, `CHLPath`: 5, `EcCa1Path`: 6, `PathTypesN`: 7}
 
-var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`}
+var _PathTypesDescMap = map[PathTypes]string{0: `Forward is a feedforward, bottom-up pathway from sensory inputs to higher layers`, 1: `Back is a feedback, top-down pathway from higher layers back to lower layers`, 2: `Lateral is a lateral pathway within the same layer / area`, 3: `Inhib is an inhibitory pathway that drives inhibitory synaptic conductances instead of the default excitatory ones.`, 4: `CTCtxt are pathways from Superficial layers to CT layers that send Burst activations drive updating of CtxtGe excitatory conductance, at end of plus (51B Bursting) phase. Biologically, this pathway comes from the PT layer 5IB neurons, but it is simpler to use the Super neurons directly, and PT are optional for most network types. These pathways also use a special learning rule that takes into account the temporal delays in the activation states. Can also add self context from CT for deeper temporal context.`, 5: `CHLPath implements Contrastive Hebbian Learning.`, 6: `EcCa1Path implements special learning for EC <-> CA1 pathways in the hippocampus to perform error-driven learning of this encoder pathway according to the ThetaPhase algorithm. uses Contrastive Hebbian Learning (CHL) on ActP - ActQ1 Q1: ECin -> CA1 -> ECout : ActQ1 = minus phase for auto-encoder Q2, 3: CA3 -> CA1 -> ECout : ActM = minus phase for recall Q4: ECin -> CA1, ECin -> ECout : ActP = plus phase for everything`, 7: ``}
 
-var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`}
+var _PathTypesMap = map[PathTypes]string{0: `ForwardPath`, 1: `BackPath`, 2: `LateralPath`, 3: `InhibPath`, 4: `CTCtxtPath`, 5: `CHLPath`, 6: `EcCa1Path`, 7: `PathTypesN`}
 
 // String returns the string representation of this PathTypes value.
 func (i PathTypes) String() string { return enums.String(i, _PathTypesMap) }
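
The regenerated maps above give the new hippocampal pathway types (CHLPath, EcCa1Path) full name/value round-tripping through the enums helpers. A minimal sketch of exercising that round-trip, assuming PathTypes has the same generated UnmarshalText method that the diff shows for the other enum types in this file:

package main

import (
	"fmt"

	"github.com/emer/leabra/v2/leabra"
)

func main() {
	// Parse the new pathway type by name via the generated helpers.
	var pt leabra.PathTypes
	if err := pt.UnmarshalText([]byte("EcCa1Path")); err != nil {
		panic(err)
	}
	// String looks up _PathTypesMap, so this prints: EcCa1Path 6
	fmt.Println(pt.String(), int(pt))
}
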
diff --git a/leabra/hip.go b/leabra/hip.go
index e76f45b..13ed211 100644
--- a/leabra/hip.go
+++ b/leabra/hip.go
@@ -5,7 +5,12 @@
 package leabra
 
 import (
+	"fmt"
+
+	"cogentcore.org/core/base/errors"
 	"cogentcore.org/core/math32"
+	"github.com/emer/emergent/v2/etime"
+	"github.com/emer/emergent/v2/looper"
 )
 
 // Contrastive Hebbian Learning (CHL) parameters
@@ -152,9 +157,6 @@ func (pt *Path) EcCa1Defaults() {
 // DWt computes the weight change (learning) -- on sending pathways
 // Delta version
 func (pt *Path) DWtEcCa1() {
-	if !pt.Learn.Learn {
-		return
-	}
 	slay := pt.Send
 	rlay := pt.Recv
 	for si := range slay.Neurons {
@@ -202,3 +204,61 @@ func (pt *Path) DWtEcCa1() {
 		}
 	}
 }
+
+// ConfigLoopsHip configures the hippocampal looper functions and should be
+// included in the model's ConfigLoops to make sure the hip loops are
+// configured correctly; see hip.go for an instance of its implementation.
+func (net *Network) ConfigLoopsHip(ctx *Context, man *looper.Manager) {
+	var tmpValues []float32
+	ecout := net.LayerByName("ECout")
+	ecin := net.LayerByName("ECin")
+	ca1 := net.LayerByName("CA1")
+	ca3 := net.LayerByName("CA3")
+	ca1FromECin := errors.Log1(ca1.RecvPathBySendName("ECin")).(*Path)
+	ca1FromCa3 := errors.Log1(ca1.RecvPathBySendName("CA3")).(*Path)
+	ca3FromDg := errors.Log1(ca3.RecvPathBySendName("DG")).(*Path)
+
+	dgPjScale := ca3FromDg.WtScale.Rel
+
+	// configure events -- note that events are shared between Train, Test
+	// so only need to do it once on Train
+	mode := etime.Train
+	stack := man.Stacks[mode]
+	cyc := stack.Loops[etime.Cycle]
+	minusStart := cyc.EventByName("MinusPhase") // cycle 0
+	minusStart.OnEvent.Add("HipMinusPhase:Start", func() {
+		// first quarter: ECin drives CA1; CA3 and DG inputs are off
+		ca1FromECin.WtScale.Abs = 1
+		ca1FromCa3.WtScale.Abs = 0
+		ca3FromDg.WtScale.Rel = 0
+		net.GScaleFromAvgAct()
+		net.InitGInc()
+	})
+	quarter1 := cyc.EventByName("Quarter1")
+	quarter1.OnEvent.Add("Hip:Quarter1", func() {
+		// recall quarters: CA3 drives CA1 instead of ECin
+		ca1FromECin.WtScale.Abs = 0
+		ca1FromCa3.WtScale.Abs = 1
+		if ctx.Mode == etime.Test {
+			ca3FromDg.WtScale.Rel = 1 // weaker
+			fmt.Println("test, rel = 1")
+		} else {
+			ca3FromDg.WtScale.Rel = dgPjScale
+			fmt.Println("train, rel:", dgPjScale)
+		}
+		net.GScaleFromAvgAct()
+		net.InitGInc()
+	})
+	plus := cyc.EventByName("PlusPhase")
+	plus.OnEvent.InsertBefore("MinusPhase:End", "HipPlusPhase:Start", func() {
+		// plus phase: ECin drives CA1 again; in training, clamp ECout to ECin acts
+		ca1FromECin.WtScale.Abs = 1
+		ca1FromCa3.WtScale.Abs = 0
+		if ctx.Mode == etime.Train {
+			ecin.UnitValues(&tmpValues, "Act", 0)
+			ecout.ApplyExt1D32(tmpValues)
+		}
+		net.GScaleFromAvgAct()
+		net.InitGInc()
+	})
+}
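
ConfigLoopsHip only registers event handlers; the looper stacks and the named cycle events (MinusPhase at cycle 0, Quarter1, PlusPhase) must already exist on the Cycle loop. A sketch of how a model might call it, assuming the Manager-style looper API (looper.NewManager, AddStack, AddTime) that this patch builds against; the Sim struct, ConfigLoops method, and counter sizes here are illustrative, not part of the patch:

package main

import (
	"github.com/emer/emergent/v2/etime"
	"github.com/emer/emergent/v2/looper"
	"github.com/emer/leabra/v2/leabra"
)

// Sim is a minimal stand-in for the usual simulation struct.
type Sim struct {
	Net     *leabra.Network
	Context leabra.Context
	Loops   *looper.Manager
}

// ConfigLoops builds the looper stacks and hooks in the hippocampal
// theta-phase gating; a hypothetical sketch, not part of the patch.
func (ss *Sim) ConfigLoops() {
	man := looper.NewManager()
	man.AddStack(etime.Train).
		AddTime(etime.Run, 1).
		AddTime(etime.Epoch, 50).
		AddTime(etime.Trial, 20).
		AddTime(etime.Cycle, 100)
	man.AddStack(etime.Test).
		AddTime(etime.Epoch, 1).
		AddTime(etime.Trial, 20).
		AddTime(etime.Cycle, 100)

	// ... standard leabra loop wiring and the MinusPhase / Quarter1 /
	// PlusPhase cycle events are added here, for both stacks ...

	// Events are shared between Train and Test, so one call suffices.
	ss.Net.ConfigLoopsHip(&ss.Context, man)

	ss.Loops = man
}

Because the handlers close over ctx.Mode, the same events do the right thing at test time: weaker DG -> CA3 drive, and no clamping of ECout to the ECin activations.
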
diff --git a/leabra/typegen.go b/leabra/typegen.go
index e46085a..5c097b4 100644
--- a/leabra/typegen.go
+++ b/leabra/typegen.go
@@ -28,6 +28,8 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.Contex
 var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.Quarters", IDName: "quarters", Doc: "Quarters are the different alpha trial quarters, as a bitflag,\nfor use in relevant timing parameters where quarters need to be specified.\nThe Q1..4 defined values are integer *bit positions* -- use Set, Has etc methods\nto set bits from these bit positions."})
 
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.CHLParams", IDName: "chl-params", Doc: "Contrastive Hebbian Learning (CHL) parameters", Fields: []types.Field{{Name: "On", Doc: "if true, use CHL learning instead of standard XCAL learning -- allows easy exploration of CHL vs. XCAL"}, {Name: "Hebb", Doc: "amount of hebbian learning (should be relatively small, can be effective at .0001)"}, {Name: "Err", Doc: "amount of error driven learning, automatically computed to be 1-Hebb"}, {Name: "MinusQ1", Doc: "if true, use ActQ1 as the minus phase -- otherwise ActM"}, {Name: "SAvgCor", Doc: "proportion of correction to apply to sending average activation for hebbian learning component (0=none, 1=all, .5=half, etc)"}, {Name: "SAvgThr", Doc: "threshold of sending average activation below which learning does not occur (prevents learning when there is no input)"}}})
+
 var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.InhibParams", IDName: "inhib-params", Doc: "leabra.InhibParams contains all the inhibition computation params and functions for basic Leabra\nThis is included in leabra.Layer to support computation.\nThis also includes other misc layer-level params such as running-average activation in the layer\nwhich is used for netinput rescaling and potentially for adapting inhibition over time", Fields: []types.Field{{Name: "Layer", Doc: "inhibition across the entire layer"}, {Name: "Pool", Doc: "inhibition across sub-pools of units, for layers with 4D shape"}, {Name: "Self", Doc: "neuron self-inhibition parameters -- can be beneficial for producing more graded, linear response -- not typically used in cortical networks"}, {Name: "ActAvg", Doc: "running-average activation computation values -- for overall estimates of layer activation levels, used in netinput scaling"}}})
 
 var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.SelfInhibParams", IDName: "self-inhib-params", Doc: "SelfInhibParams defines parameters for Neuron self-inhibition -- activation of the neuron directly feeds back\nto produce a proportional additional contribution to Gi", Fields: []types.Field{{Name: "On", Doc: "enable neuron self-inhibition"}, {Name: "Gi", Doc: "strength of individual neuron self feedback inhibition -- can produce proportional activation behavior in individual units for specialized cases (e.g., scalar val or BG units), but not so good for typical hidden layers"}, {Name: "Tau", Doc: "time constant in cycles, which should be milliseconds typically (roughly, how long it takes for value to change significantly -- 1.4x the half-life) for integrating unit self feedback inhibitory values -- prevents oscillations that otherwise occur -- relatively rapid 1.4 typically works, but may need to go longer if oscillations are a problem"}, {Name: "Dt", Doc: "rate = 1 / tau"}}})
@@ -68,7 +70,7 @@ var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.NeurFl
 var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.WtBalRecvPath", IDName: "wt-bal-recv-path", Doc: "WtBalRecvPath are state variables used in computing the WtBal weight balance function\nThere is one of these for each Recv Neuron participating in the pathway.", Fields: []types.Field{{Name: "Avg", Doc: "average of effective weight values that exceed WtBal.AvgThr across given Recv Neuron's connections for given Path"}, {Name: "Fact", Doc: "overall weight balance factor that drives changes in WbInc vs. WbDec via a sigmoidal function -- this is the net strength of weight balance changes"}, {Name: "Inc", Doc: "weight balance increment factor -- extra multiplier to add to weight increases to maintain overall weight balance"}, {Name: "Dec", Doc: "weight balance decrement factor -- extra multiplier to add to weight decreases to maintain overall weight balance"}}})
 
-var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.Path", IDName: "path", Doc: "Path implements the Leabra algorithm at the synaptic level,\nin terms of a pathway connecting two layers.", Embeds: []types.Field{{Name: "PathBase"}}, Fields: []types.Field{{Name: "Send", Doc: "sending layer for this pathway."}, {Name: "Recv", Doc: "receiving layer for this pathway."}, {Name: "Type", Doc: "type of pathway."}, {Name: "WtInit", Doc: "initial random weight distribution"}, {Name: "WtScale", Doc: "weight scaling parameters: modulates overall strength of pathway,\nusing both absolute and relative factors."}, {Name: "Learn", Doc: "synaptic-level learning parameters"}, {Name: "Syns", Doc: "synaptic state values, ordered by the sending layer\nunits which owns them -- one-to-one with SConIndex array."}, {Name: "GScale", Doc: "scaling factor for integrating synaptic input conductances (G's).\ncomputed in AlphaCycInit, incorporates running-average activity levels."}, {Name: "GInc", Doc: "local per-recv unit increment accumulator for synaptic\nconductance from sending units. goes to either GeRaw or GiRaw\non neuron depending on pathway type."}, {Name: "WbRecv", Doc: "weight balance state variables for this pathway, one per recv neuron."}, {Name: "RConN", Doc: "number of recv connections for each neuron in the receiving layer,\nas a flat list."}, {Name: "RConNAvgMax", Doc: "average and maximum number of recv connections in the receiving layer."}, {Name: "RConIndexSt", Doc: "starting index into ConIndex list for each neuron in\nreceiving layer; list incremented by ConN."}, {Name: "RConIndex", Doc: "index of other neuron on sending side of pathway,\nordered by the receiving layer's order of units as the\nouter loop (each start is in ConIndexSt),\nand then by the sending layer's units within that."}, {Name: "RSynIndex", Doc: "index of synaptic state values for each recv unit x connection,\nfor the receiver pathway which does not own the synapses,\nand instead indexes into sender-ordered list."}, {Name: "SConN", Doc: "number of sending connections for each neuron in the\nsending layer, as a flat list."}, {Name: "SConNAvgMax", Doc: "average and maximum number of sending connections\nin the sending layer."}, {Name: "SConIndexSt", Doc: "starting index into ConIndex list for each neuron in\nsending layer; list incremented by ConN."}, {Name: "SConIndex", Doc: "index of other neuron on receiving side of pathway,\nordered by the sending layer's order of units as the\nouter loop (each start is in ConIndexSt), and then\nby the sending layer's units within that."}}})
+var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.Path", IDName: "path", Doc: "Path implements the Leabra algorithm at the synaptic level,\nin terms of a pathway connecting two layers.", Embeds: []types.Field{{Name: "PathBase"}}, Fields: []types.Field{{Name: "Send", Doc: "sending layer for this pathway."}, {Name: "Recv", Doc: "receiving layer for this pathway."}, {Name: "Type", Doc: "type of pathway."}, {Name: "WtInit", Doc: "initial random weight distribution"}, {Name: "WtScale", Doc: "weight scaling parameters: modulates overall strength of pathway,\nusing both absolute and relative factors."}, {Name: "Learn", Doc: "synaptic-level learning parameters"}, {Name: "CHL", Doc: "CHL are the parameters for CHL learning. if CHL is On then\nWtSig.SoftBound is automatically turned off, as it is incompatible."}, {Name: "Syns", Doc: "synaptic state values, ordered by the sending layer\nunits which owns them -- one-to-one with SConIndex array."}, {Name: "GScale", Doc: "scaling factor for integrating synaptic input conductances (G's).\ncomputed in AlphaCycInit, incorporates running-average activity levels."}, {Name: "GInc", Doc: "local per-recv unit increment accumulator for synaptic\nconductance from sending units. goes to either GeRaw or GiRaw\non neuron depending on pathway type."}, {Name: "WbRecv", Doc: "weight balance state variables for this pathway, one per recv neuron."}, {Name: "RConN", Doc: "number of recv connections for each neuron in the receiving layer,\nas a flat list."}, {Name: "RConNAvgMax", Doc: "average and maximum number of recv connections in the receiving layer."}, {Name: "RConIndexSt", Doc: "starting index into ConIndex list for each neuron in\nreceiving layer; list incremented by ConN."}, {Name: "RConIndex", Doc: "index of other neuron on sending side of pathway,\nordered by the receiving layer's order of units as the\nouter loop (each start is in ConIndexSt),\nand then by the sending layer's units within that."}, {Name: "RSynIndex", Doc: "index of synaptic state values for each recv unit x connection,\nfor the receiver pathway which does not own the synapses,\nand instead indexes into sender-ordered list."}, {Name: "SConN", Doc: "number of sending connections for each neuron in the\nsending layer, as a flat list."}, {Name: "SConNAvgMax", Doc: "average and maximum number of sending connections\nin the sending layer."}, {Name: "SConIndexSt", Doc: "starting index into ConIndex list for each neuron in\nsending layer; list incremented by ConN."}, {Name: "SConIndex", Doc: "index of other neuron on receiving side of pathway,\nordered by the sending layer's order of units as the\nouter loop (each start is in ConIndexSt), and then\nby the sending layer's units within that."}}})
 
 var _ = types.AddType(&types.Type{Name: "github.com/emer/leabra/v2/leabra.PathTypes", IDName: "path-types", Doc: "PathTypes enumerates all the different types of leabra pathways,\nfor the different algorithm types supported.\nClass parameter styles automatically key off of these types."})
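
The CHLParams fields registered in typegen.go compress the learning rule into prose. As a reading aid, here is an illustrative, simplified rendering of the per-synapse weight change they describe; the function and package names are hypothetical, and the full implementation in hip.go differs in detail (for example, it also soft-bounds the error-driven term and gates learning entirely when the sending average falls below SAvgThr):

package hipsketch

// chlDWt sketches the CHL weight change described by CHLParams: an
// error-driven contrastive term weighted by Err = 1 - Hebb, plus a
// Hebbian term weighted by Hebb. sp/rp are sending/recv plus-phase
// activations; sm/rm are minus-phase activations (ActQ1 when MinusQ1
// is on, otherwise ActM); savgCor is the SAvgCor-corrected sending
// average activation; wt is the current linear weight.
func chlDWt(hebb, err, sp, sm, rp, rm, savgCor, wt float32) float32 {
	// Hebbian component: plus-phase co-activation with soft weight
	// bounds, normalized by the corrected sending average.
	hebbDWt := rp * (sp*(savgCor-wt) - (1-sp)*wt)
	// Error-driven component: contrastive difference between plus-phase
	// and minus-phase co-products -- the core CHL term.
	errDWt := (rp * sp) - (rm * sm)
	return hebb*hebbDWt + err*errDWt
}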