diff --git a/.buildinfo b/.buildinfo
new file mode 100644
index 0000000..f6fc73a
--- /dev/null
+++ b/.buildinfo
@@ -0,0 +1,4 @@
+# Sphinx build info version 1
+# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: b316255dee9ce301b8068bd695b9553d
+tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 0000000..e69de29
diff --git a/_modules/baskerville/bed.html b/_modules/baskerville/bed.html
new file mode 100644
index 0000000..c0930a5
--- /dev/null
+++ b/_modules/baskerville/bed.html
@@ -0,0 +1,301 @@
+baskerville.bed — baskerville 0.0.1 documentation

Source code for baskerville.bed

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+
+import json
+import os
+import sys
+
+import numpy as np
+import pandas as pd
+import pysam
+from tqdm import tqdm
+
+from baskerville import dna
+
+################################################################################
+# bed.py
+#
+# Methods to work with BED files.
+################################################################################
+
+
+
+[docs]
+def make_bed_seqs(bed_file, fasta_file, seq_len, stranded=False):
+    """Extract BED regions as DNA sequences, extended to seq_len, returning
+    the sequences and their coordinates as a list of tuples."""
+    fasta_open = pysam.Fastafile(fasta_file)
+
+    seqs_dna = []
+    seqs_coords = []
+
+    for line in open(bed_file):
+        a = line.split()
+        chrm = a[0]
+        start = int(float(a[1]))
+        end = int(float(a[2]))
+        if len(a) >= 6:
+            strand = a[5]
+        else:
+            strand = "+"
+
+        # determine sequence limits
+        mid = (start + end) // 2
+        seq_start = mid - seq_len // 2
+        seq_end = seq_start + seq_len
+
+        # save coordinates
+        if stranded:
+            seqs_coords.append((chrm, seq_start, seq_end, strand))
+        else:
+            seqs_coords.append((chrm, seq_start, seq_end))
+
+        # initialize sequence
+        seq_dna = ""
+
+        # add N's for left overreach
+        if seq_start < 0:
+            print(
+                "Adding %d Ns to %s:%d-%d" % (-seq_start, chrm, start, end),
+                file=sys.stderr,
+            )
+            seq_dna = "N" * (-seq_start)
+            seq_start = 0
+
+        # get dna
+        seq_dna += fasta_open.fetch(chrm, seq_start, seq_end).upper()
+
+        # add N's for right overreach
+        if len(seq_dna) < seq_len:
+            print(
+                "Adding %d Ns to %s:%d-%d" % (seq_len - len(seq_dna), chrm, start, end),
+                file=sys.stderr,
+            )
+            seq_dna += "N" * (seq_len - len(seq_dna))
+
+        # reverse complement
+        if stranded and strand == "-":
+            seq_dna = dna.dna_rc(seq_dna)
+
+        # append
+        seqs_dna.append(seq_dna)
+
+    fasta_open.close()
+
+    return seqs_dna, seqs_coords
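A minimal usage sketch (the file names peaks.bed and hg38.fa are hypothetical; any BED file and samtools-indexed FASTA should work):

from baskerville.bed import make_bed_seqs

# extract 131,072 bp sequences centered on each BED region
seqs_dna, seqs_coords = make_bed_seqs("peaks.bed", "hg38.fa", seq_len=131072)
print(seqs_coords[0])    # e.g. ("chr1", 999424, 1130496)
print(len(seqs_dna[0]))  # 131072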
+ + + +
+[docs] +def read_bed_coords(bed_file, seq_len): + """Return BED regions as a list of coordinate + tuples, extended to a specified length.""" + seqs_coords = [] + + for line in open(bed_file): + a = line.split() + chrm = a[0] + start = int(float(a[1])) + end = int(float(a[2])) + + # determine sequence limits + mid = (start + end) // 2 + seq_start = mid - seq_len // 2 + seq_end = seq_start + seq_len + + # save + seqs_coords.append((chrm, seq_start, seq_end)) + + return seqs_coords
+ + + +
+[docs]
+def write_bedgraph(
+    preds, targets, data_dir: str, out_dir: str, split_label: str, bedgraph_indexes=None
+):
+    """Write BEDgraph files for predictions and targets from a dataset.
+
+    Args:
+      preds (np.array): Predictions.
+      targets (np.array): Targets.
+      data_dir (str): Data directory, for identifying sequences and statistics.
+      out_dir (str): Output directory.
+      split_label (str): Split label.
+      bedgraph_indexes (list): List of target indexes to write.
+    """
+    # get shapes
+    num_seqs, target_length, num_targets = targets.shape
+
+    # set bedgraph indexes
+    if bedgraph_indexes is None:
+        bedgraph_indexes = np.arange(num_targets)
+
+    # read data parameters
+    with open("%s/statistics.json" % data_dir) as data_open:
+        data_stats = json.load(data_open)
+    pool_width = data_stats["pool_width"]
+
+    # read sequence positions
+    seqs_df = pd.read_csv(
+        "%s/sequences.bed" % data_dir, sep="\t", names=["chr", "start", "end", "split"]
+    )
+    seqs_df = seqs_df[seqs_df.split == split_label]
+    assert seqs_df.shape[0] == num_seqs
+
+    # initialize output directory
+    os.makedirs(out_dir, exist_ok=True)
+
+    print("Writing BEDgraph files")
+    for ti in tqdm(bedgraph_indexes):
+        # slice preds/targets
+        preds_ti = preds[:, :, ti]
+        targets_ti = targets[:, :, ti]
+
+        # initialize raw predictions/targets
+        preds_out = open("%s/preds_t%d.bedgraph" % (out_dir, ti), "w")
+        targets_out = open("%s/targets_t%d.bedgraph" % (out_dir, ti), "w")
+
+        # write raw predictions/targets
+        for si, seq in enumerate(seqs_df.itertuples()):
+            # write bin values
+            bin_start = seq.start
+            for bi in range(target_length):
+                bin_end = bin_start + pool_width
+                cols = [
+                    seq.chr,
+                    str(bin_start),
+                    str(bin_end),
+                    "%.2f" % preds_ti[si, bi],
+                ]
+                print("\t".join(cols), file=preds_out)
+                cols = [
+                    seq.chr,
+                    str(bin_start),
+                    str(bin_end),
+                    "%.2f" % targets_ti[si, bi],
+                ]
+                print("\t".join(cols), file=targets_out)
+                bin_start = bin_end
+
+        preds_out.close()
+        targets_out.close()
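A toy invocation sketch, assuming a directory data_out/ (hypothetical name) already contains the statistics.json (with pool_width) and sequences.bed files produced by the Baskerville data pipeline; the arrays here are random stand-ins with 8 sequences, 1024 bins, and 4 targets:

import numpy as np
from baskerville.bed import write_bedgraph

preds = np.random.rand(8, 1024, 4).astype("float32")
targets = np.random.rand(8, 1024, 4).astype("float32")

# writes preds_t0.bedgraph, targets_t0.bedgraph, etc. into bedgraph_out/
write_bedgraph(preds, targets, "data_out", "bedgraph_out", "test", bedgraph_indexes=[0, 2])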
\ No newline at end of file
diff --git a/_modules/baskerville/blocks.html b/_modules/baskerville/blocks.html
new file mode 100644
index 0000000..d61b3fc
--- /dev/null
+++ b/_modules/baskerville/blocks.html
@@ -0,0 +1,2219 @@
+baskerville.blocks — baskerville 0.0.1 documentation

Source code for baskerville.blocks

+# Copyright 2019 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import sys
+import numpy as np
+import tensorflow as tf
+
+from baskerville import layers
+
+
+############################################################
+# Convolution
+############################################################
+
+[docs] +def conv_block( + inputs, + filters=None, + kernel_size=1, + activation="relu", + activation_end=None, + stride=1, + dilation_rate=1, + l2_scale=0, + dropout=0, + conv_type="standard", + pool_size=1, + pool_type="max", + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + residual=False, + kernel_initializer="he_normal", + padding="same", +): + """Construct a single convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters: Conv1D filters + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: Conv1D stride + dilation_rate: Conv1D dilation rate + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + conv_type: Conv1D layer type + residual: Residual connection boolean + pool_size: Max pool width + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + norm_gamma: BatchNorm gamma (defaults according to residual) + + Returns: + [batch_size, seq_length, features] output sequence + """ + + # flow through variable current + current = inputs + + # choose convolution type + if conv_type == "separable": + conv_layer = tf.keras.layers.SeparableConv1D + else: + conv_layer = tf.keras.layers.Conv1D + + if filters is None: + filters = inputs.shape[-1] + + # activation + current = layers.activate(current, activation) + + # convolution + current = conv_layer( + filters=filters, + kernel_size=kernel_size, + strides=stride, + padding="same", + use_bias=(norm_type is None), + dilation_rate=dilation_rate, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + )(current) + + # normalize + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma + )(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization(gamma_initializer=norm_gamma)( + current + ) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # residual add + if residual: + current = tf.keras.layers.Add()([inputs, current]) + + # end activation + if activation_end is not None: + current = layers.activate(current, activation_end) + + # Pool + if pool_size > 1: + if pool_type == "softmax": + current = layers.SoftmaxPool1D(pool_size=pool_size)(current) + else: + current = tf.keras.layers.MaxPool1D(pool_size=pool_size, padding=padding)( + current + ) + + return current
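A short sketch of how a conv_block call composes into a Keras model (assuming baskerville and TensorFlow are importable; the shapes are illustrative):

import tensorflow as tf
from baskerville import blocks

# one-hot DNA: 1024 bp x 4 channels
inputs = tf.keras.Input(shape=(1024, 4))

# activation -> conv -> batch norm -> width-2 max pool
current = blocks.conv_block(
    inputs, filters=64, kernel_size=15, norm_type="batch", pool_size=2
)

model = tf.keras.Model(inputs, current)
print(model.output_shape)  # (None, 512, 64)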
+ + + +
+[docs] +def conv_dna( + inputs, + filters=None, + kernel_size=15, + activation="relu", + stride=1, + l2_scale=0, + residual=False, + dropout=0, + dropout_residual=0, + pool_size=1, + pool_type="max", + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + use_bias=None, + se=False, + conv_type="standard", + kernel_initializer="he_normal", + padding="same", +): + """Construct a single convolution block, assumed to be operating on DNA. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters: Conv1D filters + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: Conv1D stride + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + conv_type: Conv1D layer type + pool_size: Max pool width + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + + Returns: + [batch_size, seq_length, features] output sequence + """ + + # flow through variable current + current = inputs + + # choose convolution type + if conv_type == "separable": + conv_layer = tf.keras.layers.SeparableConv1D + else: + conv_layer = tf.keras.layers.Conv1D + + if filters is None: + filters = inputs.shape[-1] + + # need option to define for older models + if use_bias is None: + use_bias = norm_type is None and not residual + + # convolution + current = conv_layer( + filters=filters, + kernel_size=kernel_size, + strides=stride, + padding="same", + use_bias=use_bias, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + )(current) + + # squeeze-excite + if se: + current = squeeze_excite(current) + + if residual: + # residual conv block + rcurrent = conv_nac( + current, + activation=activation, + l2_scale=l2_scale, + dropout=dropout_residual, + conv_type=conv_type, + norm_type=norm_type, + se=se, + bn_momentum=bn_momentum, + kernel_initializer=kernel_initializer, + ) + + # residual add + rcurrent = layers.Scale()(rcurrent) + current = tf.keras.layers.Add()([current, rcurrent]) + + else: + # normalize + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization()(current) + + # activation + current = layers.activate(current, activation) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # Pool + if pool_size > 1: + if pool_type == "softmax": + current = layers.SoftmaxPool1D(pool_size=pool_size)(current) + else: + current = tf.keras.layers.MaxPool1D(pool_size=pool_size, padding=padding)( + current + ) + + return current
+ + + +
+[docs] +def conv_nac( + inputs, + filters=None, + kernel_size=1, + activation="relu", + stride=1, + dilation_rate=1, + l2_scale=0, + dropout=0, + conv_type="standard", + residual=False, + pool_size=1, + pool_type="max", + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + kernel_initializer="he_normal", + padding="same", + se=False, +): + """Construct a single convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters: Conv1D filters + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: Conv1D stride + dilation_rate: Conv1D dilation rate + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + conv_type: Conv1D layer type + residual: Residual connection boolean + pool_size: Max pool width + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + + Returns: + [batch_size, seq_length, features] output sequence + """ + + # flow through variable current + current = inputs + + # choose convolution type + if conv_type == "separable": + conv_layer = tf.keras.layers.SeparableConv1D + else: + conv_layer = tf.keras.layers.Conv1D + + if filters is None: + filters = inputs.shape[-1] + + # normalize + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization()(current) + + # activation + current = layers.activate(current, activation) + + # convolution + current = conv_layer( + filters=filters, + kernel_size=kernel_size, + strides=stride, + padding=padding, + use_bias=True, + dilation_rate=dilation_rate, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + )(current) + + # squeeze-excite + if se: + current = squeeze_excite(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # residual add + if residual: + current = tf.keras.layers.Add()([inputs, current]) + + # Pool + if pool_size > 1: + if pool_type == "softmax": + current = layers.SoftmaxPool1D(pool_size=pool_size)(current) + else: + current = tf.keras.layers.MaxPool1D(pool_size=pool_size, padding=padding)( + current + ) + + return current
+ + + +
+[docs]
+def conv_next(
+    inputs,
+    filters=None,
+    kernel_size=7,
+    activation="relu",
+    dense_expansion=2.0,
+    dilation_rate=1,
+    l2_scale=0,
+    dropout=0,
+    residual=False,
+    pool_size=1,
+    pool_type="max",
+    kernel_initializer="he_normal",
+    padding="same",
+    norm_type=None,
+    bn_momentum=0.99,
+):
+    """Construct a single ConvNext-style convolution block.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      filters: Conv1D filters
+      kernel_size: Conv1D kernel_size
+      activation: relu/gelu/etc
+      dense_expansion: Expansion factor for the dense layer
+      dilation_rate: Conv1D dilation rate
+      l2_scale: L2 regularization weight.
+      dropout: Dropout rate probability
+      residual: Residual connection boolean
+      pool_size: Max pool width
+      bn_momentum: BatchNorm momentum
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+
+    if filters is None:
+        filters = inputs.shape[-1]
+
+    # flow through variable current
+    current = inputs
+
+    # convolution
+    current = tf.keras.layers.SeparableConv1D(
+        filters=filters,
+        kernel_size=kernel_size,
+        padding="same",
+        use_bias=True,
+        dilation_rate=dilation_rate,
+        kernel_initializer=kernel_initializer,
+        kernel_regularizer=tf.keras.regularizers.l2(l2_scale),
+    )(current)
+
+    # normalize
+    current = tf.keras.layers.LayerNormalization(epsilon=1e-5)(current)
+
+    # dense expansion (e.g. 2.0x the filter count)
+    expansion_filters = int(dense_expansion * filters)
+    current = tf.keras.layers.Dense(
+        units=expansion_filters,
+        use_bias=True,
+        kernel_initializer=kernel_initializer,
+        kernel_regularizer=tf.keras.regularizers.l2(l2_scale),
+    )(current)
+
+    # dropout
+    if dropout > 0:
+        current = tf.keras.layers.Dropout(rate=dropout)(current)
+
+    # activation
+    current = layers.activate(current, activation)
+
+    # dense contraction
+    current = tf.keras.layers.Dense(
+        units=filters,
+        use_bias=True,
+        kernel_initializer=kernel_initializer,
+        kernel_regularizer=tf.keras.regularizers.l2(l2_scale),
+    )(current)
+
+    # residual add
+    if residual:
+        current = tf.keras.layers.Add()([inputs, current])
+
+    # Pool
+    if pool_size > 1:
+        if pool_type == "softmax":
+            current = layers.SoftmaxPool1D(pool_size=pool_size)(current)
+        else:
+            current = tf.keras.layers.MaxPool1D(pool_size=pool_size, padding=padding)(
+                current
+            )
+
+    return current
+ + + +
+[docs] +def unet_conv( + inputs, + unet_repr, + activation="relu", + stride=2, + l2_scale=0, + dropout=0, + norm_type=None, + bn_momentum=0.99, + kernel_size=1, + kernel_initializer="he_normal", + upsample_conv=False, +): + """Construct a feature pyramid network block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: UpSample stride + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + upsample_conv: Conv1D the upsampled input path + + Returns: + [batch_size, seq_length, features] output sequence + """ + + # variables + current1 = inputs + current2 = unet_repr + + # normalize + if norm_type == "batch-sync": + current1 = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current1) + current2 = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current2) + elif norm_type == "batch": + current1 = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current1) + current2 = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current2) + elif norm_type == "layer": + current1 = tf.keras.layers.LayerNormalization()(current1) + current2 = tf.keras.layers.LayerNormalization()(current2) + + # activate + current1 = layers.activate(current1, activation) + current2 = layers.activate(current2, activation) + + filters = inputs.shape[-1] + + # dense + if upsample_conv: + current1 = tf.keras.layers.Dense( + units=filters, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current1) + current2 = tf.keras.layers.Dense( + units=filters, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current2) + + # upsample + current1 = tf.keras.layers.UpSampling1D(size=stride)(current1) + + # add + current = tf.keras.layers.Add()([current1, current2]) + + # normalize? + # activate? + + # convolution + current = tf.keras.layers.SeparableConv1D( + filters=filters, + kernel_size=kernel_size, + padding="same", + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + return current
+ + + +
+[docs] +def unet_concat( + inputs, + unet_repr, + activation="relu", + stride=2, + l2_scale=0, + dropout=0, + norm_type=None, + bn_momentum=0.99, + kernel_size=1, + kernel_initializer="he_normal", +): + """Construct a single transposed convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters: Conv1D filters + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: UpSample stride + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + conv_type: Conv1D layer type + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + + Returns: + [batch_size, stride*seq_length, features] output sequence + """ + + # variables + current1 = inputs + current2 = unet_repr + + # normalize + if norm_type == "batch-sync": + current1 = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current1) + current2 = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current2) + elif norm_type == "batch": + current1 = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current1) + current2 = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current2) + elif norm_type == "layer": + current1 = tf.keras.layers.LayerNormalization()(current1) + current2 = tf.keras.layers.LayerNormalization()(current2) + + # upsample + current1 = tf.keras.layers.UpSampling1D(size=stride)(current1) + + # concatenate + current = tf.keras.layers.Concatenate()([current2, current1]) + + # activate + current = layers.activate(current, activation) + + # dense + mid_units = int(1.5 * unet_repr.shape[-1]) + current = tf.keras.layers.Dense( + units=mid_units, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # activate + current = layers.activate(current, activation) + + # dense + current = tf.keras.layers.Conv1D( + filters=unet_repr.shape[-1], + kernel_size=kernel_size, + padding="same", + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # residual + current = tf.keras.layers.Add()([unet_repr, current]) + + return current
+ + + +
+[docs] +def tconv_nac( + inputs, + filters=None, + kernel_size=1, + activation="relu", + stride=1, + l2_scale=0, + dropout=0, + conv_type="standard", + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + kernel_initializer="he_normal", + padding="same", +): + """Construct a single transposed convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters: Conv1D filters + kernel_size: Conv1D kernel_size + activation: relu/gelu/etc + stride: UpSample stride + l2_scale: L2 regularization weight. + dropout: Dropout rate probability + conv_type: Conv1D layer type + norm_type: Apply batch or layer normalization + bn_momentum: BatchNorm momentum + + Returns: + [batch_size, stride*seq_length, features] output sequence + """ + + # flow through variable current + current = inputs + + if filters is None: + filters = inputs.shape[-1] + + # normalize + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization(momentum=bn_momentum)(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization()(current) + + # activation + current = layers.activate(current, activation) + + # convolution + current = tf.keras.layers.Conv1DTranspose( + filters=filters, + kernel_size=kernel_size, + strides=stride, + padding="same", + use_bias=True, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + return current
+ + + +
+[docs] +def conv_block_2d( + inputs, + filters=128, + activation="relu", + conv_type="standard", + kernel_size=1, + stride=1, + dilation_rate=1, + l2_scale=0, + dropout=0, + pool_size=1, + norm_type=None, + bn_momentum=0.99, + norm_gamma="ones", + kernel_initializer="he_normal", + symmetric=False, +): + """Construct a single 2D convolution block.""" + + # flow through variable current + current = inputs + + # activation + current = layers.activate(current, activation) + + # choose convolution type + if conv_type == "separable": + conv_layer = tf.keras.layers.SeparableConv2D + else: + conv_layer = tf.keras.layers.Conv2D + + # convolution + current = conv_layer( + filters=filters, + kernel_size=kernel_size, + strides=stride, + padding="same", + use_bias=(norm_type is None), + dilation_rate=dilation_rate, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + )(current) + + # normalize + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma + )(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization(gamma_initializer=norm_gamma)( + current + ) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # pool + if pool_size > 1: + current = tf.keras.layers.MaxPool2D(pool_size=pool_size, padding="same")( + current + ) + + # symmetric + if symmetric: + current = layers.Symmetrize2D()(current) + + return current
+ + + +############################################################ +# Towers +############################################################ +
+[docs] +def conv_tower_v1(inputs, filters_init, filters_mult=1, repeat=1, **kwargs): + """Construct a reducing convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters_init: Initial Conv1D filters + filters_mult: Multiplier for Conv1D filters + repeat: Conv block repetitions + + Returns: + [batch_size, seq_length, features] output sequence + """ + + # flow through variable current + current = inputs + + # initialize filters + rep_filters = filters_init + + for ri in range(repeat): + # convolution + current = conv_block(current, filters=int(np.round(rep_filters)), **kwargs) + + # update filters + rep_filters *= filters_mult + + return current
+ + + +
+[docs] +def conv_tower( + inputs, + filters_init, + filters_end=None, + filters_mult=None, + divisible_by=1, + repeat=1, + reprs=[], + **kwargs, +): + """Construct a reducing convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters_init: Initial Conv1D filters + filters_end: End Conv1D filters + filters_mult: Multiplier for Conv1D filters + divisible_by: Round filters to be divisible by (eg a power of two) + repeat: Tower repetitions + + Returns: + [batch_size, seq_length, features] output sequence + """ + + def _round(x): + return int(np.round(x / divisible_by) * divisible_by) + + # flow through variable current + current = inputs + + # initialize filters + rep_filters = filters_init + + # determine multiplier + if filters_mult is None: + assert filters_end is not None + filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1)) + + for ri in range(repeat): + # convolution + current = conv_block(current, filters=_round(rep_filters), **kwargs) + + # save representation + reprs.append(current) + + # update filters + rep_filters *= filters_mult + + return current
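When filters_end is given instead of filters_mult, the multiplier is solved so that filters interpolate geometrically from filters_init to filters_end across the repeats. A quick numeric check of that schedule, mirroring the computation above (before any divisible_by rounding):

import numpy as np

filters_init, filters_end, repeat = 64, 256, 5
filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))

rep_filters = filters_init
for ri in range(repeat):
    print(int(np.round(rep_filters)))  # 64, 91, 128, 181, 256
    rep_filters *= filters_mult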
+ + + +
+[docs] +def conv_tower_nac( + inputs, + filters_init, + filters_end=None, + filters_mult=None, + divisible_by=1, + repeat=1, + reprs=[], + **kwargs, +): + """Construct a reducing convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters_init: Initial Conv1D filters + filters_end: End Conv1D filters + filters_mult: Multiplier for Conv1D filters + divisible_by: Round filters to be divisible by (eg a power of two) + repeat: Tower repetitions + reprs: Append representations. + + Returns: + [batch_size, seq_length, features] output sequence + """ + + def _round(x): + return int(np.round(x / divisible_by) * divisible_by) + + # flow through variable current + current = inputs + + # initialize filters + rep_filters = filters_init + + # determine multiplier + if filters_mult is None: + assert filters_end is not None + filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1)) + + for ri in range(repeat): + # convolution + current = conv_nac(current, filters=_round(rep_filters), **kwargs) + + # save representation + reprs.append(current) + + # update filters + rep_filters *= filters_mult + + return current
+ + + +
+[docs] +def res_tower( + inputs, + filters_init, + filters_end=None, + filters_mult=None, + kernel_size=1, + dropout=0, + pool_size=2, + pool_type="max", + divisible_by=1, + repeat=1, + num_convs=2, + reprs=[], + **kwargs, +): + """Construct a reducing convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + filters_init: Initial Conv1D filters + filters_end: End Conv1D filters + filters_mult: Multiplier for Conv1D filters + kernel_size: Conv1D kernel_size + dropout: Dropout on subsequent convolution blocks. + pool_size: Pool width. + repeat: Residual block repetitions + num_convs: Conv blocks per residual layer + + Returns: + [batch_size, seq_length, features] output sequence + """ + + def _round(x): + return int(np.round(x / divisible_by) * divisible_by) + + # flow through variable current + current = inputs + + # initialize filters + rep_filters = filters_init + + # determine multiplier + if filters_mult is None: + assert filters_end is not None + filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1)) + + for ri in range(repeat): + rep_filters_int = _round(rep_filters) + + # initial + current = conv_nac( + current, filters=rep_filters_int, kernel_size=kernel_size, **kwargs + ) + current0 = current + + # subsequent + for ci in range(1, num_convs): + # bg = 'ones' if ci < num_convs-1 else 'zeros' + current = conv_nac(current, filters=rep_filters_int, **kwargs) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # residual add + if num_convs > 1: + current = layers.Scale()(current) + current = tf.keras.layers.Add()([current0, current]) + + # save representation + reprs.append(current) + + # pool + if pool_size > 1: + if pool_type == "softmax": + current = layers.SoftmaxPool1D(pool_size=pool_size)(current) + else: + current = tf.keras.layers.MaxPool1D( + pool_size=pool_size, padding="same" + )(current) + + # update filters + rep_filters *= filters_mult + + return current
+ + + +
+[docs]
+def convnext_tower(
+    inputs,
+    filters_init,
+    filters_end=None,
+    filters_mult=None,
+    kernel_size=1,
+    dropout=0,
+    pool_size=2,
+    pool_type="max",
+    divisible_by=1,
+    repeat=1,
+    num_convs=2,
+    reprs=[],
+    **kwargs,
+):
+    """Construct a reducing tower of residual ConvNext blocks.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      filters_init: Initial Conv1D filters
+      filters_end: End Conv1D filters
+      filters_mult: Multiplier for Conv1D filters
+      kernel_size: Conv1D kernel_size
+      dropout: Dropout on subsequent convolution blocks.
+      pool_size: Pool width.
+      divisible_by: Round filters to be divisible by (eg a power of two)
+      repeat: Residual block repetitions
+      num_convs: Conv blocks per residual layer
+      reprs: Append representations.
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+
+    def _round(x):
+        return int(np.round(x / divisible_by) * divisible_by)
+
+    # flow through variable current
+    current = inputs
+
+    # initialize filters
+    rep_filters = filters_init
+
+    # determine multiplier
+    if filters_mult is None:
+        assert filters_end is not None
+        filters_mult = np.exp(np.log(filters_end / filters_init) / (repeat - 1))
+
+    for ri in range(repeat):
+        rep_filters_int = _round(rep_filters)
+
+        # initial
+        current = conv_next(
+            current,
+            filters=rep_filters_int,
+            kernel_size=kernel_size,
+            dropout=dropout,
+            **kwargs,
+        )
+        current0 = current
+
+        # subsequent
+        for ci in range(1, num_convs):
+            current = conv_next(
+                current,
+                filters=rep_filters_int,
+                kernel_size=kernel_size,
+                dropout=dropout,
+                **kwargs,
+            )
+
+        # residual add
+        if num_convs > 1:
+            current = layers.Scale()(current)
+            current = tf.keras.layers.Add()([current0, current])
+
+        # pool
+        if pool_size > 1:
+            if pool_type == "softmax":
+                current = layers.SoftmaxPool1D(pool_size=pool_size)(current)
+            else:
+                current = tf.keras.layers.MaxPool1D(
+                    pool_size=pool_size, padding="same"
+                )(current)
+
+        # save representation
+        reprs.append(current)
+
+        # update filters
+        rep_filters *= filters_mult
+
+    return current
+ + + +############################################################ +# Attention +############################################################ +
+[docs] +def transformer( + inputs, + key_size=None, + heads=1, + out_size=None, + activation="relu", + dense_expansion=2.0, + content_position_bias=True, + dropout=0.25, + attention_dropout=0.05, + position_dropout=0.01, + l2_scale=0, + mha_l2_scale=0, + num_position_features=None, + qkv_width=1, + mha_initializer="he_normal", + kernel_initializer="he_normal", + **kwargs, +): + """Construct a transformer block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + key_size: Conv block repetitions + + Returns: + [batch_size, seq_length, features] output sequence + """ + if out_size is None: + out_size = inputs.shape[-1] + assert out_size % heads == 0 + value_size = out_size // heads + + # layer norm + current = tf.keras.layers.LayerNormalization()(inputs) + + # multi-head attention + current = layers.MultiheadAttention( + value_size=value_size, + key_size=key_size, + heads=heads, + num_position_features=num_position_features, + attention_dropout_rate=attention_dropout, + positional_dropout_rate=position_dropout, + content_position_bias=content_position_bias, + initializer=mha_initializer, + l2_scale=mha_l2_scale, + qkv_width=qkv_width, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # residual + current = tf.keras.layers.Add()([inputs, current]) + + if dense_expansion == 0: + final = current + else: + final = transformer_dense( + current, out_size, dense_expansion, l2_scale, dropout, kernel_initializer + ) + + return final
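A usage sketch for the transformer block (runs only where baskerville's MultiheadAttention layer is available; out_size defaults to the input feature dimension and must be divisible by heads):

import tensorflow as tf
from baskerville import blocks

# pooled representation: 512 positions x 768 channels
inputs = tf.keras.Input(shape=(512, 768))

# 8 heads -> value_size 768 // 8 = 96, followed by the dense expansion
current = blocks.transformer(inputs, key_size=64, heads=8)

model = tf.keras.Model(inputs, current)
print(model.output_shape)  # (None, 512, 768)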
+ + + +
+[docs]
+def transformer_split(
+    inputs,
+    splits=2,
+    key_size=None,
+    heads=1,
+    out_size=None,
+    activation="relu",
+    dense_expansion=2.0,
+    content_position_bias=True,
+    dropout=0.25,
+    attention_dropout=0.05,
+    position_dropout=0.01,
+    l2_scale=0,
+    mha_l2_scale=0,
+    num_position_features=None,
+    qkv_width=1,
+    mha_initializer="he_normal",
+    kernel_initializer="he_normal",
+    **kwargs,
+):
+    """Construct a transformer block that splits the sequence length-wise,
+    applies shared multi-head attention to each piece, and concatenates
+    the pieces back together.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      splits: Length-wise splits (2 or 3 supported)
+      key_size: Attention key size
+      heads: Attention heads
+      out_size: Output size; defaults to the input feature dimension
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+    if out_size is None:
+        out_size = inputs.shape[-1]
+    assert out_size % heads == 0
+    value_size = out_size // heads
+
+    # layer norm
+    current = tf.keras.layers.LayerNormalization()(inputs)
+
+    # multi-head attention
+    mha = layers.MultiheadAttention(
+        value_size=value_size,
+        key_size=key_size,
+        heads=heads,
+        num_position_features=num_position_features,
+        attention_dropout_rate=attention_dropout,
+        positional_dropout_rate=position_dropout,
+        content_position_bias=content_position_bias,
+        initializer=mha_initializer,
+        l2_scale=mha_l2_scale,
+        qkv_width=qkv_width,
+    )
+
+    _, seq_len, seq_depth = inputs.shape
+    seq_len2 = seq_len // 2
+    seq_len4 = seq_len // 4
+
+    if splits == 2:
+        # split in two length-wise
+        current = tf.keras.layers.Reshape((2, seq_len2, seq_depth))(current)
+
+        # MHA left/right
+        current_left = mha(current[:, 0, :, :])
+        current_right = mha(current[:, 1, :, :])
+
+        current_list = [current_left, current_right]
+
+    elif splits == 3:
+        # split in four length-wise
+        current = tf.keras.layers.Reshape((4, seq_len4, seq_depth))(current)
+
+        # MHA left/right quarters
+        current_left = mha(current[:, 0, :, :])
+        current_right = mha(current[:, 3, :, :])
+
+        # MHA center half
+        current_center = tf.keras.layers.Reshape((seq_len2, seq_depth))(
+            current[:, 1:3, :, :]
+        )
+        current_center = mha(current_center)
+
+        current_list = [current_left, current_center, current_right]
+
+    else:
+        print("transformer_split not implemented for splits > 3", file=sys.stderr)
+        sys.exit(1)
+
+    # concat along position axis
+    current = tf.keras.layers.Concatenate(axis=1)(current_list)
+
+    # dropout
+    if dropout > 0:
+        current = tf.keras.layers.Dropout(dropout)(current)
+
+    # residual
+    current = tf.keras.layers.Add()([inputs, current])
+
+    if dense_expansion == 0:
+        final = current
+    else:
+        final = transformer_dense(
+            current, out_size, dense_expansion, l2_scale, dropout, kernel_initializer
+        )
+
+    return final
+ + + +
+[docs] +def transformer_dense( + inputs, out_size, dense_expansion, l2_scale, dropout, kernel_initializer +): + """Transformer block dense portion.""" + # layer norm + current = tf.keras.layers.LayerNormalization()(inputs) + + # dense + expansion_filters = int(dense_expansion * out_size) + current = tf.keras.layers.Dense( + units=expansion_filters, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # activation + current = layers.activate(current, "relu") + + # dense + current = tf.keras.layers.Dense( + units=out_size, + kernel_regularizer=tf.keras.regularizers.l2(l2_scale), + kernel_initializer=kernel_initializer, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # residual + final = tf.keras.layers.Add()([inputs, current]) + + return final
+ + + +
+[docs] +def transformer2( + inputs, + key_size=None, + heads=1, + out_size=None, + activation="relu", + num_position_features=None, + attention_dropout=0.05, + position_dropout=0.01, + dropout=0.25, + dense_expansion=2.0, + qkv_width=1, + **kwargs, +): + """Construct a transformer block, with length-wise pooling before + returning to full length. + + Args: + inputs: [batch_size, seq_length, features] input sequence + key_size: Conv block repetitions + + Returns: + [batch_size, seq_length, features] output sequence + """ + if out_size is None: + out_size = inputs.shape[-1] + assert out_size % heads == 0 + value_size = out_size // heads + + # convolution to decrease length + current = conv_nac( + inputs, + filters=min(4 * key_size, inputs.shape[-1]), + kernel_size=3, + pool_size=2, + **kwargs, + ) + + # layer norm + current = tf.keras.layers.LayerNormalization()(current) + + # multi-head attention + current = layers.MultiheadAttention( + value_size=value_size, + key_size=key_size, + heads=heads, + num_position_features=num_position_features, + attention_dropout_rate=attention_dropout, + positional_dropout_rate=position_dropout, + transpose_stride=2, + qkv_width=qkv_width, + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # concatenate and transform + current = tf.keras.layers.Concatenate()([inputs, current]) + current = tf.keras.layers.Dense(out_size)(current) + + # residual + current = tf.keras.layers.Add()([inputs, current]) + + if dense_expansion == 0: + final = current + else: + current_mha = current + + # layer norm + current = tf.keras.layers.LayerNormalization()(current) + + # dense + expansion_filters = int(dense_expansion * out_size) + current = tf.keras.layers.Dense(expansion_filters)(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # activation + current = layers.activate(current, "relu") + + # dense + current = tf.keras.layers.Dense(out_size)(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(dropout)(current) + + # residual + final = tf.keras.layers.Add()([current_mha, current]) + + return final
+ + + +
+[docs] +def swin_transformer(inputs, **kwargs): + current = inputs + current = transformer_split(current, splits=2, **kwargs) + current = transformer_split(current, splits=3, **kwargs) + return current
+ + + +
+[docs]
+def transformer_tower(inputs, repeat=2, block_type="transformer", **kwargs):
+    """Construct a tower of repeated transformer blocks.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      repeat: Transformer block repetitions
+      block_type: Block variant: transformer/swin/transformer2
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+    # note: a "lambda" block type was once accepted here, but its
+    # transformer_lambda function is not defined in this module
+    if block_type == "swin":
+        transformer_block = swin_transformer
+    elif block_type == "transformer2":
+        transformer_block = transformer2
+    else:
+        transformer_block = transformer
+
+    current = inputs
+    for ri in range(repeat):
+        current = transformer_block(current, **kwargs)
+    return current
+ + + +
+[docs] +def squeeze_excite( + inputs, + activation="relu", + bottleneck_ratio=8, + additive=False, + norm_type=None, + bn_momentum=0.9, + **kwargs, +): + return layers.SqueezeExcite( + activation, additive, bottleneck_ratio, norm_type, bn_momentum + )(inputs)
+ + + +
+[docs] +def wheeze_excite(inputs, pool_size, **kwargs): + return layers.WheezeExcite(pool_size)(inputs)
+ + + +
+[docs] +def global_context(inputs, **kwargs): + return layers.GlobalContext()(inputs)
+ + + +############################################################ +# Dilated Towers +############################################################ + + +
+[docs]
+def dilated_dense(
+    inputs,
+    filters,
+    kernel_size=3,
+    rate_mult=2,
+    conv_type="standard",
+    dropout=0,
+    repeat=1,
+    **kwargs,
+):
+    """Construct a dilated dense block, concatenating each dilated
+    convolution onto the growing representation.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      filters: Conv1D filters
+      kernel_size: Conv1D kernel_size
+      rate_mult: Dilation rate multiplier for each repetition
+      conv_type: Conv1D layer type
+      dropout: Dropout rate probability
+      repeat: Dilated block repetitions
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+
+    # flow through variable current
+    current = inputs
+
+    # initialize dilation rate
+    dilation_rate = 1.0
+
+    for ri in range(repeat):
+        rep_input = current
+
+        # dilate
+        current = conv_block(
+            current,
+            filters=filters,
+            kernel_size=kernel_size,
+            dilation_rate=int(np.round(dilation_rate)),
+            conv_type=conv_type,
+            **kwargs,
+        )
+
+        # dense concat
+        current = tf.keras.layers.Concatenate()([rep_input, current])
+
+        # update dilation rate
+        dilation_rate *= rate_mult
+
+    return current
+ + + +
+[docs]
+def dilated_residual(
+    inputs,
+    filters,
+    kernel_size=3,
+    rate_mult=2,
+    dropout=0,
+    repeat=1,
+    conv_type="standard",
+    norm_type=None,
+    round=False,
+    **kwargs,
+):
+    """Construct a residual dilated convolution block.
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      filters: Conv1D filters
+      kernel_size: Conv1D kernel_size
+      rate_mult: Dilation rate multiplier for each repetition
+      dropout: Dropout rate probability on the return convolution
+      repeat: Dilated block repetitions
+      conv_type: Conv1D layer type
+      norm_type: Apply batch or layer normalization
+      round: Round the dilation rate after each multiplication
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+
+    # flow through variable current
+    current = inputs
+
+    # initialize dilation rate
+    dilation_rate = 1.0
+
+    for ri in range(repeat):
+        rep_input = current
+
+        # dilate
+        current = conv_block(
+            current,
+            filters=filters,
+            kernel_size=kernel_size,
+            dilation_rate=int(np.round(dilation_rate)),
+            conv_type=conv_type,
+            norm_type=norm_type,
+            norm_gamma="ones",
+            **kwargs,
+        )
+
+        # return
+        current = conv_block(
+            current,
+            filters=rep_input.shape[-1],
+            dropout=dropout,
+            norm_type=norm_type,
+            norm_gamma="zeros",
+            **kwargs,
+        )
+
+        # InitZero
+        if norm_type is None:
+            current = layers.Scale()(current)
+
+        # residual add
+        current = tf.keras.layers.Add()([rep_input, current])
+
+        # update dilation rate
+        dilation_rate *= rate_mult
+        if round:
+            dilation_rate = np.round(dilation_rate)
+
+    return current
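Because the dilation rate multiplies by rate_mult each repetition, the receptive field grows exponentially with depth. A sketch of the rate schedule for the default rate_mult=2 (the receptive-field line counts only the width-3 dilated convolutions, ignoring the width-1 return convolutions):

import numpy as np

dilation_rate = 1.0
rates = []
for ri in range(6):  # repeat=6
    rates.append(int(np.round(dilation_rate)))
    dilation_rate *= 2
print(rates)  # [1, 2, 4, 8, 16, 32]

# a width-3 convolution at rate r adds 2*r positions of context
print(1 + 2 * sum(rates))  # 127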
+ + + +
+[docs]
+def dilated_residual_nac(
+    inputs, filters, kernel_size=3, rate_mult=2, dropout=0, repeat=1, **kwargs
+):
+    """Construct a residual dilated convolution block
+    (norm-activation-convolution order).
+
+    Args:
+      inputs: [batch_size, seq_length, features] input sequence
+      filters: Conv1D filters
+      kernel_size: Conv1D kernel_size
+      rate_mult: Dilation rate multiplier for each repetition
+      dropout: Dropout rate probability on the return convolution
+      repeat: Dilated block repetitions
+
+    Returns:
+      [batch_size, seq_length, features] output sequence
+    """
+
+    # flow through variable current
+    current = inputs
+
+    # initialize dilation rate
+    dilation_rate = 1.0
+
+    for ri in range(repeat):
+        rep_input = current
+
+        # dilate
+        current = conv_nac(
+            current,
+            filters=filters,
+            kernel_size=kernel_size,
+            dilation_rate=int(np.round(dilation_rate)),
+            **kwargs,
+        )
+
+        # return
+        current = conv_nac(
+            current, filters=rep_input.shape[-1], dropout=dropout, **kwargs
+        )
+
+        # residual add
+        current = tf.keras.layers.Add()([rep_input, current])
+
+        # update dilation rate
+        dilation_rate *= rate_mult
+
+    return current
+ + + +
+[docs] +def dilated_residual_2d( + inputs, + filters, + kernel_size=3, + rate_mult=2, + dropout=0, + repeat=1, + symmetric=True, + **kwargs, +): + """Construct a residual dilated convolution block.""" + + # flow through variable current + current = inputs + + # initialize dilation rate + dilation_rate = 1.0 + + for ri in range(repeat): + rep_input = current + + # dilate + current = conv_block_2d( + current, + filters=filters, + kernel_size=kernel_size, + dilation_rate=int(np.round(dilation_rate)), + norm_gamma="ones", + **kwargs, + ) + + # return + current = conv_block_2d( + current, + filters=rep_input.shape[-1], + dropout=dropout, + norm_gamma="zeros", + **kwargs, + ) + + # residual add + current = tf.keras.layers.Add()([rep_input, current]) + + # enforce symmetry + if symmetric: + current = layers.Symmetrize2D()(current) + + # update dilation rate + dilation_rate *= rate_mult + + return current
+ + + +############################################################ +# Center ops +############################################################ + + +
+[docs] +def center_average(inputs, center, **kwargs): + current = layers.CenterAverage(center)(inputs) + return current
+ + + +
+[docs] +def center_slice(inputs, center, **kwargs): + current = layers.CenterSlice(center)(inputs) + return current
+ + + +############################################################ +# 2D +############################################################ + + +
+[docs] +def concat_dist_2d(inputs, **kwargs): + current = layers.ConcatDist2D()(inputs) + return current
+ + + +
+[docs] +def concat_position(inputs, transform="abs", power=1, **kwargs): + current = layers.ConcatPosition(transform, power)(inputs) + return current
+ + + +
+[docs] +def cropping_2d(inputs, cropping, **kwargs): + current = tf.keras.layers.Cropping2D(cropping)(inputs) + return current
+ + + +
+[docs] +def one_to_two(inputs, operation="mean", **kwargs): + current = layers.OneToTwo(operation)(inputs) + return current
+ + + +
+[docs] +def symmetrize_2d(inputs, **kwargs): + return layers.Symmetrize2D()(inputs)
+ + + +
+[docs] +def upper_tri(inputs, diagonal_offset=2, **kwargs): + current = layers.UpperTri(diagonal_offset)(inputs) + return current
+ + + +############################################################ +# Factorization +############################################################ + + +
+[docs] +def factor_inverse(inputs, components_file, **kwargs): + current = layers.FactorInverse(components_file)(inputs) + return current
+ + + +############################################################ +# Dense +############################################################ +
+[docs] +def dense_block( + inputs, + units=None, + activation="relu", + activation_end=None, + flatten=False, + dropout=0, + l2_scale=0, + l1_scale=0, + residual=False, + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + kernel_initializer="he_normal", + **kwargs, +): + """Construct a single convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + units: Conv1D filters + activation: relu/gelu/etc + activation_end: Compute activation after the other operations + flatten: Flatten across positional axis + dropout: Dropout rate probability + l2_scale: L2 regularization weight. + l1_scale: L1 regularization weight. + residual: Residual connection boolean + batch_norm: Apply batch normalization + bn_momentum: BatchNorm momentum + norm_gamma: BatchNorm gamma (defaults according to residual) + + Returns: + [batch_size, seq_length(?), features] output sequence + """ + current = inputs + + if units is None: + units = inputs.shape[-1] + + # activation + current = layers.activate(current, activation) + + # flatten + if flatten: + _, seq_len, seq_depth = current.shape + current = tf.keras.layers.Reshape( + ( + 1, + seq_len * seq_depth, + ) + )(current) + + # dense + current = tf.keras.layers.Dense( + units=units, + use_bias=(norm_type is None), + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l1_l2(l1_scale, l2_scale), + )(current) + + # normalize + if norm_gamma is None: + norm_gamma = "zeros" if residual else "ones" + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma + )(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization(gamma_initializer=norm_gamma)( + current + ) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # residual add + if residual: + current = tf.keras.layers.Add()([inputs, current]) + + # end activation + if activation_end is not None: + current = layers.activate(current, activation_end) + + return current
+ + + +
+[docs] +def dense_nac( + inputs, + units=None, + activation="relu", + flatten=False, + dropout=0, + l2_scale=0, + l1_scale=0, + residual=False, + norm_type=None, + bn_momentum=0.99, + norm_gamma=None, + kernel_initializer="he_normal", + **kwargs, +): + """Construct a single convolution block. + + Args: + inputs: [batch_size, seq_length, features] input sequence + units: Conv1D filters + activation: relu/gelu/etc + activation_end: Compute activation after the other operations + flatten: Flatten across positional axis + dropout: Dropout rate probability + l2_scale: L2 regularization weight. + l1_scale: L1 regularization weight. + residual: Residual connection boolean + batch_norm: Apply batch normalization + bn_momentum: BatchNorm momentum + norm_gamma: BatchNorm gamma (defaults according to residual) + + Returns: + [batch_size, seq_length(?), features] output sequence + """ + current = inputs + + if units is None: + units = inputs.shape[-1] + + # normalize + if norm_gamma is None: + norm_gamma = "zeros" if residual else "ones" + if norm_type == "batch-sync": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma, synchronized=True + )(current) + elif norm_type == "batch": + current = tf.keras.layers.BatchNormalization( + momentum=bn_momentum, gamma_initializer=norm_gamma + )(current) + elif norm_type == "layer": + current = tf.keras.layers.LayerNormalization(gamma_initializer=norm_gamma)( + current + ) + + # activation + current = layers.activate(current, activation) + + # flatten + if flatten: + _, seq_len, seq_depth = current.shape + current = tf.keras.layers.Reshape( + ( + 1, + seq_len * seq_depth, + ) + )(current) + + # dense + current = tf.keras.layers.Dense( + units=units, + use_bias=True, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l1_l2(l1_scale, l2_scale), + )(current) + + # dropout + if dropout > 0: + current = tf.keras.layers.Dropout(rate=dropout)(current) + + # residual add + if residual: + current = tf.keras.layers.Add()([inputs, current]) + + return current
+ + + +
+[docs] +def final( + inputs, + units, + activation="linear", + flatten=False, + kernel_initializer="he_normal", + l2_scale=0, + l1_scale=0, + **kwargs, +): + """Final simple transformation before comparison to targets. + + Args: + inputs: [batch_size, seq_length, features] input sequence + units: Dense units + activation: relu/gelu/etc + flatten: Flatten positional axis. + l2_scale: L2 regularization weight. + l1_scale: L1 regularization weight. + + Returns: + [batch_size, seq_length(?), units] output sequence + + """ + current = inputs + + # flatten + if flatten: + _, seq_len, seq_depth = current.shape + current = tf.keras.layers.Reshape( + ( + 1, + seq_len * seq_depth, + ) + )(current) + + # dense + current = tf.keras.layers.Dense( + units=units, + use_bias=True, + activation=activation, + kernel_initializer=kernel_initializer, + kernel_regularizer=tf.keras.regularizers.l1_l2(l1_scale, l2_scale), + )(current) + + return current
+ + + +############################################################ +# Dictionary +############################################################ +name_func = { + "center_slice": center_slice, + "center_average": center_average, + "concat_dist_2d": concat_dist_2d, + "concat_position": concat_position, + "conv_block": conv_block, + "conv_dna": conv_dna, + "conv_nac": conv_nac, + "conv_next": conv_next, + "conv_block_2d": conv_block_2d, + "conv_tower": conv_tower, + "conv_tower_nac": conv_tower_nac, + "convnext_tower": convnext_tower, + "cropping_2d": cropping_2d, + "dense_block": dense_block, + "dense_nac": dense_nac, + "dilated_residual": dilated_residual, + "dilated_residual_nac": dilated_residual_nac, + "dilated_residual_2d": dilated_residual_2d, + "dilated_dense": dilated_dense, + "factor_inverse": factor_inverse, + "final": final, + "global_context": global_context, + "one_to_two": one_to_two, + "symmetrize_2d": symmetrize_2d, + "squeeze_excite": squeeze_excite, + "swin_transformer": swin_transformer, + "res_tower": res_tower, + "tconv_nac": tconv_nac, + "transformer": transformer, + "transformer_tower": transformer_tower, + "unet_conv": unet_conv, + "unet_concat": unet_concat, + "upper_tri": upper_tri, + "wheeze_excite": wheeze_excite, +} + +keras_func = { + "Conv1D": tf.keras.layers.Conv1D, + "Cropping1D": tf.keras.layers.Cropping1D, + "Cropping2D": tf.keras.layers.Cropping2D, + "Dense": tf.keras.layers.Dense, + "Flatten": tf.keras.layers.Flatten, +} +
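These dictionaries let architectures be assembled by block name, e.g. from a JSON model config. A sketch of that dispatch pattern with a hypothetical mini-architecture (block names and kwargs here are illustrative, not a recommended model):

import tensorflow as tf
from baskerville import blocks

architecture = [
    ("conv_dna", {"filters": 64, "kernel_size": 15, "pool_size": 2}),
    ("conv_tower", {"filters_init": 64, "filters_end": 128, "repeat": 2, "pool_size": 2}),
    ("final", {"units": 10, "activation": "softplus"}),
]

current = inputs = tf.keras.Input(shape=(1024, 4))
for name, kwargs in architecture:
    current = blocks.name_func[name](current, **kwargs)

model = tf.keras.Model(inputs, current)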
\ No newline at end of file
diff --git a/_modules/baskerville/dataset.html b/_modules/baskerville/dataset.html
new file mode 100644
index 0000000..231eb2e
--- /dev/null
+++ b/_modules/baskerville/dataset.html
@@ -0,0 +1,529 @@
+baskerville.dataset — baskerville 0.0.1 documentation

Source code for baskerville.dataset

+# Copyright 2023 Calico LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import glob
+import json
+import pdb
+import sys
+
+from natsort import natsorted
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+
+gpu_devices = tf.config.experimental.list_physical_devices("GPU")
+for device in gpu_devices:
+    tf.config.experimental.set_memory_growth(device, True)
+
+# TFRecord constants
+TFR_INPUT = "sequence"
+TFR_OUTPUT = "target"
+
+
+
+[docs] +def file_to_records(filename: str): + """Read TFRecord file into tf.data.Dataset.""" + return tf.data.TFRecordDataset(filename, compression_type="ZLIB")
+ + + +
+[docs] +class SeqDataset: + """Labeled sequence dataset for Tensorflow. + + Args: + data_dir (str): Dataset directory. + split_label (str): Dataset split, e.g. train, valid, test. + batch_size (int): Batch size. + shuffle_buffer (int): Shuffle buffer size. Defaults to 128. + seq_length_crop (int): Sequence length to crop from sides. Defaults to 0. + mode (str): Dataset mode, e.g. train/eval. Defaults to 'eval'. + tfr_pattern (str): TFRecord pattern to glob. Defaults to split_label. + targets_slice_file (str): Targets table from which to slice a target subset. + """ + + def __init__( + self, + data_dir: str, + split_label: str, + batch_size: int, + shuffle_buffer: int = 128, + seq_length_crop: int = 0, + mode: str = "eval", + tfr_pattern: str = None, + targets_slice_file: str = None, + ): + self.data_dir = data_dir + self.split_label = split_label + self.batch_size = batch_size + self.shuffle_buffer = shuffle_buffer + self.seq_length_crop = seq_length_crop + self.mode = mode + self.tfr_pattern = tfr_pattern + + # read data parameters + data_stats_file = "%s/statistics.json" % self.data_dir + with open(data_stats_file) as data_stats_open: + data_stats = json.load(data_stats_open) + self.seq_length = data_stats["seq_length"] + + # set defaults + self.seq_depth = data_stats.get("seq_depth", 4) + self.seq_1hot = data_stats.get("seq_1hot", False) + self.target_length = data_stats["target_length"] + self.num_targets = data_stats["num_targets"] + self.pool_width = data_stats["pool_width"] + + # slice targets + if targets_slice_file is None: + self.targets_slice = None + else: + targets_df = pd.read_csv(targets_slice_file, index_col=0, sep="\t") + self.targets_slice = np.array(targets_df.index) + + # extract or compute sequence statistics + if self.tfr_pattern is None: + self.tfr_path = "%s/tfrecords/%s-*.tfr" % (self.data_dir, self.split_label) + self.num_seqs = data_stats["%s_seqs" % self.split_label] + else: + self.tfr_path = "%s/tfrecords/%s" % (self.data_dir, self.tfr_pattern) + self.compute_stats() + + # make tf.data.Dataset object + self.make_dataset() + +
+[docs] + def batches_per_epoch(self): + """Compute number of batches per epoch.""" + return self.num_seqs // self.batch_size
+ + +
+[docs] + def distribute(self, strategy): + """Wrap Dataset to distribute across devices.""" + self.dataset = strategy.experimental_distribute_dataset(self.dataset)
+ + +
+[docs] + def generate_parser(self, raw: bool = False): + """Generate parser function for TFRecordDataset.""" + + def parse_proto(example_protos): + """Parse TFRecord protobuf.""" + + # define features + features = { + TFR_INPUT: tf.io.FixedLenFeature([], tf.string), + TFR_OUTPUT: tf.io.FixedLenFeature([], tf.string), + } + + # parse example into features + parsed_features = tf.io.parse_single_example( + example_protos, features=features + ) + + # decode sequence + sequence = tf.io.decode_raw(parsed_features[TFR_INPUT], tf.uint8) + if not raw: + if self.seq_1hot: + sequence = tf.reshape(sequence, [self.seq_length]) + sequence = tf.one_hot(sequence, 1 + self.seq_depth, dtype=tf.uint8) + sequence = sequence[:, :-1] # drop N + else: + sequence = tf.reshape(sequence, [self.seq_length, self.seq_depth]) + if self.seq_length_crop > 0: + crop_len = (self.seq_length - self.seq_length_crop) // 2 + sequence = sequence[crop_len:-crop_len, :] + sequence = tf.cast(sequence, tf.float32) + + # decode targets + targets = tf.io.decode_raw(parsed_features[TFR_OUTPUT], tf.float16) + if not raw: + targets = tf.reshape(targets, [self.target_length, self.num_targets]) + targets = tf.cast(targets, tf.float32) + if self.targets_slice is not None: + targets = targets[:, self.targets_slice] + + return sequence, targets + + return parse_proto
+ + +
+[docs] + def make_dataset(self, cycle_length=4): + """Make tf.data.Dataset w/ transformations.""" + + # initialize dataset from TFRecords glob + tfr_files = natsorted(glob.glob(self.tfr_path)) + if tfr_files: + dataset = tf.data.Dataset.from_tensor_slices(tfr_files) + else: + print("Cannot order TFRecords %s" % self.tfr_path, file=sys.stderr) + dataset = tf.data.Dataset.list_files(self.tfr_path) + + # train + if self.mode == "train": + # repeat + dataset = dataset.repeat() + + # interleave files + dataset = dataset.interleave( + map_func=file_to_records, + cycle_length=cycle_length, + num_parallel_calls=tf.data.experimental.AUTOTUNE, + ) + + # shuffle + dataset = dataset.shuffle( + buffer_size=self.shuffle_buffer, reshuffle_each_iteration=True + ) + + # valid/test + else: + # flat mix files + dataset = dataset.flat_map(file_to_records) + + # map parser across files + dataset = dataset.map(self.generate_parser()) + + # batch + dataset = dataset.batch(self.batch_size) + + # prefetch + dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) + + # hold on + self.dataset = dataset
+ + +
+[docs] + def compute_stats(self): + """Iterate over the TFRecords to count sequences, and infer + seq_depth and num_targets.""" + with tf.name_scope("stats"): + # read TF Records + dataset = tf.data.Dataset.list_files(self.tfr_path) + dataset = dataset.flat_map(file_to_records) + dataset = dataset.map(self.generate_parser(raw=True)) + dataset = dataset.batch(1) + + self.num_seqs = 0 + if self.num_targets is not None: + targets_nonzero = np.zeros(self.num_targets, dtype="bool") + + for seq_raw, targets_raw in dataset: + # infer seq_depth + seq_1hot = seq_raw.numpy().reshape((self.seq_length, -1)) + + # infer num_targets + targets1 = targets_raw.numpy().reshape(self.target_length, -1) + if self.num_targets is None: + self.num_targets = targets1.shape[-1] + targets_nonzero = (targets1 != 0).sum(axis=0) > 0 + else: + assert self.num_targets == targets1.shape[-1] + targets_nonzero = np.logical_or( + targets_nonzero, (targets1 != 0).sum(axis=0) > 0 + ) + + # count sequences + self.num_seqs += 1 + + # warn user about nonzero targets + if self.num_seqs > 0: + self.num_targets_nonzero = (targets_nonzero > 0).sum() + print( + "%s has %d sequences with %d/%d targets" + % ( + self.tfr_path, + self.num_seqs, + self.num_targets_nonzero, + self.num_targets, + ), + flush=True, + ) + else: + self.num_targets_nonzero = None + print( + "%s has %d sequences with 0 targets" % (self.tfr_path, self.num_seqs), + flush=True, + )
+ + +
+[docs] + def numpy( + self, + return_inputs=True, + return_outputs=True, + step=1, + target_slice=None, + dtype="float16", + ): + """Convert TFR inputs and/or outputs to numpy arrays.""" + with tf.name_scope("numpy"): + # initialize dataset from TFRecords glob + tfr_files = natsorted(glob.glob(self.tfr_path)) + if tfr_files: + # dataset = tf.data.Dataset.list_files(tf.constant(tfr_files), shuffle=False) + dataset = tf.data.Dataset.from_tensor_slices(tfr_files) + else: + print("Cannot order TFRecords %s" % self.tfr_path, file=sys.stderr) + dataset = tf.data.Dataset.list_files(self.tfr_path) + + # read TF Records + dataset = dataset.flat_map(file_to_records) + dataset = dataset.map(self.generate_parser(raw=True)) + dataset = dataset.batch(1) + + # initialize inputs and outputs + seqs_1hot = [] + targets = [] + + # collect inputs and outputs + for seq_raw, targets_raw in dataset: + # sequence + if return_inputs: + seq_1hot = seq_raw.numpy().reshape((self.seq_length, -1)) + if self.seq_length_crop > 0: + crop_len = (self.seq_length - self.seq_length_crop) // 2 + seq_1hot = seq_1hot[crop_len:-crop_len, :] + seqs_1hot.append(seq_1hot) + + # targets + if return_outputs: + targets1 = targets_raw.numpy().astype(dtype) + targets1 = np.reshape(targets1, (self.target_length, -1)) + if target_slice is not None: + targets1 = targets1[:, target_slice] + if step > 1: + step_i = np.arange(0, self.target_length, step) + targets1 = targets1[step_i, :] + targets.append(targets1) + + # make arrays + seqs_1hot = np.array(seqs_1hot) + targets = np.array(targets, dtype=dtype) + + # return + if return_inputs and return_outputs: + return seqs_1hot, targets + elif return_inputs: + return seqs_1hot + else: + return targets
+
+ + + +
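A minimal usage sketch for SeqDataset (the dataset directory below is hypothetical; it must contain the statistics.json and tfrecords/ layout that __init__ reads):

from baskerville.dataset import SeqDataset

# "data/hg38" is a hypothetical directory holding statistics.json
# and tfrecords/train-*.tfr files
train_data = SeqDataset("data/hg38", "train", batch_size=4, mode="train")
print("batches per epoch:", train_data.batches_per_epoch())

# pull one batch of (sequence, targets) tensors
for seq_1hot, targets in train_data.dataset.take(1):
    print(seq_1hot.shape, targets.shape)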
+[docs] +def targets_prep_strand(targets_df): + """Adjust targets table for merged stranded datasets. + + Args: + targets_df: pandas DataFrame of targets + + Returns: + targets_df: pandas DataFrame of targets, with stranded + targets collapsed into a single row + """ + # attach strand + targets_strand = [] + for _, target in targets_df.iterrows(): + if target.strand_pair == target.name: + targets_strand.append(".") + else: + targets_strand.append(target.identifier[-1]) + targets_df["strand"] = targets_strand + + # collapse stranded + strand_mask = targets_df.strand != "-" + targets_strand_df = targets_df[strand_mask] + + return targets_strand_df
+ + + +
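For instance, given a hypothetical two-row targets table in which rows 0 and 1 are a forward/reverse strand pair, only the forward row survives the collapse:

import pandas as pd

from baskerville.dataset import targets_prep_strand

# rows 0 and 1 point at each other via strand_pair
targets_df = pd.DataFrame(
    {"identifier": ["CNhs11760+", "CNhs11760-"], "strand_pair": [1, 0]}
)
print(targets_prep_strand(targets_df))  # keeps row 0 with strand "+"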
+[docs]
+def untransform_preds(preds, targets_df, unscale=False):
+    """Undo the squashing transformations performed for the tasks.
+
+    Args:
+        preds (np.array): Predictions LxT.
+        targets_df (pd.DataFrame): Targets information table.
+        unscale (bool): Also undo the scale transform. Defaults to False.
+
+    Returns:
+        preds (np.array): Untransformed predictions LxT.
+    """
+    # clip soft
+    cs = np.expand_dims(np.array(targets_df.clip_soft), axis=0)
+    preds_unclip = cs - 1 + (preds - cs + 1) ** 2
+    preds = np.where(preds > cs, preds_unclip, preds)
+
+    # sqrt
+    sqrt_mask = np.array([ss.find("_sqrt") != -1 for ss in targets_df.sum_stat])
+    preds[:, sqrt_mask] = -1 + (preds[:, sqrt_mask] + 1) ** 2
+
+    # scale
+    if unscale:
+        scale = np.expand_dims(np.array(targets_df.scale), axis=0)
+        preds = preds / scale
+
+    return preds
+ + + +
+[docs]
+def untransform_preds1(preds, targets_df, unscale=False):
+    """Undo the squashing transformations performed for the tasks.
+
+    Args:
+        preds (np.array): Predictions LxT.
+        targets_df (pd.DataFrame): Targets information table.
+        unscale (bool): Leave the predictions unscaled. Defaults to False.
+
+    Returns:
+        preds (np.array): Untransformed predictions LxT.
+    """
+    # scale
+    scale = np.expand_dims(np.array(targets_df.scale), axis=0)
+    preds = preds / scale
+
+    # clip soft
+    cs = np.expand_dims(np.array(targets_df.clip_soft), axis=0)
+    preds_unclip = cs + (preds - cs) ** 2
+    preds = np.where(preds > cs, preds_unclip, preds)
+
+    # undo ** 0.75
+    sqrt_mask = np.array([ss.find("_sqrt") != -1 for ss in targets_df.sum_stat])
+    preds[:, sqrt_mask] = (preds[:, sqrt_mask]) ** (4 / 3)
+
+    # unscale
+    if not unscale:
+        preds = preds * scale
+
+    return preds
+ +
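A quick numeric sketch of untransform_preds, using a hypothetical one-track targets table (clip_soft, sum_stat, and scale are the columns the function reads):

import numpy as np
import pandas as pd

from baskerville.dataset import untransform_preds

targets_df = pd.DataFrame(
    {"clip_soft": [384.0], "scale": [2.0], "sum_stat": ["sum_sqrt"]}
)
preds = np.array([[1.5], [400.0]], dtype="float32")  # LxT = 2x1
print(untransform_preds(preds, targets_df, unscale=True))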
diff --git a/_modules/baskerville/dna.html b/_modules/baskerville/dna.html
new file mode 100644
index 0000000..703d54a
--- /dev/null
+++ b/_modules/baskerville/dna.html
@@ -0,0 +1,501 @@
+baskerville.dna — baskerville 0.0.1 documentation

Source code for baskerville.dna

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import random
+import sys
+
+import numpy as np
+
+"""
+dna.py
+
+Basic methods to interact with DNA sequences.
+"""
+
+
+
+[docs] +def dna_rc(seq: str): + """Reverse complement a DNA sequence. + + Args: + seq (str): DNA sequence. + + Returns: + Reverse complement of the input sequence. + """ + return seq.translate(str.maketrans("ATCGatcg", "TAGCtagc"))[::-1]
+ + + +
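For example:

from baskerville import dna

print(dna.dna_rc("GATTACA"))  # TGTAATC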
+[docs] +def dna_1hot( + seq: str, seq_len: int = None, n_uniform: bool = False, n_sample: bool = False +): + """Convert a DNA sequence to a 1-hot encoding. + + Args: + seq (str): DNA sequence. + seq_len (int): length to extend/trim sequences to. + n_uniform (bool): represent N's as 0.25, forcing float16, + n_sample (bool): sample ACGT for N + + Returns: + seq_code (np.array): 1-hot encoding of DNA sequence. + """ + if seq_len is None: + seq_len = len(seq) + seq_start = 0 + else: + if seq_len <= len(seq): + # trim the sequence + seq_trim = (len(seq) - seq_len) // 2 + seq = seq[seq_trim : seq_trim + seq_len] + seq_start = 0 + else: + seq_start = (seq_len - len(seq)) // 2 + + seq = seq.upper() + + # map nt's to a matrix len(seq)x4 of 0's and 1's. + if n_uniform: + seq_code = np.zeros((seq_len, 4), dtype="float16") + else: + seq_code = np.zeros((seq_len, 4), dtype="bool") + + for i in range(seq_len): + if i >= seq_start and i - seq_start < len(seq): + nt = seq[i - seq_start] + if nt == "A": + seq_code[i, 0] = 1 + elif nt == "C": + seq_code[i, 1] = 1 + elif nt == "G": + seq_code[i, 2] = 1 + elif nt == "T": + seq_code[i, 3] = 1 + else: + if n_uniform: + seq_code[i, :] = 0.25 + elif n_sample: + ni = random.randint(0, 3) + seq_code[i, ni] = 1 + + return seq_code
+ + + +
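A short sketch: encoding a 5-mer into an 8 bp window pads both ends, and the N position is all zeros under the default flags:

from baskerville import dna

seq_code = dna.dna_1hot("ACGTN", seq_len=8)
print(seq_code.shape, seq_code.dtype)  # (8, 4) bool
print(seq_code.astype(int))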
+[docs] +def dna_1hot_index(seq: str, n_sample: bool = False): + """Convert a DNA sequence to an index encoding. + + Args: + seq (str): DNA sequence. + n_sample (bool): sample ACGT for N + + Returns: + seq_code (np.array): Index encoding of DNA sequence. + """ + seq_len = len(seq) + seq = seq.upper() + + # map nt's to a len(seq) of 0,1,2,3 + seq_code = np.zeros(seq_len, dtype="uint8") + + for i in range(seq_len): + nt = seq[i] + if nt == "A": + seq_code[i] = 0 + elif nt == "C": + seq_code[i] = 1 + elif nt == "G": + seq_code[i] = 2 + elif nt == "T": + seq_code[i] = 3 + else: + if n_sample: + seq_code[i] = random.randint(0, 3) + else: + seq_code[i] = 4 + + return seq_code
+ + + +
+[docs]
+def hot1_augment(Xb, fwdrc: bool = True, shift: int = 0):
+    """Transform a batch of one hot coded sequences to augment training.
+
+    Args:
+        Xb (np.array): Batch x Length x 4 one hot coded sequences.
+        fwdrc (bool): Representing forward versus reverse complement strand.
+        shift (int): Shift sequences by this many positions.
+
+    Returns:
+        Xbt (np.array): Transformed batch of sequences.
+    """
+    if Xb.ndim == 2:
+        singleton = True
+        Xb = np.expand_dims(Xb, axis=0)
+    else:
+        singleton = False
+
+    if Xb.dtype == bool:
+        nval = 0
+    else:
+        nval = 0.25
+
+    if shift == 0:
+        Xbt = Xb
+
+    elif shift > 0:
+        Xbt = np.zeros(Xb.shape, dtype=Xb.dtype)
+
+        # fill in left unknowns
+        Xbt[:, :shift, :] = nval
+
+        # fill in sequence
+        Xbt[:, shift:, :] = Xb[:, :-shift, :]
+        # e.g.
+        # Xbt[:,1:,:] = Xb[:,:-1,:]
+
+    elif shift < 0:
+        Xbt = np.zeros(Xb.shape, dtype=Xb.dtype)
+
+        # fill in right unknowns
+        Xbt[:, shift:, :] = nval
+
+        # fill in sequence
+        Xbt[:, :shift, :] = Xb[:, -shift:, :]
+        # e.g.
+        # Xbt[:,:-1,:] = Xb[:,1:,:]
+
+    if not fwdrc:
+        Xbt = hot1_rc(Xbt)
+
+    if singleton:
+        Xbt = Xbt[0]
+
+    return Xbt
+ + + +
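For example, shifting right by one and then reverse complementing:

from baskerville import dna

X = dna.dna_1hot("ACGT")
X_aug = dna.hot1_augment(X, fwdrc=False, shift=1)
print(dna.hot1_dna(X_aug))  # CGTN, i.e. the reverse complement of "NACG"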
+[docs] +def hot1_delete(seq_1hot, pos: int, delete_len: int, pad_value=None): + """Delete nucleotides starting at a given position + in the Lx4 1-hot encoded sequence. + + Args: + seq_1hot (np.array): 1-hot encoded sequence. + pos (int): Position to start deleting. + delete_len (int): Number of nucleotides to delete. + pad_value (float): Value to pad the end with. + + Returns: + seq_1hot (np.array): In-place transformed sequence. + """ + # shift left + seq_1hot[pos:-delete_len, :] = seq_1hot[pos + delete_len :, :] + # e.g. + # seq_1hot[100:-3,:] = seq_1hot[100+3:,:] + + # change right end to N's + if pad_value is None: + if seq_1hot.dtype == bool: + pad_value = 0 + else: + pad_value = 0.25 + + seq_1hot[-delete_len:, :4] = pad_value
+ + + +
+[docs] +def hot1_dna(seqs_1hot): + """Convert 1-hot coded sequences to ACGTN. + + Args: + seq_1hot (np.array): 1-hot encoded sequences. + + Returns: + seqs [str]: List of DNA sequences. + """ + + singleton = False + if seqs_1hot.ndim == 2: + singleton = True + seqs_1hot = np.expand_dims(seqs_1hot, 0) + + seqs = [] + for si in range(seqs_1hot.shape[0]): + seq_list = ["A"] * seqs_1hot.shape[1] + for li in range(seqs_1hot.shape[1]): + if seqs_1hot[si, li, 0] == 1: + seq_list[li] = "A" + elif seqs_1hot[si, li, 1] == 1: + seq_list[li] = "C" + elif seqs_1hot[si, li, 2] == 1: + seq_list[li] = "G" + elif seqs_1hot[si, li, 3] == 1: + seq_list[li] = "T" + else: + seq_list[li] = "N" + + seqs.append("".join(seq_list)) + + if singleton: + seqs = seqs[0] + + return seqs
+ + + +
+[docs] +def hot1_get(seqs_1hot, pos: int): + """Return the nucleotide corresponding to the one hot coding + of position "pos" in the Lx4 array seqs_1hot. + + Args: + seqs_1hot (np.array): 1-hot encoded sequences. + pos (int): Position to get nucleotide. + + Returns: + nt (str): Nucleotide. + """ + if seqs_1hot[pos, 0] == 1: + nt = "A" + elif seqs_1hot[pos, 1] == 1: + nt = "C" + elif seqs_1hot[pos, 2] == 1: + nt = "G" + elif seqs_1hot[pos, 3] == 1: + nt = "T" + else: + nt = "N" + return nt
+ + + +
+[docs] +def hot1_insert(seq_1hot, pos: int, insert_seq: str): + """Insert sequence at a given position in the 1-hot encoded sequence. + + Args: + seq_1hot (np.array): 1-hot encoded sequence. + pos (int): Position to insert sequence. + insert_seq (str): Sequence to insert. + + Returns: + seq_1hot (np.array): In-place transformed sequence. + """ + # shift right + seq_1hot[pos + len(insert_seq) :, :] = seq_1hot[pos : -len(insert_seq), :] + # e.g. + # seq_1hot[100+3:,:] = seq_1hot[100:-3,:] + + # reset + seq_1hot[pos : pos + len(insert_seq), :4] = 0 + + for i in range(len(insert_seq)): + nt = insert_seq[i] + + # set + if nt == "A": + seq_1hot[pos + i, 0] = 1 + elif nt == "C": + seq_1hot[pos + i, 1] = 1 + elif nt == "G": + seq_1hot[pos + i, 2] = 1 + elif nt == "T": + seq_1hot[pos + i, 3] = 1 + else: + print("Invalid nucleotide insert %s" % nt, file=sys.stderr)
+ + + +
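hot1_insert and hot1_delete edit the array in place, truncating or padding on the right to preserve length; a small round trip:

from baskerville import dna

seq_1hot = dna.dna_1hot("AACCGGTT")
dna.hot1_insert(seq_1hot, 4, "TT")
print(dna.hot1_dna(seq_1hot))  # AACCTTGG: GG shifts right, original TT falls off
dna.hot1_delete(seq_1hot, 4, 2)
print(dna.hot1_dna(seq_1hot))  # AACCGGNN: right end padded with N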
+[docs] +def hot1_rc(seqs_1hot): + """Reverse complement a batch of one hot coded sequences, + while being robust to additional tracks beyond the four + nucleotides. + + Args: + seqs_1hot (np.array): 1-hot encoded sequences. + + Returns: + seqs_1hot_rc (np.array): Reverse complemented sequences. + """ + if seqs_1hot.ndim == 2: + singleton = True + seqs_1hot = np.expand_dims(seqs_1hot, axis=0) + else: + singleton = False + + seqs_1hot_rc = seqs_1hot.copy() + + # reverse + seqs_1hot_rc = seqs_1hot_rc[:, ::-1, :] + + # swap A and T + seqs_1hot_rc[:, :, [0, 3]] = seqs_1hot_rc[:, :, [3, 0]] + + # swap C and G + seqs_1hot_rc[:, :, [1, 2]] = seqs_1hot_rc[:, :, [2, 1]] + + if singleton: + seqs_1hot_rc = seqs_1hot_rc[0] + + return seqs_1hot_rc
+ + + +
+[docs] +def hot1_set(seq_1hot, pos: int, nt: str): + """Set position in a 1-hot encoded sequence to given nucleotide. + + Args: + seq_1hot (np.array): 1-hot encoded sequence. + pos (int): Position to set nucleotide. + nt (str): Nucleotide to set. + + Returns: + seq_1hot (np.array): In-place transformed sequence. + """ + # reset + seq_1hot[pos, :4] = 0 + + # set + if nt == "A": + seq_1hot[pos, 0] = 1 + elif nt == "C": + seq_1hot[pos, 1] = 1 + elif nt == "G": + seq_1hot[pos, 2] = 1 + elif nt == "T": + seq_1hot[pos, 3] = 1 + else: + print("Invalid nucleotide set %s" % nt, file=sys.stderr)
+ +
diff --git a/_modules/baskerville/gene.html b/_modules/baskerville/gene.html
new file mode 100644
index 0000000..a96b3ef
--- /dev/null
+++ b/_modules/baskerville/gene.html
@@ -0,0 +1,366 @@
+baskerville.gene — baskerville 0.0.1 documentation

Source code for baskerville.gene

+# Copyright 2022 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+
+import gzip
+
+from intervaltree import IntervalTree
+import numpy as np
+import pybedtools
+
+
+
+[docs] +class GenomicInterval: + def __init__(self, start, end, chrom=None, strand=None): + self.start = start + self.end = end + self.chrom = chrom + self.strand = strand + + def __eq__(self, other): + return self.start == other.start + + def __lt__(self, other): + return self.start < other.start + + def __cmp__(self, x): + if self.start < x.start: + return -1 + elif self.start > x.start: + return 1 + else: + return 0 + + def __str__(self): + if self.chrom is None: + label = "[%d-%d]" % (self.start, self.end) + else: + label = "%s:%d-%d" % (self.chrom, self.start, self.end) + return label
+ + + +
+[docs] +class Gene: + """Class for managing genes in an isoform-agnostic way, taking + the union of exons across isoforms.""" + + def __init__(self, chrom, strand, kv): + self.chrom = chrom + self.strand = strand + self.kv = kv + self.exons = IntervalTree() + +
+[docs] + def add_exon(self, start, end): + """BED 0-indexing assumed.""" + self.exons[start:end] = True
+ + +
+[docs] + def get_exons(self): + self.exons.merge_overlaps() + return sorted(self.exons)
+ + +
+[docs] + def midpoint(self): + positions = [] + for exon in self.get_exons(): + positions += range(exon.begin, exon.end) + midp = int(np.mean(positions)) + return midp
+ + +
+[docs] + def span(self): + exon_starts = [exon.begin for exon in self.exons] + exon_ends = [exon.end for exon in self.exons] + return min(exon_starts), max(exon_ends)
+ + +
+[docs] + def output_slice(self, seq_start, seq_len, model_stride, span=False): + gene_slice = [] + + if span: + gene_start, gene_end = self.span() + + # clip left boundaries + gene_seq_start = max(0, gene_start - seq_start) + gene_seq_end = max(0, gene_end - seq_start) + + # requires >50% overlap + slice_start = int(np.round(gene_seq_start / model_stride)) + slice_end = int(np.round(gene_seq_end / model_stride)) + + # clip right boundaries + slice_max = int(seq_len / model_stride) + slice_start = min(slice_start, slice_max) + slice_end = min(slice_end, slice_max) + + gene_slice = range(slice_start, slice_end) + + else: + for exon in self.get_exons(): + # clip left boundaries + exon_seq_start = max(0, exon.begin - seq_start) + exon_seq_end = max(0, exon.end - seq_start) + + # requires >50% overlap + slice_start = int(np.round(exon_seq_start / model_stride)) + slice_end = int(np.round(exon_seq_end / model_stride)) + + # clip right boundaries + slice_max = int(seq_len / model_stride) + slice_start = min(slice_start, slice_max) + slice_end = min(slice_end, slice_max) + + gene_slice.extend(range(slice_start, slice_end)) + + return np.array(gene_slice)
+
+ + + +
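A small sketch of the Gene interface with hypothetical coordinates (BED-style, 0-indexed):

from baskerville.gene import Gene

gene = Gene("chr1", "+", {"gene_id": "GENE1"})
gene.add_exon(1000, 1200)
gene.add_exon(1150, 1400)  # overlapping exons merge in get_exons()

print(gene.span())      # (1000, 1400)
print(gene.midpoint())  # 1199
print(gene.output_slice(seq_start=0, seq_len=2048, model_stride=128))  # bins 8, 9, 10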
+[docs] +class Transcriptome: + def __init__(self, gtf_file): + self.genes = {} + self.read_gtf(gtf_file) + +
+[docs] + def read_gtf(self, gtf_file): + if gtf_file[-3:] == ".gz": + gtf_in = gzip.open(gtf_file, "rt") + else: + gtf_in = open(gtf_file) + + # ignore header + line = gtf_in.readline() + while line[0] == "#": + line = gtf_in.readline() + + while line: + a = line.split("\t") + if a[2] == "exon": + chrom = a[0] + start = int(a[3]) + end = int(a[4]) + strand = a[6] + kv = gtf_kv(a[8]) + gene_id = kv["gene_id"] + + # initialize gene + if gene_id not in self.genes: + self.genes[gene_id] = Gene(chrom, strand, kv) + + # add exon + self.genes[gene_id].add_exon(start - 1, end) + + line = gtf_in.readline() + + gtf_in.close()
+ + +
+[docs] + def bedtool_exon(self): + # assemble sequence bedtool + bed_lines = [] + for gene_id, gene in self.genes.items(): + for exon in gene.get_exons(): + exon_line = "%s %d %d %s . %s" % ( + gene.chrom, + exon.begin, + exon.end, + gene_id, + gene.strand, + ) + bed_lines.append(exon_line) + genes_bedt = pybedtools.BedTool("\n".join(bed_lines), from_string=True) + return genes_bedt
+ + +
+[docs] + def bedtool_span(self): + # assemble sequence bedtool + bed_lines = [] + for gene_id, gene in self.genes.items(): + gene_start, gene_end = gene.span() + span_line = "%s %d %d %s . %s" % ( + gene.chrom, + gene_start, + gene_end, + gene_id, + gene.strand, + ) + bed_lines.append(span_line) + genes_bedt = pybedtools.BedTool("\n".join(bed_lines), from_string=True) + return genes_bedt
+ + +
+[docs] + def write_bed_exon(self, bed_file): + pass
+ + +
+[docs] + def write_bed_span(self, bed_file): + pass
+
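And a sketch of loading a full annotation (the GTF path is hypothetical; gzipped files are detected by their .gz suffix):

from baskerville.gene import Transcriptome

transcriptome = Transcriptome("annotation/genes.gtf")
for gene_id, gene in list(transcriptome.genes.items())[:3]:
    print(gene_id, gene.chrom, gene.strand, gene.span())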
+ + + +################################################################################ +# Methods +################################################################################ +
+[docs] +def gtf_kv(s): + """Convert the last gtf section of key/value pairs into a dict.""" + d = {} + + a = s.split(";") + for key_val in a: + if key_val.strip(): + eq_i = key_val.find("=") + if eq_i != -1 and key_val[eq_i - 1] != '"': + kvs = key_val.split("=") + else: + kvs = key_val.split() + + key = kvs[0] + if kvs[1][0] == '"' and kvs[-1][-1] == '"': + val = (" ".join(kvs[1:]))[1:-1].strip() + else: + val = (" ".join(kvs[1:])).strip() + + d[key] = val + + return d
+ +
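For example:

from baskerville.gene import gtf_kv

attrs = 'gene_id "ENSG00000187634"; gene_name "SAMD11";'
kv = gtf_kv(attrs)
print(kv["gene_id"], kv["gene_name"])  # ENSG00000187634 SAMD11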
diff --git a/_modules/baskerville/layers.html b/_modules/baskerville/layers.html
new file mode 100644
index 0000000..7927308
--- /dev/null
+++ b/_modules/baskerville/layers.html
@@ -0,0 +1,1504 @@
+baskerville.layers — baskerville 0.0.1 documentation

Source code for baskerville.layers

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import pdb
+import sys
+from typing import Optional, List
+
+import numpy as np
+import tensorflow as tf
+
+gpu_devices = tf.config.experimental.list_physical_devices("GPU")
+for device in gpu_devices:
+    tf.config.experimental.set_memory_growth(device, True)
+
+############################################################
+# Basic
+############################################################
+
+
+
+[docs] +class Scale(tf.keras.layers.Layer): + """Scale the input by a learned value. + + Args: + axis (int or [int]): Axis/axes along which to scale. + initializer: Initializer for the scale weight. + """ + + def __init__(self, axis=-1, initializer="zeros"): + super(Scale, self).__init__() + if isinstance(axis, (list, tuple)): + self.axis = axis[:] + elif isinstance(axis, int): + self.axis = axis + else: + raise TypeError( + "Expected an int or a list/tuple of ints for the " + "argument 'axis', but received: %r" % axis + ) + self.initializer = tf.keras.initializers.get(initializer) + +
+[docs] + def build(self, input_shape): + # input_shape = tensor_shape.TensorShape(input_shape) + if not input_shape.ndims: + raise ValueError("Input has undefined rank.") + ndims = len(input_shape) + + # Convert axis to list and resolve negatives + if isinstance(self.axis, int): + self.axis = [self.axis] + elif isinstance(self.axis, tuple): + self.axis = list(self.axis) + for idx, x in enumerate(self.axis): + if x < 0: + self.axis[idx] = ndims + x + + # Validate axes + for x in self.axis: + if x < 0 or x >= ndims: + raise ValueError("Invalid axis: %d" % x) + if len(self.axis) != len(set(self.axis)): + raise ValueError("Duplicate axis: {}".format(tuple(self.axis))) + + param_shape = [input_shape[dim] for dim in self.axis] + + self.scale = self.add_weight( + name="scale", + shape=param_shape, + initializer=self.initializer, + trainable=True, + )
+ + +
+[docs] + def call(self, x): + return x * self.scale
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update( + { + "axis": self.axis, + "initializer": tf.keras.initializers.serialize(self.initializer), + } + ) + return config
+
+ + + +
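A minimal sketch: the default zeros initializer makes the layer output zeros until training moves the weight, so this example uses ones to show pass-through behavior:

import tensorflow as tf

from baskerville.layers import Scale

scale = Scale(axis=-1, initializer="ones")
x = tf.random.normal([2, 8, 4])
print(scale(x).shape)  # (2, 8, 4); equals x under the "ones" init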
+[docs] +class Softplus(tf.keras.layers.Layer): + """Safe softplus, clipping large values.""" + + def __init__(self, exp_max=10000): + super(Softplus, self).__init__() + self.exp_max = exp_max + +
+[docs] + def call(self, x): + x = tf.clip_by_value(x, -self.exp_max, self.exp_max) + return tf.keras.activations.softplus(x)
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config["exp_max"] = self.exp_max + return config
+
+ + + +############################################################ +# Center ops +############################################################ + + +
+[docs]
+class CenterSlice(tf.keras.layers.Layer):
+    """Slice the center of the input.
+
+    Args:
+        center (int): Length of the center slice.
+    """
+
+    def __init__(self, center):
+        super(CenterSlice, self).__init__()
+        self.center = center
+
+[docs] + def call(self, x): + seq_len = x.shape[1] + center_start = (seq_len - self.center) // 2 + center_end = center_start + self.center + return x[:, center_start:center_end, :]
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"center": self.center}) + return config
+
+ + + +
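For example, slicing the central 4 positions of a length-8 input:

import tensorflow as tf

from baskerville.layers import CenterSlice

x = tf.reshape(tf.range(32, dtype=tf.float32), [1, 8, 4])
print(CenterSlice(4)(x).shape)  # (1, 4, 4): positions 2..5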
+[docs] +class CenterAverage(tf.keras.layers.Layer): + """Average the center of the input. + + Args: + center (int): Length of the center slice. + """ + + def __init__(self, center): + super(CenterAverage, self).__init__() + self.center = center + self.slice = CenterSlice(self.center) + +
+[docs] + def call(self, x): + return tf.keras.backend.mean(self.slice(x), axis=1, keepdims=True)
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"center": self.center}) + return config
+
+ + + +
+[docs] +class LengthAverage(tf.keras.layers.Layer): + """Average across a variable length sequence.""" + + def __init__(self): + super(LengthAverage, self).__init__() + +
+[docs]
+    def call(self, x, seq):
+        # focus on nt's
+        seq = seq[..., :4]
+
+        # collapse nt axis
+        seq_pos = tf.math.reduce_sum(seq, axis=-1)
+
+        # collapse length axis
+        seq_len = tf.math.reduce_sum(seq_pos, axis=-1)
+        seq_len = tf.expand_dims(seq_len, axis=-1)
+
+        # sum across length
+        x_sum = tf.math.reduce_sum(x, axis=-2)
+
+        # divide by true sequence length
+        x_avg = x_sum / seq_len
+
+        return x_avg
+
+ + + +############################################################ +# Attention +############################################################ + + +def _prepend_dims(x, num_dims): + return tf.reshape(x, shape=[1] * num_dims + x.shape) + + +
+[docs] +def positional_features_central_mask( + positions: tf.Tensor, feature_size: int, seq_length: int +): + """Positional features using a central mask (allow only central features).""" + pow_rate = np.exp(np.log(seq_length + 1) / feature_size).astype("float32") + center_widths = tf.pow(pow_rate, tf.range(1, feature_size + 1, dtype=tf.float32)) + center_widths = center_widths - 1 + center_widths = _prepend_dims(center_widths, positions.shape.rank) + outputs = tf.cast(center_widths > tf.abs(positions)[..., tf.newaxis], tf.float32) + tf.TensorShape(outputs.shape).assert_is_compatible_with( + positions.shape + [feature_size] + ) + return outputs
+ + + +
+[docs] +def positional_features( + positions: tf.Tensor, feature_size: int, seq_length: int, symmetric=False +): + """Compute relative positional encodings/features. + + Each positional feature function will compute/provide the same fraction of + features, making up the total of feature_size. + + Args: + positions: Tensor of relative positions of arbitrary shape. + feature_size: Total number of basis functions. + seq_length: Sequence length denoting the characteristic length that + the individual positional features can use. This is required since the + parametrization of the input features should be independent of `positions` + while it could still require to use the total number of features. + symmetric: If True, the resulting features will be symmetric across the + relative position of 0 (i.e. only absolute value of positions will + matter). If false, then both the symmetric and asymmetric version + (symmetric multiplied by sign(positions)) of the features will be used. + + Returns: + Tensor of shape: `positions.shape + (feature_size,)`. + """ + if symmetric: + num_components = 1 + else: + num_components = 2 + num_basis_per_class = feature_size // num_components + + embeddings = positional_features_central_mask( + positions, num_basis_per_class, seq_length + ) + + if not symmetric: + embeddings = tf.concat( + [embeddings, tf.sign(positions)[..., tf.newaxis] * embeddings], axis=-1 + ) + + tf.TensorShape(embeddings.shape).assert_is_compatible_with( + positions.shape + [feature_size] + ) + + return embeddings
+ + + +
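A quick shape check on a toy length-8 sequence:

import tensorflow as tf

from baskerville.layers import positional_features

distances = tf.range(-7, 8, dtype=tf.float32)[tf.newaxis]  # (1, 15) relative positions
feats = positional_features(distances, feature_size=8, seq_length=8)
print(feats.shape)  # (1, 15, 8): central-mask basis plus its signed copy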
+[docs] +def relative_shift(x): + """Shift the relative logits like in TransformerXL.""" + # We prepend zeros on the final timescale dimension. + to_pad = tf.zeros_like(x[..., :1]) + x = tf.concat([to_pad, x], -1) + _, num_heads, t1, t2 = x.shape + x = tf.reshape(x, [-1, num_heads, t2, t1]) + x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1]) + x = tf.reshape(x, [-1, num_heads, t1, t2 - 1]) + x = tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, (t2 + 1) // 2]) + return x
+ + + +
+[docs] +class MultiheadAttention(tf.keras.layers.Layer): + """Multi-head attention.""" + + def __init__( + self, + value_size, + key_size, + heads, + scaling=True, + attention_dropout_rate=0, + relative_position_symmetric=False, + relative_position_functions=["positional_features_central_mask"], + num_position_features=None, + positional_dropout_rate=0, + content_position_bias=True, + zero_initialize=True, + transpose_stride=0, + gated=False, + initializer="he_normal", + l2_scale=0, + qkv_width=1, + ): + """Creates a MultiheadAttention module. + Original version written by Ziga Avsec. + + Args: + value_size: The size of each value embedding per head. + key_size: The size of each key and query embedding per head. + heads: The number of independent queries per timestep. + scaling: Whether to scale the attention logits. + attention_dropout_rate: Dropout rate for attention logits. + relative_position_symmetric: If True, the symmetric version of basis + functions will be used. If False, a symmetric and asymmetric versions + will be use. + relative_position_functions: List of function names used for relative + positional biases. + num_position_features: Number of relative positional features + to compute. If None, `value_size * num_heads` is used. + positional_dropout_rate: Dropout rate for the positional encodings if + relative positions are used. + zero_initialize: if True, the final linear layer will be 0 initialized. + initializer: Initializer for the projection layers. If unspecified, + VarianceScaling is used with scale = 2.0. + """ + super().__init__() + self._value_size = value_size + self._key_size = key_size + self._num_heads = heads + self._attention_dropout_rate = attention_dropout_rate + self._scaling = scaling + self._gated = gated + self._relative_position_symmetric = relative_position_symmetric + self._relative_position_functions = relative_position_functions + if num_position_features is None: + # num_position_features needs to be divisible by the number of + # relative positional functions *2 (for symmetric & asymmetric version). 
+ divisible_by = 2 * len(self._relative_position_functions) + self._num_position_features = ( + self._value_size // divisible_by + ) * divisible_by + else: + self._num_position_features = num_position_features + self._positional_dropout_rate = positional_dropout_rate + self._content_position_bias = content_position_bias + self._l2_scale = l2_scale + self._initializer = initializer + + key_proj_size = self._key_size * self._num_heads + embedding_size = self._value_size * self._num_heads + + if qkv_width == 1: + # standard dense layers + self._q_layer = tf.keras.layers.Dense( + key_proj_size, + name="q_layer", + use_bias=False, + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=self._initializer, + ) + self._k_layer = tf.keras.layers.Dense( + key_proj_size, + name="k_layer", + use_bias=False, + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=self._initializer, + ) + self._v_layer = tf.keras.layers.Dense( + embedding_size, + name="v_layer", + use_bias=False, + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=self._initializer, + ) + else: + # CvT separable convolutions + self._q_layer = tf.keras.layers.SeparableConv1D( + key_proj_size, + kernel_size=qkv_width, + padding="same", + name="q_layer", + use_bias=False, + depthwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + pointwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + depthwise_initializer=self._initializer, + pointwise_initializer=self._initializer, + ) + self._k_layer = tf.keras.layers.SeparableConv1D( + key_proj_size, + kernel_size=qkv_width, + padding="same", + name="k_layer", + use_bias=False, + depthwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + pointwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + depthwise_initializer=self._initializer, + pointwise_initializer=self._initializer, + ) + self._v_layer = tf.keras.layers.SeparableConv1D( + embedding_size, + kernel_size=qkv_width, + padding="same", + name="v_layer", + use_bias=False, + depthwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + pointwise_regularizer=tf.keras.regularizers.l2(self._l2_scale), + depthwise_initializer=self._initializer, + pointwise_initializer=self._initializer, + ) + + if self._gated: + self._gate_layer = tf.keras.layers.Dense( + embedding_size, + activation="activation", + name="gate", + use_bias=False, + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=self._initializer, + ) + w_init = tf.keras.initializers.Zeros() if zero_initialize else self._initializer + if transpose_stride > 0: + self._embedding_layer = tf.keras.layers.Conv1DTranspose( + filters=embedding_size, + kernel_size=3, + strides=transpose_stride, + padding="same", + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=w_init, + ) + else: + self._embedding_layer = tf.keras.layers.Dense( + embedding_size, + name="embedding_layer", + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=w_init, + ) + + # Create relative position layers + self._r_k_layer = tf.keras.layers.Dense( + key_proj_size, + name="r_k_layer", + use_bias=False, + kernel_regularizer=tf.keras.regularizers.l2(self._l2_scale), + kernel_initializer=self._initializer, + ) + self._r_w_bias = self.add_weight( + "%s/r_w_bias" % self.name, + shape=[1, self._num_heads, 1, self._key_size], + initializer=self._initializer, + dtype=tf.float32, + ) + self._r_r_bias = self.add_weight( + 
"%s/r_r_bias" % self.name, + shape=[1, self._num_heads, 1, self._key_size], + initializer=self._initializer, + dtype=tf.float32, + ) + + def _multihead_output(self, linear_layer, inputs): + """Applies a standard linear to inputs and returns multihead output.""" + output = linear_layer(inputs) # [B, T, H * KV] + _, seq_len, num_channels = output.shape + + # Split H * Channels into separate axes. + num_kv_channels = num_channels // self._num_heads + output = tf.reshape( + output, shape=[-1, seq_len, self._num_heads, num_kv_channels] + ) + # [B, T, H, KV] -> [B, H, T, KV] + return tf.transpose(output, [0, 2, 1, 3]) + +
+[docs] + def call(self, inputs, training=False): + # Initialise the projection layers. + embedding_size = self._value_size * self._num_heads + seq_len = inputs.shape[1] + + # Compute q, k and v as multi-headed projections of the inputs. + q = self._multihead_output(self._q_layer, inputs) # [B, H, T, K] + k = self._multihead_output(self._k_layer, inputs) # [B, H, T, K] + v = self._multihead_output(self._v_layer, inputs) # [B, H, T, V] + + # Scale the query by the square-root of key size. + if self._scaling: + q *= self._key_size**-0.5 + + # [B, H, T', T] + content_logits = tf.matmul(q + self._r_w_bias, k, transpose_b=True) + + if self._num_position_features == 0: + logits = content_logits + else: + # Project positions to form relative keys. + distances = tf.range(-seq_len + 1, seq_len, dtype=tf.float32)[tf.newaxis] + positional_encodings = positional_features( + positions=distances, + feature_size=self._num_position_features, + seq_length=seq_len, + symmetric=self._relative_position_symmetric, + ) + # [1, 2T-1, Cr] + + if training: + positional_encodings = tf.nn.dropout( + positional_encodings, rate=self._positional_dropout_rate + ) + + # [1, H, 2T-1, K] + r_k = self._multihead_output(self._r_k_layer, positional_encodings) + + # Add shifted relative logits to content logits. + if self._content_position_bias: + # [B, H, T', 2T-1] + relative_logits = tf.matmul(q + self._r_r_bias, r_k, transpose_b=True) + else: + # [1, H, 1, 2T-1] + relative_logits = tf.matmul(self._r_r_bias, r_k, transpose_b=True) + # [1, H, T', 2T-1] + relative_logits = tf.broadcast_to( + relative_logits, + shape=(1, self._num_heads, seq_len, 2 * seq_len - 1), + ) + + # [B, H, T', T] + relative_logits = relative_shift(relative_logits) + logits = content_logits + relative_logits + + # softmax across length + weights = tf.nn.softmax(logits) + + # Dropout on the attention weights. + if training: + weights = tf.nn.dropout(weights, rate=self._attention_dropout_rate) + + # Transpose and reshape the output. + output = tf.matmul(weights, v) # [B, H, T', V] + output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, T', H, V] + attended_inputs = tf.reshape( + output_transpose, shape=[-1, seq_len, embedding_size] + ) + + # Gate + if self._gated: + attended_inputs = self._gate_layer(attended_inputs) + + # Final linear layer + output = self._embedding_layer(attended_inputs) + + return output
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"value_size": self._value_size, "key_size": self._key_size}) + return config
+
+ + + +
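A minimal forward pass; with the default zero_initialize=True the initial output is all zeros, but the shapes exercise the full attention path:

import tensorflow as tf

from baskerville.layers import MultiheadAttention

mha = MultiheadAttention(value_size=16, key_size=16, heads=4)
x = tf.random.normal([2, 64, 64])
print(mha(x).shape)  # (2, 64, 64): heads * value_size output channels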
+[docs] +class WheezeExcite(tf.keras.layers.Layer): + def __init__(self, pool_size): + super(WheezeExcite, self).__init__() + self.pool_size = pool_size + assert self.pool_size % 2 == 1 + self.paddings = [[0, 0], [self.pool_size // 2, self.pool_size // 2], [0, 0]] + +
+[docs] + def build(self, input_shape): + self.num_channels = input_shape[-1] + + self.wheeze = tf.keras.layers.AveragePooling1D( + self.pool_size, strides=1, padding="valid" + ) + + self.excite1 = tf.keras.layers.Dense( + units=self.num_channels // 4, activation="relu" + ) + self.excite2 = tf.keras.layers.Dense(units=self.num_channels, activation="relu")
+ + +
+[docs] + def call(self, x): + # pad + x_pad = tf.pad(x, self.paddings, "SYMMETRIC") + + # squeeze + x_squeeze = self.wheeze(x_pad) + + # excite + x_excite = self.excite1(x_squeeze) + x_excite = self.excite2(x_excite) + x_excite = tf.keras.activations.sigmoid(x_excite) + + # scale + xs = x * x_excite + + return xs
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"pool_size": self.pool_size}) + return config
+
+ + + +
+[docs] +class SqueezeExcite(tf.keras.layers.Layer): + def __init__( + self, + activation="relu", + additive=False, + bottleneck_ratio=8, + norm_type=None, + bn_momentum=0.9, + ): + super(SqueezeExcite, self).__init__() + self.activation = activation + self.additive = additive + self.norm_type = norm_type + self.bn_momentum = bn_momentum + self.bottleneck_ratio = bottleneck_ratio + +
+[docs] + def build(self, input_shape): + self.num_channels = input_shape[-1] + + if len(input_shape) == 3: + self.one_or_two = "one" + self.gap = tf.keras.layers.GlobalAveragePooling1D() + elif len(input_shape) == 4: + self.one_or_two = "two" + self.gap = tf.keras.layers.GlobalAveragePooling2D() + else: + print( + "SqueezeExcite: input dim %d unexpected" % len(input_shape), + file=sys.stderr, + ) + exit(1) + + self.dense1 = tf.keras.layers.Dense( + units=self.num_channels // self.bottleneck_ratio, activation="relu" + ) + self.dense2 = tf.keras.layers.Dense(units=self.num_channels, activation=None)
+ + + # normalize + # if self.norm_type == 'batch-sync': + # self.norm = tf.keras.layers.experimental.SyncBatchNormalization( + # momentum=self.bn_momentum, gamma_initializer='zeros') + # elif self.norm_type == 'batch': + # self.norm = tf.keras.layers.BatchNormalization( + # momentum=self.bn_momentum, gamma_initializer='zeros') + # elif self.norm_type == 'layer': + # self.norm = tf.keras.layers.LayerNormalization( + # gamma_initializer='zeros') + # else: + # self.norm = None + +
+[docs] + def call(self, x): + # activate + x = activate(x, self.activation) + + # squeeze + squeeze = self.gap(x) + + # excite + excite = self.dense1(squeeze) + excite = self.dense2(excite) + # if self.norm is not None: + # excite = self.norm(excite) + + # scale + if self.one_or_two == "one": + excite = tf.reshape(excite, [-1, 1, self.num_channels]) + else: + excite = tf.reshape(excite, [-1, 1, 1, self.num_channels]) + + if self.additive: + xs = x + excite + else: + excite = tf.keras.activations.sigmoid(excite) + xs = x * excite + + return xs
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update( + { + "activation": self.activation, + "additive": self.additive, + "norm_type": self.norm_type, + "bn_momentum": self.bn_momentum, + "bottleneck_ratio": self.bottleneck_ratio, + } + ) + return config
+
+ + + +
+[docs] +class GlobalContext(tf.keras.layers.Layer): + def __init__(self): + super(GlobalContext, self).__init__() + +
+[docs] + def build(self, input_shape): + self.num_channels = input_shape[-1] + + self.context_key = tf.keras.layers.Dense(units=1, activation=None) + + self.dense1 = tf.keras.layers.Dense(units=self.num_channels // 4) + self.ln = tf.keras.layers.LayerNormalization() + self.dense2 = tf.keras.layers.Dense(units=self.num_channels)
+ + +
+[docs] + def call(self, x): + # context attention + keys = self.context_key(x) # [batch x length x 1] + attention = tf.keras.activations.softmax(keys, axis=-2) # [batch x length x 1] + + # context summary + context = x * attention # [batch x length x channels] + context = tf.keras.backend.sum( + context, axis=-2, keepdims=True + ) # [batch x 1 x channels] + + # transform + transform = self.dense1(context) # [batch x 1 x channels/4] + transform = tf.keras.activations.relu( + self.ln(transform) + ) # [batch x 1 x channels/4] + transform = self.dense2(transform) # [batch x 1 x channels] + # transform = tf.reshape(transform, [-1,1,self.num_channels]) + + # fusion + xs = x + transform # [batch x length x channels] + + return xs
+
+ + + +############################################################ +# Pooling +############################################################ +
+[docs]
+class SoftmaxPool1D(tf.keras.layers.Layer):
+    """Pooling operation with optional weights."""
+
+    def __init__(
+        self, pool_size: int = 2, per_channel: bool = False, init_gain: float = 2.0
+    ):
+        """Softmax pooling.
+
+        Args:
+            pool_size: Pooling size, same as in Max/AvgPooling.
+            per_channel: If True, the logits/softmax weights will be computed for
+                each channel separately. If False, same weights will be used across
+                all channels.
+            init_gain: At 0.0, equivalent to avg pooling; around ~2.0, approaches
+                max pooling.
+        """
+        super(SoftmaxPool1D, self).__init__()
+        self.pool_size = pool_size
+        self.per_channel = per_channel
+        self.init_gain = init_gain
+        self.logit_linear = None
+
+[docs] + def build(self, input_shape): + self.num_channels = input_shape[-1] + self.logit_linear = tf.keras.layers.Dense( + units=self.num_channels if self.per_channel else 1, + use_bias=False, + kernel_initializer=tf.keras.initializers.Identity(self.init_gain), + )
+ + +
+[docs] + def call(self, inputs): + _, seq_length, num_channels = inputs.shape + inputs = tf.reshape( + inputs, (-1, seq_length // self.pool_size, self.pool_size, num_channels) + ) + return tf.reduce_sum( + inputs * tf.nn.softmax(self.logit_linear(inputs), axis=-2), axis=-2 + )
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"pool_size": self.pool_size, "init_gain": self.init_gain}) + return config
+
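For example:

import tensorflow as tf

from baskerville.layers import SoftmaxPool1D

x = tf.random.normal([2, 16, 8])
print(SoftmaxPool1D(pool_size=2)(x).shape)  # (2, 8, 8)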
+ + + +############################################################ +# Position +############################################################ +
+[docs] +class ConcatPosition(tf.keras.layers.Layer): + """Concatenate position to 1d feature vectors.""" + + def __init__(self, transform=None, power=1): + super(ConcatPosition, self).__init__() + self.transform = transform + self.power = power + +
+[docs] + def call(self, inputs): + input_shape = tf.shape(inputs) + batch_size, seq_len = input_shape[0], input_shape[1] + + pos_range = tf.range(-seq_len // 2, seq_len // 2) + if self.transform is None: + pos_feature = pos_range + elif self.transform == "abs": + pos_feature = tf.math.abs(pos_range) + elif self.transform == "reversed": + pos_feature = pos_range[::-1] + else: + raise ValueError("Unknown ConcatPosition transform.") + + if self.power != 1: + pos_feature = tf.pow(pos_feature, self.power) + pos_feature = tf.expand_dims(pos_feature, axis=0) + pos_feature = tf.expand_dims(pos_feature, axis=-1) + pos_feature = tf.tile(pos_feature, [batch_size, 1, 1]) + pos_feature = tf.dtypes.cast(pos_feature, dtype=tf.float32) + + return tf.concat([pos_feature, inputs], axis=-1)
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"transform": self.transform, "power": self.power}) + return config
+
+ + + +############################################################ +# 2D +############################################################ +
+[docs]
+class OneToTwo(tf.keras.layers.Layer):
+    """Transform 1d to 2d with i,j vectors operated on."""
+
+    def __init__(self, operation="mean"):
+        super(OneToTwo, self).__init__()
+        self.operation = operation.lower()
+        valid_operations = ["concat", "mean", "max", "multiply", "multiply1"]
+        assert self.operation in valid_operations
+
+[docs] + def call(self, oned): + _, seq_len, features = oned.shape + + twod1 = tf.tile(oned, [1, seq_len, 1]) + twod1 = tf.reshape(twod1, [-1, seq_len, seq_len, features]) + twod2 = tf.transpose(twod1, [0, 2, 1, 3]) + + if self.operation == "concat": + twod = tf.concat([twod1, twod2], axis=-1) + + elif self.operation == "multiply": + twod = tf.multiply(twod1, twod2) + + elif self.operation == "multiply1": + twod = tf.multiply(twod1 + 1, twod2 + 1) - 1 + + else: + twod1 = tf.expand_dims(twod1, axis=-1) + twod2 = tf.expand_dims(twod2, axis=-1) + twod = tf.concat([twod1, twod2], axis=-1) + + if self.operation == "mean": + twod = tf.reduce_mean(twod, axis=-1) + + elif self.operation == "max": + twod = tf.reduce_max(twod, axis=-1) + + return twod
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config["operation"] = self.operation + return config
+
+ + + +
+[docs] +class ConcatDist2D(tf.keras.layers.Layer): + """Concatenate the pairwise distance to 2d feature matrix.""" + + def __init__(self): + super(ConcatDist2D, self).__init__() + +
+[docs] + def call(self, inputs): + input_shape = tf.shape(inputs) + batch_size, seq_len = input_shape[0], input_shape[1] + + ## concat 2D distance ## + pos = tf.expand_dims(tf.range(0, seq_len), axis=-1) + matrix_repr1 = tf.tile(pos, [1, seq_len]) + matrix_repr2 = tf.transpose(matrix_repr1, [1, 0]) + dist = tf.math.abs(tf.math.subtract(matrix_repr1, matrix_repr2)) + dist = tf.dtypes.cast(dist, tf.float32) + dist = tf.expand_dims(dist, axis=-1) + dist = tf.expand_dims(dist, axis=0) + dist = tf.tile(dist, [batch_size, 1, 1, 1]) + return tf.concat([inputs, dist], axis=-1)
+
+ + + +
+[docs] +class UpperTri(tf.keras.layers.Layer): + """Unroll matrix to its upper triangular portion.""" + + def __init__(self, diagonal_offset=2): + super(UpperTri, self).__init__() + self.diagonal_offset = diagonal_offset + +
+[docs] + def call(self, inputs): + seq_len = inputs.shape[1] + output_dim = inputs.shape[-1] + + if type(seq_len) == tf.compat.v1.Dimension: + seq_len = seq_len.value + output_dim = output_dim.value + + triu_tup = np.triu_indices(seq_len, self.diagonal_offset) + triu_index = list(triu_tup[0] + seq_len * triu_tup[1]) + unroll_repr = tf.reshape(inputs, [-1, seq_len**2, output_dim]) + return tf.gather(unroll_repr, triu_index, axis=1)
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config["diagonal_offset"] = self.diagonal_offset + return config
+
+ + + +
+[docs] +class Symmetrize2D(tf.keras.layers.Layer): + """Take the average of a matrix and its transpose to enforce symmetry.""" + + def __init__(self): + super(Symmetrize2D, self).__init__() + +
+[docs] + def call(self, x): + x_t = tf.transpose(x, [0, 2, 1, 3]) + x_sym = (x + x_t) / 2 + return x_sym
+
+ + + +############################################################ +# Augmentation +############################################################ + + +
+[docs] +class EnsembleReverseComplement(tf.keras.layers.Layer): + """Expand tensor to include reverse complement of one hot encoded DNA sequence.""" + + def __init__(self): + super(EnsembleReverseComplement, self).__init__() + +
+[docs] + def call(self, seqs_1hot): + if not isinstance(seqs_1hot, list): + seqs_1hot = [seqs_1hot] + + ens_seqs_1hot = [] + for seq_1hot in seqs_1hot: + rc_seq_1hot = tf.gather(seq_1hot, [3, 2, 1, 0], axis=-1) + rc_seq_1hot = tf.reverse(rc_seq_1hot, axis=[1]) + ens_seqs_1hot += [ + (seq_1hot, tf.constant(False)), + (rc_seq_1hot, tf.constant(True)), + ] + + return ens_seqs_1hot
+
+ + + +
+[docs] +class StochasticReverseComplement(tf.keras.layers.Layer): + """Stochastically reverse complement a one hot encoded DNA sequence.""" + + def __init__(self): + super(StochasticReverseComplement, self).__init__() + +
+[docs] + def call(self, seq_1hot, training=None): + if training: + rc_seq_1hot = tf.gather(seq_1hot, [3, 2, 1, 0], axis=-1) + rc_seq_1hot = tf.reverse(rc_seq_1hot, axis=[1]) + reverse_bool = tf.random.uniform(shape=[]) > 0.5 + src_seq_1hot = tf.cond(reverse_bool, lambda: rc_seq_1hot, lambda: seq_1hot) + return src_seq_1hot, reverse_bool + else: + return seq_1hot, tf.constant(False)
+
+ + + +
+[docs] +class SwitchReverse(tf.keras.layers.Layer): + """Reverse predictions if the inputs were reverse complemented.""" + + def __init__(self, strand_pair=None): + super(SwitchReverse, self).__init__() + self.strand_pair = strand_pair + +
+[docs] + def call(self, x_reverse): + x = x_reverse[0] + reverse = x_reverse[1] + + xd = len(x.shape) + if xd == 2: + # because we collapsed length already + rev_axes = [] + elif xd == 3: + # length axis + rev_axes = [1] + elif xd == 4: + # 2d spatial axes + rev_axes = [1, 2] + else: + raise ValueError("Cannot recognize SwitchReverse input dimensions %d." % xd) + + if len(rev_axes) > 0: + xr = tf.keras.backend.switch(reverse, tf.reverse(x, axis=rev_axes), x) + else: + xr = x + + if self.strand_pair is None: + xrs = xr + else: + xrs = tf.keras.backend.switch( + reverse, tf.gather(xr, self.strand_pair, axis=-1), xr + ) + + return xrs
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config["strand_pair"] = self.strand_pair + return config
+
+ + + +
+[docs] +class SwitchReverseTriu(tf.keras.layers.Layer): + def __init__(self, diagonal_offset): + super(SwitchReverseTriu, self).__init__() + self.diagonal_offset = diagonal_offset + +
+[docs] + def call(self, x_reverse): + x_ut = x_reverse[0] + reverse = x_reverse[1] + + # infer original sequence length + ut_len = x_ut.shape[1] + if type(ut_len) == tf.compat.v1.Dimension: + ut_len = ut_len.value + seq_len = int(np.sqrt(2 * ut_len + 0.25) - 0.5) + seq_len += self.diagonal_offset + + # get triu indexes + ut_indexes = np.triu_indices(seq_len, self.diagonal_offset) + assert len(ut_indexes[0]) == ut_len + + # construct a ut matrix of ut indexes + mat_ut_indexes = np.zeros(shape=(seq_len, seq_len), dtype="int") + mat_ut_indexes[ut_indexes] = np.arange(ut_len) + + # make lower diag mask + mask_ut = np.zeros(shape=(seq_len, seq_len), dtype="bool") + mask_ut[ut_indexes] = True + mask_ld = ~mask_ut + + # construct a matrix of symmetric ut indexes + mat_indexes = mat_ut_indexes + np.multiply(mask_ld, mat_ut_indexes.T) + + # reverse complement + mat_rc_indexes = mat_indexes[::-1, ::-1] + + # extract ut order + rc_ut_order = mat_rc_indexes[ut_indexes] + + return tf.keras.backend.switch( + reverse, tf.gather(x_ut, rc_ut_order, axis=1), x_ut + )
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config["diagonal_offset"] = self.diagonal_offset + return config
+
+ + + +
+[docs] +class EnsembleShift(tf.keras.layers.Layer): + """Expand tensor to include shifts of one hot encoded DNA sequence.""" + + def __init__(self, shifts=[0], pad="uniform"): + super(EnsembleShift, self).__init__() + self.shifts = shifts + self.pad = pad + +
+[docs] + def call(self, seqs_1hot): + if not isinstance(seqs_1hot, list): + seqs_1hot = [seqs_1hot] + + ens_seqs_1hot = [] + for seq_1hot in seqs_1hot: + for shift in self.shifts: + ens_seqs_1hot.append(shift_sequence(seq_1hot, shift)) + + return ens_seqs_1hot
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"shifts": self.shifts, "pad": self.pad}) + return config
+
+ + + +
+[docs] +class StochasticShift(tf.keras.layers.Layer): + """Stochastically shift a one hot encoded DNA sequence.""" + + def __init__(self, shift_max=0, symmetric=True, pad="uniform"): + super(StochasticShift, self).__init__() + self.shift_max = shift_max + self.symmetric = symmetric + if self.symmetric: + self.augment_shifts = tf.range(-self.shift_max, self.shift_max + 1) + else: + self.augment_shifts = tf.range(0, self.shift_max + 1) + self.pad = pad + +
+[docs] + def call(self, seq_1hot, training=None): + if training: + shift_i = tf.random.uniform( + shape=[], minval=0, dtype=tf.int64, maxval=len(self.augment_shifts) + ) + shift = tf.gather(self.augment_shifts, shift_i) + sseq_1hot = tf.cond( + tf.not_equal(shift, 0), + lambda: shift_sequence(seq_1hot, shift), + lambda: seq_1hot, + ) + return sseq_1hot + else: + return seq_1hot
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update( + {"shift_max": self.shift_max, "symmetric": self.symmetric, "pad": self.pad} + ) + return config
+
+ + + +
+[docs] +def shift_sequence(seq, shift, pad_value=0): + """Shift a sequence left or right by shift_amount. + + Args: + seq: [batch_size, seq_length, seq_depth] sequence + shift: signed shift value (tf.int32 or int) + pad_value: value to fill the padding (primitive or scalar tf.Tensor) + """ + if seq.shape.ndims != 3: + raise ValueError("input sequence should be rank 3") + input_shape = seq.shape + + pad = pad_value * tf.ones_like(seq[:, 0 : tf.abs(shift), :]) + + def _shift_right(_seq): + # shift is positive + sliced_seq = _seq[:, :-shift:, :] + return tf.concat([pad, sliced_seq], axis=1) + + def _shift_left(_seq): + # shift is negative + sliced_seq = _seq[:, -shift:, :] + return tf.concat([sliced_seq, pad], axis=1) + + sseq = tf.cond( + tf.greater(shift, 0), lambda: _shift_right(seq), lambda: _shift_left(seq) + ) + sseq.set_shape(input_shape) + + return sseq
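For example, shifting a single-sequence batch right by one fills the left edge with the pad value:

import tensorflow as tf

from baskerville.layers import shift_sequence

seq = tf.constant([[[1.0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]])  # (1, 3, 4)
print(shift_sequence(seq, shift=1)[0].numpy())
# row 0 becomes zeros (pad); rows 1-2 are the original rows 0-1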
+ + + +############################################################ +# Factorization +############################################################ + + +
+[docs]
+class FactorInverse(tf.keras.layers.Layer):
+    """Invert a target matrix factorization."""
+
+    def __init__(self, components_npy):
+        super(FactorInverse, self).__init__()
+        self.components_npy = components_npy
+        self.components = tf.constant(np.load(components_npy), dtype=tf.float32)
+
+[docs] + def call(self, W): + return tf.keras.backend.dot(W, self.components)
+ + +
+[docs] + def get_config(self): + config = super().get_config().copy() + config.update({"components_npy": self.components_npy}) + return config
+
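+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# FactorInverse maps factorized predictions W (..., K) back to target space
+# through a stored components matrix (K, T), e.g. from a PCA:
+#
+#   np.save("components.npy", pca.components_)  # hypothetical (K, T) array
+#   recon = FactorInverse("components.npy")(W)  # (..., K) @ (K, T) -> (..., T)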
+ + + +############################################################ +# helpers +############################################################ + + +
+[docs]
+def activate(current, activation, verbose=False):
+    """Apply an activation function, specified by name."""
+    if verbose:
+        print("activate:", activation)
+    if activation == "relu":
+        current = tf.keras.layers.ReLU()(current)
+    elif activation == "polyrelu":
+        current = PolyReLU()(current)
+    elif activation == "gelu":
+        current = tf.keras.activations.gelu(current, approximate=True)
+    elif activation == "sigmoid":
+        current = tf.keras.activations.sigmoid(current)
+    elif activation == "tanh":
+        current = tf.keras.activations.tanh(current)
+    elif activation == "exp":
+        current = Exp()(current)
+    elif activation == "softplus":
+        current = Softplus()(current)
+    elif activation == "linear" or activation is None:
+        pass
+    else:
+        print('Unrecognized activation "%s"' % activation, file=sys.stderr)
+        exit(1)
+
+    return current
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/baskerville/metrics.html b/_modules/baskerville/metrics.html new file mode 100644 index 0000000..0cfb2ce --- /dev/null +++ b/_modules/baskerville/metrics.html @@ -0,0 +1,650 @@ + + + + + + baskerville.metrics — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for baskerville.metrics

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import pdb
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.utils import losses_utils
+from tensorflow.python.keras.losses import LossFunctionWrapper
+from tensorflow.python.keras.utils import metrics_utils
+
+gpu_devices = tf.config.experimental.list_physical_devices("GPU")
+for device in gpu_devices:
+    tf.config.experimental.set_memory_growth(device, True)
+
+
+################################################################################
+# Losses
+################################################################################
+
+[docs] +def mean_squared_error_udot(y_true, y_pred, udot_weight: float = 1): + """Mean squared error with mean-normalized specificity term.""" + mse_term = tf.keras.losses.mean_squared_error(y_true, y_pred) + + yn_true = y_true - tf.math.reduce_mean(y_true, axis=-1, keepdims=True) + yn_pred = y_pred - tf.math.reduce_mean(y_pred, axis=-1, keepdims=True) + udot_term = -tf.reduce_mean(yn_true * yn_pred, axis=-1) + + return mse_term + udot_weight * udot_term
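+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# the udot term equals the negative (biased) covariance of true and predicted
+# profiles across targets, so minimizing it rewards matching deviation patterns.
+import numpy as np
+
+yt = np.random.rand(8, 10).astype("float32")
+yp = np.random.rand(8, 10).astype("float32")
+udot = -((yt - yt.mean(-1, keepdims=True)) * (yp - yp.mean(-1, keepdims=True))).mean(-1)
+cov = np.array([np.cov(a, b, bias=True)[0, 1] for a, b in zip(yt, yp)])
+assert np.allclose(udot, -cov, atol=1e-5)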
+ + + +
+[docs] +class MeanSquaredErrorUDot(LossFunctionWrapper): + """Mean squared error with mean-normalized specificity term. + + Args: + udot_weight: Weight of the mean-normalized specificity term. + """ + + def __init__( + self, + udot_weight: float = 1, + reduction=losses_utils.ReductionV2.AUTO, + name: str = "mse_udot", + ): + self.udot_weight = udot_weight + mse_udot = lambda yt, yp: mean_squared_error_udot(yt, yp, self.udot_weight) + super(MeanSquaredErrorUDot, self).__init__( + mse_udot, name=name, reduction=reduction + )
+ + + +
+[docs]
+def poisson_kl(y_true, y_pred, kl_weight=1, epsilon=1e-3):
+    """Poisson loss with weighted KL-divergence specificity term."""
+    # poisson loss
+    poisson_term = tf.keras.losses.poisson(y_true, y_pred)
+
+    # add epsilon to protect against tiny values
+    y_true += epsilon
+    y_pred += epsilon
+
+    # normalize to sum to one
+    yn_true = y_true / tf.math.reduce_sum(y_true, axis=-1, keepdims=True)
+    yn_pred = y_pred / tf.math.reduce_sum(y_pred, axis=-1, keepdims=True)
+
+    # kl term
+    kl_term = tf.keras.losses.kl_divergence(yn_true, yn_pred)
+
+    # weighted combination
+    return poisson_term + kl_weight * kl_term
+ + + +
+[docs]
+class PoissonKL(LossFunctionWrapper):
+    """Poisson decomposition with KL specificity term.
+
+    Args:
+      kl_weight (float): Weight of the KL specificity term.
+    """
+
+    def __init__(
+        self,
+        kl_weight: float = 1,
+        reduction=losses_utils.ReductionV2.AUTO,
+        name="poisson_kl",
+    ):
+        self.kl_weight = kl_weight
+        pois_kl = lambda yt, yp: poisson_kl(yt, yp, self.kl_weight)
+        super(PoissonKL, self).__init__(pois_kl, name=name, reduction=reduction)
+ + + +
+[docs]
+def poisson_multinomial(
+    y_true,
+    y_pred,
+    total_weight: float = 1,
+    epsilon: float = 1e-6,
+    rescale: bool = False,
+):
+    """Poisson decomposition with multinomial specificity term.
+
+    Args:
+      total_weight (float): Weight of the Poisson total term.
+      epsilon (float): Added small value to avoid log(0).
+      rescale (bool): Rescale loss to the scale of a 1:1 term ratio.
+    """
+    seq_len = y_true.shape[1]
+
+    # add epsilon to protect against tiny values
+    y_true += epsilon
+    y_pred += epsilon
+
+    # sum across lengths
+    s_true = tf.math.reduce_sum(y_true, axis=-2, keepdims=True)
+    s_pred = tf.math.reduce_sum(y_pred, axis=-2, keepdims=True)
+
+    # normalize to sum to one
+    p_pred = y_pred / s_pred
+
+    # total count poisson loss
+    poisson_term = tf.keras.losses.poisson(s_true, s_pred)  # B x 1
+    poisson_term /= seq_len
+
+    # multinomial loss
+    pl_pred = tf.math.log(p_pred)  # B x L x T
+    multinomial_dot = -tf.math.multiply(y_true, pl_pred)  # B x L x T
+    multinomial_term = tf.math.reduce_sum(multinomial_dot, axis=-2)  # B x T
+    multinomial_term /= seq_len
+
+    # normalize to scale of 1:1 term ratio
+    loss_raw = multinomial_term + total_weight * poisson_term
+    if rescale:
+        loss_rescale = loss_raw * 2 / (1 + total_weight)
+    else:
+        loss_rescale = loss_raw
+
+    return loss_rescale
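+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# the loss splits a (batch, length, targets) tensor into a per-track total
+# count (Poisson term) and a positional distribution (multinomial term).
+import tensorflow as tf
+
+y_true = tf.random.uniform((2, 64, 3))
+y_pred = tf.random.uniform((2, 64, 3), minval=0.1)
+loss = poisson_multinomial(y_true, y_pred, total_weight=0.2)
+print(loss.shape)  # (2, 3): one loss value per batch element and track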
+ + + +
+[docs]
+class PoissonMultinomial(LossFunctionWrapper):
+    """Poisson decomposition with multinomial specificity term.
+
+    Args:
+      total_weight (float): Weight of the Poisson total term.
+      epsilon (float): Added small value to avoid log(0).
+    """
+
+    def __init__(
+        self,
+        total_weight: float = 1,
+        reduction=losses_utils.ReductionV2.AUTO,
+        name: str = "poisson_multinomial",
+    ):
+        self.total_weight = total_weight
+        pois_mn = lambda yt, yp: poisson_multinomial(yt, yp, self.total_weight)
+        super(PoissonMultinomial, self).__init__(
+            pois_mn, name=name, reduction=reduction
+        )
+ + + +################################################################################ +# Metrics +################################################################################ +
+[docs] +class SeqAUC(tf.keras.metrics.AUC): + """AUC metric for multi-task sequence data. + + Args: + curve (str): Metric type--'ROC' or 'PR'. + summarize (bool): Whether to summarize over all tasks. + """ + + def __init__( + self, curve: str = "ROC", name: str = None, summarize: bool = True, **kwargs + ): + if name is None: + if curve == "ROC": + name = "auroc" + elif curve == "PR": + name = "auprc" + super(SeqAUC, self).__init__(curve=curve, name=name, multi_label=True, **kwargs) + self._summarize = summarize + +
+[docs] + def update_state(self, y_true, y_pred, **kwargs): + """Flatten sequence length before update.""" + + # flatten batch and sequence length + num_targets = y_pred.shape[-1] + y_true = tf.reshape(y_true, (-1, num_targets)) + y_pred = tf.reshape(y_pred, (-1, num_targets)) + + # update + super(SeqAUC, self).update_state(y_true, y_pred, **kwargs)
+ + +
+[docs] + def interpolate_pr_auc(self): + """Add option to remove summary.""" + dtp = self.true_positives[: self.num_thresholds - 1] - self.true_positives[1:] + p = tf.math.add(self.true_positives, self.false_positives) + dp = p[: self.num_thresholds - 1] - p[1:] + prec_slope = tf.math.divide_no_nan(dtp, tf.maximum(dp, 0), name="prec_slope") + intercept = self.true_positives[1:] - tf.multiply(prec_slope, p[1:]) + + safe_p_ratio = tf.where( + tf.logical_and(p[: self.num_thresholds - 1] > 0, p[1:] > 0), + tf.math.divide_no_nan( + p[: self.num_thresholds - 1], + tf.maximum(p[1:], 0), + name="recall_relative_ratio", + ), + tf.ones_like(p[1:]), + ) + + pr_auc_increment = tf.math.divide_no_nan( + prec_slope * (dtp + intercept * tf.math.log(safe_p_ratio)), + tf.maximum(self.true_positives[1:] + self.false_negatives[1:], 0), + name="pr_auc_increment", + ) + + if self.multi_label: + by_label_auc = tf.reduce_sum( + pr_auc_increment, name=self.name + "_by_label", axis=0 + ) + + if self._summarize: + if self.label_weights is None: + # Evenly weighted average of the label AUCs. + return tf.reduce_mean(by_label_auc, name=self.name) + else: + # Weighted average of the label AUCs. + return tf.math.divide_no_nan( + tf.reduce_sum(tf.multiply(by_label_auc, self.label_weights)), + tf.reduce_sum(self.label_weights), + name=self.name, + ) + else: + return by_label_auc + else: + if self._summarize: + return tf.reduce_sum(pr_auc_increment, name="interpolate_pr_auc") + else: + return pr_auc_increment
+ + +
+[docs]
+    def result(self):
+        """Add option to remove summary.
+
+        The metrics_utils `==` comparisons stop working from TF 2.6 on,
+        so compare the underlying enum values instead."""
+        if (
+            self.curve.value == metrics_utils.AUCCurve.PR.value
+            and self.summation_method.value
+            == metrics_utils.AUCSummationMethod.INTERPOLATION.value
+        ):
+            # This use case is different and is handled separately.
+            return self.interpolate_pr_auc()
+
+        # Set `x` and `y` values for the curves based on `curve` config.
+        recall = tf.math.divide_no_nan(
+            self.true_positives, tf.math.add(self.true_positives, self.false_negatives)
+        )
+        if self.curve.value == metrics_utils.AUCCurve.ROC.value:
+            fp_rate = tf.math.divide_no_nan(
+                self.false_positives,
+                tf.math.add(self.false_positives, self.true_negatives),
+            )
+            x = fp_rate
+            y = recall
+        else:  # curve == 'PR'.
+            precision = tf.math.divide_no_nan(
+                self.true_positives,
+                tf.math.add(self.true_positives, self.false_positives),
+            )
+            x = recall
+            y = precision
+
+        # Find the rectangle heights based on `summation_method`.
+        if (
+            self.summation_method.value
+            == metrics_utils.AUCSummationMethod.INTERPOLATION.value
+        ):
+            # Note: the case ('PR', 'interpolation') has been handled above.
+            heights = (y[: self.num_thresholds - 1] + y[1:]) / 2.0
+        elif (
+            self.summation_method.value
+            == metrics_utils.AUCSummationMethod.MINORING.value
+        ):
+            heights = tf.minimum(y[: self.num_thresholds - 1], y[1:])
+        else:  # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
+            heights = tf.maximum(y[: self.num_thresholds - 1], y[1:])
+
+        # Sum up the areas of all the rectangles.
+        if self.multi_label:
+            riemann_terms = tf.multiply(x[: self.num_thresholds - 1] - x[1:], heights)
+            by_label_auc = tf.reduce_sum(
+                riemann_terms, name=self.name + "_by_label", axis=0
+            )
+
+            if self._summarize:
+                if self.label_weights is None:
+                    # Unweighted average of the label AUCs.
+                    return tf.reduce_mean(by_label_auc, name=self.name)
+                else:
+                    # Weighted average of the label AUCs.
+                    return tf.math.divide_no_nan(
+                        tf.reduce_sum(tf.multiply(by_label_auc, self.label_weights)),
+                        tf.reduce_sum(self.label_weights),
+                        name=self.name,
+                    )
+            else:
+                return by_label_auc
+        else:
+            if self._summarize:
+                return tf.reduce_sum(
+                    tf.multiply(x[: self.num_thresholds - 1] - x[1:], heights),
+                    name=self.name,
+                )
+            else:
+                return tf.multiply(x[: self.num_thresholds - 1] - x[1:], heights)
+
+ + + +
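+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# SeqAUC flattens (batch, length, tasks) to (batch*length, tasks), so the
+# AUC is computed per task across all positions, then averaged.
+import tensorflow as tf
+
+m = SeqAUC(curve="PR")
+y_true = tf.cast(tf.random.uniform((2, 32, 4)) > 0.5, tf.float32)
+y_pred = tf.random.uniform((2, 32, 4))
+m.update_state(y_true, y_pred)
+print(float(m.result()))  # mean AUPRC over the 4 tasks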
+[docs] +class PearsonR(tf.keras.metrics.Metric): + """PearsonR metric for multi-task data. + + Args: + num_targets (int): Number of tasks. + summarize (bool): Whether to summarize over all tasks. + """ + + def __init__(self, num_targets, summarize=True, name="pearsonr", **kwargs): + super(PearsonR, self).__init__(name=name, **kwargs) + self._summarize = summarize + self._shape = (num_targets,) + self._count = self.add_weight( + name="count", shape=self._shape, initializer="zeros" + ) + + self._product = self.add_weight( + name="product", shape=self._shape, initializer="zeros" + ) + self._true_sum = self.add_weight( + name="true_sum", shape=self._shape, initializer="zeros" + ) + self._true_sumsq = self.add_weight( + name="true_sumsq", shape=self._shape, initializer="zeros" + ) + self._pred_sum = self.add_weight( + name="pred_sum", shape=self._shape, initializer="zeros" + ) + self._pred_sumsq = self.add_weight( + name="pred_sumsq", shape=self._shape, initializer="zeros" + ) + +
+[docs] + def update_state(self, y_true, y_pred, sample_weight=None): + """Update metric state for a batch.""" + y_true = tf.cast(y_true, "float32") + y_pred = tf.cast(y_pred, "float32") + + if len(y_true.shape) == 2: + reduce_axes = 0 + else: + reduce_axes = [0, 1] + + product = tf.reduce_sum(tf.multiply(y_true, y_pred), axis=reduce_axes) + self._product.assign_add(product) + + true_sum = tf.reduce_sum(y_true, axis=reduce_axes) + self._true_sum.assign_add(true_sum) + + true_sumsq = tf.reduce_sum(tf.math.square(y_true), axis=reduce_axes) + self._true_sumsq.assign_add(true_sumsq) + + pred_sum = tf.reduce_sum(y_pred, axis=reduce_axes) + self._pred_sum.assign_add(pred_sum) + + pred_sumsq = tf.reduce_sum(tf.math.square(y_pred), axis=reduce_axes) + self._pred_sumsq.assign_add(pred_sumsq) + + count = tf.ones_like(y_true) + count = tf.reduce_sum(count, axis=reduce_axes) + self._count.assign_add(count)
+ + +
+[docs] + def result(self): + """Compute PearsonR result from state.""" + true_mean = tf.divide(self._true_sum, self._count) + true_mean2 = tf.math.square(true_mean) + pred_mean = tf.divide(self._pred_sum, self._count) + pred_mean2 = tf.math.square(pred_mean) + + term1 = self._product + term2 = -tf.multiply(true_mean, self._pred_sum) + term3 = -tf.multiply(pred_mean, self._true_sum) + term4 = tf.multiply(self._count, tf.multiply(true_mean, pred_mean)) + covariance = term1 + term2 + term3 + term4 + + true_var = self._true_sumsq - tf.multiply(self._count, true_mean2) + pred_var = self._pred_sumsq - tf.multiply(self._count, pred_mean2) + pred_var = tf.where( + tf.greater(pred_var, 1e-12), pred_var, np.inf * tf.ones_like(pred_var) + ) + + tp_var = tf.multiply(tf.math.sqrt(true_var), tf.math.sqrt(pred_var)) + correlation = tf.divide(covariance, tp_var) + + if self._summarize: + return tf.reduce_mean(correlation) + else: + return correlation
+ + +
+[docs] + def reset_state(self): + """Reset metric state.""" + K.batch_set_value([(v, np.zeros(self._shape)) for v in self.variables])
+
+ + + +
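+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# PearsonR accumulates sufficient statistics (sums, sums of squares, cross
+# products), so streaming over batches matches a single-pass computation.
+import numpy as np
+
+yt = np.random.rand(16, 8, 3).astype("float32")
+yp = np.random.rand(16, 8, 3).astype("float32")
+m = PearsonR(num_targets=3, summarize=False)
+m.update_state(yt[:8], yp[:8])
+m.update_state(yt[8:], yp[8:])
+ref = [np.corrcoef(yt[..., t].ravel(), yp[..., t].ravel())[0, 1] for t in range(3)]
+assert np.allclose(m.result().numpy(), ref, atol=1e-4)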
+[docs] +class R2(tf.keras.metrics.Metric): + """R2 metric for multi-task data. + + Args: + num_targets (int): Number of tasks. + summarize (bool): Whether to summarize over all tasks. + """ + + def __init__(self, num_targets, summarize=True, name="r2", **kwargs): + super(R2, self).__init__(name=name, **kwargs) + self._summarize = summarize + self._shape = (num_targets,) + self._count = self.add_weight( + name="count", shape=self._shape, initializer="zeros" + ) + + self._true_sum = self.add_weight( + name="true_sum", shape=self._shape, initializer="zeros" + ) + self._true_sumsq = self.add_weight( + name="true_sumsq", shape=self._shape, initializer="zeros" + ) + + self._product = self.add_weight( + name="product", shape=self._shape, initializer="zeros" + ) + self._pred_sumsq = self.add_weight( + name="pred_sumsq", shape=self._shape, initializer="zeros" + ) + +
+[docs] + def update_state(self, y_true, y_pred, sample_weight=None): + """Update metric state for a batch.""" + y_true = tf.cast(y_true, "float32") + y_pred = tf.cast(y_pred, "float32") + + if len(y_true.shape) == 2: + reduce_axes = 0 + else: + reduce_axes = [0, 1] + + true_sum = tf.reduce_sum(y_true, axis=reduce_axes) + self._true_sum.assign_add(true_sum) + + true_sumsq = tf.reduce_sum(tf.math.square(y_true), axis=reduce_axes) + self._true_sumsq.assign_add(true_sumsq) + + product = tf.reduce_sum(tf.multiply(y_true, y_pred), axis=reduce_axes) + self._product.assign_add(product) + + pred_sumsq = tf.reduce_sum(tf.math.square(y_pred), axis=reduce_axes) + self._pred_sumsq.assign_add(pred_sumsq) + + count = tf.ones_like(y_true) + count = tf.reduce_sum(count, axis=reduce_axes) + self._count.assign_add(count)
+ + +
+[docs]
+    def result(self):
+        """Compute R2 result from state."""
+        true_mean = tf.divide(self._true_sum, self._count)
+        true_mean2 = tf.math.square(true_mean)
+
+        total = self._true_sumsq - tf.multiply(self._count, true_mean2)
+
+        resid1 = self._pred_sumsq
+        resid2 = -2 * self._product
+        resid3 = self._true_sumsq
+        resid = resid1 + resid2 + resid3
+
+        r2 = tf.ones(self._shape, dtype=tf.float32) - tf.divide(resid, total)
+
+        if self._summarize:
+            return tf.reduce_mean(r2)
+        else:
+            return r2
+ + +
+[docs] + def reset_state(self): + """Reset metric state.""" + K.batch_set_value([(v, np.zeros(self._shape)) for v in self.variables])
+
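+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# R2 here is the standard 1 - SS_res/SS_tot, computed per task from the
+# accumulated sums.
+import numpy as np
+
+yt = np.random.rand(32, 4).astype("float32")
+yp = (yt + 0.1 * np.random.rand(32, 4)).astype("float32")
+m = R2(num_targets=4, summarize=False)
+m.update_state(yt, yp)
+ref = 1 - ((yt - yp) ** 2).sum(0) / ((yt - yt.mean(0)) ** 2).sum(0)
+assert np.allclose(m.result().numpy(), ref, atol=1e-4)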
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/baskerville/seqnn.html b/_modules/baskerville/seqnn.html new file mode 100644 index 0000000..ee39235 --- /dev/null +++ b/_modules/baskerville/seqnn.html @@ -0,0 +1,1232 @@ + + + + + + baskerville.seqnn — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for baskerville.seqnn

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import pdb
+import sys
+import time
+
+from natsort import natsorted
+import numpy as np
+import tensorflow as tf
+import gc
+from baskerville import blocks
+from baskerville import layers
+from baskerville import metrics
+
+
+
+[docs] +class SeqNN: + """Sequence neural network model. + + Args: + params (dict): Model specification and parameters. + """ + + def __init__(self, params: dict): + self.set_defaults() + for key, value in params.items(): + self.__setattr__(key, value) + self.build_model() + self.ensemble = None + +
+[docs] + def set_defaults(self): + """Set default parameters. + + Only necessary for my bespoke parameters. + Others are best defaulted closer to the source. + """ + self.augment_rc = False + self.augment_shift = [0] + self.strand_pair = [] + self.verbose = True
+ + +
+[docs]
+    def build_block(self, current, block_params):
+        """Construct a SeqNN block.
+
+        Args:
+          current: Current Tensor.
+          block_params (dict): Block parameters.
+        Returns:
+          current: New current Tensor.
+        """
+        block_args = {}
+
+        # extract name
+        block_name = block_params["name"]
+
+        # save upper_tri flatten
+        self.preds_triu |= block_name == "upper_tri"
+
+        # if Keras, get block variables names
+        pass_all_globals = True
+        if block_name[0].isupper():
+            pass_all_globals = False
+            block_func = blocks.keras_func[block_name]
+            block_varnames = block_func.__init__.__code__.co_varnames
+
+        # set global defaults
+        global_vars = [
+            "activation",
+            "batch_norm",
+            "bn_momentum",
+            "norm_type",
+            "l2_scale",
+            "l1_scale",
+            "padding",
+            "kernel_initializer",
+        ]
+        for gv in global_vars:
+            gv_value = getattr(self, gv, False)
+            if gv_value and (pass_all_globals or gv in block_varnames):
+                block_args[gv] = gv_value
+
+        # set remaining params
+        block_args.update(block_params)
+        del block_args["name"]
+
+        # save representations
+        if block_name.find("tower") != -1:
+            block_args["reprs"] = self.reprs
+
+        # U-net helper
+        if block_name.startswith("unet_"):
+            # find matching representation
+            unet_repr = None
+            for seq_repr in reversed(self.reprs[:-1]):
+                if seq_repr.shape[1] == current.shape[1] * 2:
+                    unet_repr = seq_repr
+                    break
+            if unet_repr is None:
+                print(
+                    "Could not find matching representation for length %d"
+                    % current.shape[1],
+                    file=sys.stderr,
+                )
+                exit(1)
+            block_args["unet_repr"] = unet_repr
+
+        # switch for block
+        if block_name[0].islower():
+            block_func = blocks.name_func[block_name]
+            current = block_func(current, **block_args)
+
+        else:
+            block_func = blocks.keras_func[block_name]
+            current = block_func(**block_args)(current)
+
+        return current
+ + +
+[docs] + def build_model(self, save_reprs: bool = True): + """Build the model.""" + + ################################################### + # inputs + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + current = sequence + + # augmentation + if self.augment_rc: + current, reverse_bool = layers.StochasticReverseComplement()(current) + if self.augment_shift != [0]: + current = layers.StochasticShift(self.augment_shift)(current) + self.preds_triu = False + + ################################################### + # build convolution blocks + self.reprs = [] + for bi, block_params in enumerate(self.trunk): + current = self.build_block(current, block_params) + if save_reprs: + self.reprs.append(current) + + # final activation + current = layers.activate(current, self.activation) + + # make model trunk + trunk_output = current + self.model_trunk = tf.keras.Model(inputs=sequence, outputs=trunk_output) + + ################################################### + # heads + head_keys = natsorted([v for v in vars(self) if v.startswith("head")]) + self.heads = [getattr(self, hk) for hk in head_keys] + + self.head_output = [] + for hi, head in enumerate(self.heads): + if not isinstance(head, list): + head = [head] + + # reset to trunk output + current = trunk_output + + # build blocks + for bi, block_params in enumerate(head): + current = self.build_block(current, block_params) + + if hi < len(self.strand_pair): + strand_pair = self.strand_pair[hi] + else: + strand_pair = None + + # transform back from reverse complement + if self.augment_rc: + if self.preds_triu: + current = layers.SwitchReverseTriu(self.diagonal_offset)( + [current, reverse_bool] + ) + else: + current = layers.SwitchReverse(strand_pair)([current, reverse_bool]) + + # save head output + self.head_output.append(current) + + ################################################### + # compile model(s) + self.models = [] + for ho in self.head_output: + self.models.append(tf.keras.Model(inputs=sequence, outputs=ho)) + self.model = self.models[0] + if self.verbose: + print(self.model.summary()) + + # track pooling/striding and cropping + self.track_sequence(sequence)
+ + +
+[docs] + def build_embed(self, conv_layer_i: int, batch_norm: bool = True): + """Build model to embed sequences into specific layer.""" + if conv_layer_i == -1: + self.model = self.model_trunk + + else: + if batch_norm: + conv_layer = self.get_bn_layer(conv_layer_i) + else: + conv_layer = self.get_conv_layer(conv_layer_i) + + self.model = tf.keras.Model( + inputs=self.model.inputs, outputs=conv_layer.output + )
+ + +
+[docs] + def build_ensemble(self, ensemble_rc: bool = False, ensemble_shifts=[0]): + """Build ensemble of models computing on augmented input sequences.""" + shift_bool = len(ensemble_shifts) > 1 or ensemble_shifts[0] != 0 + if ensemble_rc or shift_bool: + # sequence input + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + sequences = [sequence] + + if shift_bool: + # generate shifted sequences + sequences = layers.EnsembleShift(ensemble_shifts)(sequences) + + if ensemble_rc: + # generate reverse complements and indicators + sequences_rev = layers.EnsembleReverseComplement()(sequences) + else: + sequences_rev = [(seq, tf.constant(False)) for seq in sequences] + + if len(self.strand_pair) == 0: + strand_pair = None + else: + strand_pair = self.strand_pair[0] + + # predict each sequence + if self.preds_triu: + preds = [ + layers.SwitchReverseTriu(self.diagonal_offset)( + [self.model(seq), rp] + ) + for (seq, rp) in sequences_rev + ] + else: + preds = [ + layers.SwitchReverse(strand_pair)([self.model(seq), rp]) + for (seq, rp) in sequences_rev + ] + + # create layer + preds_avg = tf.keras.layers.Average()(preds) + + # create meta model + self.ensemble = tf.keras.Model(inputs=sequence, outputs=preds_avg)
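+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# typical ensembling over reverse complement and small shifts (checkpoint
+# path is hypothetical):
+#
+#   seqnn_model = SeqNN(params["model"])
+#   seqnn_model.restore("model0_best.h5")
+#   seqnn_model.build_ensemble(ensemble_rc=True, ensemble_shifts=[-1, 0, 1])
+#   preds = seqnn_model(seq_1hot)  # routed through self.ensemble when set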
+ + +
+[docs] + def build_sad(self): + """Sum across length axis, in graph.""" + # sequence input + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + + # predict + predictions = self.model(sequence) + preds_len = predictions.shape[1] + + # sum pool + sad = preds_len * tf.keras.layers.GlobalAveragePooling1D()(predictions) + + # replace model + self.model = tf.keras.Model(inputs=sequence, outputs=sad)
+ + +
+[docs] + def build_slice(self, target_slice=None, target_sum: bool = False): + """Slice and/or sum across tasks, in graph.""" + if target_slice is not None or target_sum: + # sequence input + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + + # predict + predictions = self.model(sequence) + + # slice + if target_slice is None: + predictions_slice = predictions + else: + predictions_slice = tf.gather(predictions, target_slice, axis=-1) + + # sum + if target_sum: + predictions_sum = tf.reduce_sum( + predictions_slice, keepdims=True, axis=-1 + ) + else: + predictions_sum = predictions_slice + + # replace model + self.model = tf.keras.Model(inputs=sequence, outputs=predictions_sum)
+ + +
+[docs] + def downcast(self, dtype=tf.float16, head_i=None): + """Downcast model output type.""" + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + # sequence input + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + + # predict and downcast + preds = model(sequence) + preds = tf.cast(preds, dtype) + model_down = tf.keras.Model(inputs=sequence, outputs=preds) + + # replace model + if self.ensemble is not None: + self.ensemble = model_down + elif head_i is not None: + self.models[head_i] = model_down + else: + self.model = model_down
+ + +
+[docs] + def evaluate( + self, seq_data, head_i=None, loss_label: str = "poisson", loss_fn=None + ): + """Evaluate model on SeqDataset.""" + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + # compile with dense metrics + num_targets = model.output_shape[-1] + + if loss_fn is None: + loss_fn = loss_label + + if loss_label == "bce": + model.compile( + optimizer=tf.keras.optimizers.SGD(), + loss=loss_fn, + metrics=[ + metrics.SeqAUC(curve="ROC", summarize=False), + metrics.SeqAUC(curve="PR", summarize=False), + ], + ) + else: + model.compile( + optimizer=tf.keras.optimizers.SGD(), + loss=loss_fn, + metrics=[ + metrics.PearsonR(num_targets, summarize=False), + metrics.R2(num_targets, summarize=False), + ], + ) + + # evaluate + return model.evaluate(seq_data.dataset)
+ + +
+[docs] + def get_bn_layer(self, bn_layer_i=0): + """Return specified batch normalization layer.""" + bn_layers = [ + layer + for layer in self.model.layers + if layer.name.startswith("batch_normalization") + ] + return bn_layers[bn_layer_i]
+ + +
+[docs] + def get_conv_layer(self, conv_layer_i=0): + """Return specified convolution layer.""" + conv_layers = [ + layer for layer in self.model.layers if layer.name.startswith("conv") + ] + return conv_layers[conv_layer_i]
+ + +
+[docs] + def get_dense_layer(self, layer_i=0): + """Return specified dense layer.""" + dense_layers = [ + layer for layer in self.model.layers if layer.name.startswith("dense") + ] + return dense_layers[layer_i]
+ + +
+[docs] + def get_conv_weights(self, conv_layer_i=0): + """Return kernel weights for specified convolution layer.""" + conv_layer = self.get_conv_layer(conv_layer_i) + weights = conv_layer.weights[0].numpy() + weights = np.transpose(weights, [2, 1, 0]) + return weights
+ + +
+[docs] + def gradients( + self, + seq_1hot, + head_i=None, + target_slice=None, + pos_slice=None, + pos_mask=None, + pos_slice_denom=None, + pos_mask_denom=None, + chunk_size=None, + batch_size=1, + track_scale=1.0, + track_transform=1.0, + clip_soft=None, + pseudo_count=0.0, + no_transform=False, + use_mean=False, + use_ratio=False, + use_logodds=False, + subtract_avg=True, + input_gate=True, + smooth_grad=False, + n_samples=5, + sample_prob=0.875, + dtype="float16", + ): + """Compute input gradients for sequences (GPU-friendly).""" + + # start time + t0 = time.time() + + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + # verify tensor shape(s) + seq_1hot = seq_1hot.astype("float32") + target_slice = np.array(target_slice).astype("int32") + pos_slice = np.array(pos_slice).astype("int32") + + # convert constants to tf tensors + track_scale = tf.constant(track_scale, dtype=tf.float32) + track_transform = tf.constant(track_transform, dtype=tf.float32) + if clip_soft is not None: + clip_soft = tf.constant(clip_soft, dtype=tf.float32) + pseudo_count = tf.constant(pseudo_count, dtype=tf.float32) + + if pos_mask is not None: + pos_mask = np.array(pos_mask).astype("float32") + + if use_ratio and pos_slice_denom is not None: + pos_slice_denom = np.array(pos_slice_denom).astype("int32") + + if pos_mask_denom is not None: + pos_mask_denom = np.array(pos_mask_denom).astype("float32") + + if len(seq_1hot.shape) < 3: + seq_1hot = seq_1hot[None, ...] + + if len(target_slice.shape) < 2: + target_slice = target_slice[None, ...] + + if len(pos_slice.shape) < 2: + pos_slice = pos_slice[None, ...] + + if pos_mask is not None and len(pos_mask.shape) < 2: + pos_mask = pos_mask[None, ...] + + if use_ratio and pos_slice_denom is not None and len(pos_slice_denom.shape) < 2: + pos_slice_denom = pos_slice_denom[None, ...] + + if pos_mask_denom is not None and len(pos_mask_denom.shape) < 2: + pos_mask_denom = pos_mask_denom[None, ...] + + # chunk parameters + num_chunks = 1 + if chunk_size is None: + chunk_size = seq_1hot.shape[0] + else: + num_chunks = int(np.ceil(seq_1hot.shape[0] / chunk_size)) + + # loop over chunks + grad_chunks = [] + for ci in range(num_chunks): + # collect chunk + seq_1hot_chunk = seq_1hot[ci * chunk_size : (ci + 1) * chunk_size, ...] + target_slice_chunk = target_slice[ + ci * chunk_size : (ci + 1) * chunk_size, ... + ] + pos_slice_chunk = pos_slice[ci * chunk_size : (ci + 1) * chunk_size, ...] + + pos_mask_chunk = None + if pos_mask is not None: + pos_mask_chunk = pos_mask[ci * chunk_size : (ci + 1) * chunk_size, ...] + + pos_slice_denom_chunk = None + pos_mask_denom_chunk = None + if use_ratio and pos_slice_denom is not None: + pos_slice_denom_chunk = pos_slice_denom[ + ci * chunk_size : (ci + 1) * chunk_size, ... + ] + + if pos_mask_denom is not None: + pos_mask_denom_chunk = pos_mask_denom[ + ci * chunk_size : (ci + 1) * chunk_size, ... 
+ ] + + actual_chunk_size = seq_1hot_chunk.shape[0] + + # sample noisy (discrete) perturbations of the input pattern chunk + if smooth_grad: + seq_1hot_chunk_corrupted = np.repeat( + np.copy(seq_1hot_chunk), n_samples, axis=0 + ) + + for example_ix in range(seq_1hot_chunk.shape[0]): + for sample_ix in range(n_samples): + corrupt_index = np.nonzero( + np.random.rand(seq_1hot_chunk.shape[1]) >= sample_prob + )[0] + + rand_nt_index = np.random.choice( + [0, 1, 2, 3], size=(corrupt_index.shape[0],) + ) + + seq_1hot_chunk_corrupted[ + example_ix * n_samples + sample_ix, corrupt_index, : + ] = 0.0 + seq_1hot_chunk_corrupted[ + example_ix * n_samples + sample_ix, + corrupt_index, + rand_nt_index, + ] = 1.0 + + seq_1hot_chunk = seq_1hot_chunk_corrupted + target_slice_chunk = np.repeat( + np.copy(target_slice_chunk), n_samples, axis=0 + ) + pos_slice_chunk = np.repeat(np.copy(pos_slice_chunk), n_samples, axis=0) + + if pos_mask is not None: + pos_mask_chunk = np.repeat( + np.copy(pos_mask_chunk), n_samples, axis=0 + ) + + if use_ratio and pos_slice_denom is not None: + pos_slice_denom_chunk = np.repeat( + np.copy(pos_slice_denom_chunk), n_samples, axis=0 + ) + + if pos_mask_denom is not None: + pos_mask_denom_chunk = np.repeat( + np.copy(pos_mask_denom_chunk), n_samples, axis=0 + ) + + # convert to tf tensors + seq_1hot_chunk = tf.convert_to_tensor(seq_1hot_chunk, dtype=tf.float32) + target_slice_chunk = tf.convert_to_tensor( + target_slice_chunk, dtype=tf.int32 + ) + pos_slice_chunk = tf.convert_to_tensor(pos_slice_chunk, dtype=tf.int32) + + if pos_mask is not None: + pos_mask_chunk = tf.convert_to_tensor(pos_mask_chunk, dtype=tf.float32) + + if use_ratio and pos_slice_denom is not None: + pos_slice_denom_chunk = tf.convert_to_tensor( + pos_slice_denom_chunk, dtype=tf.int32 + ) + + if pos_mask_denom is not None: + pos_mask_denom_chunk = tf.convert_to_tensor( + pos_mask_denom_chunk, dtype=tf.float32 + ) + + # batching parameters + num_batches = int( + np.ceil( + actual_chunk_size * (n_samples if smooth_grad else 1) / batch_size + ) + ) + + # loop over batches + grad_batches = [] + for bi in range(num_batches): + # collect batch + seq_1hot_batch = seq_1hot_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... + ] + target_slice_batch = target_slice_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... + ] + pos_slice_batch = pos_slice_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... + ] + + pos_mask_batch = None + if pos_mask is not None: + pos_mask_batch = pos_mask_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... + ] + + pos_slice_denom_batch = None + pos_mask_denom_batch = None + if use_ratio and pos_slice_denom is not None: + pos_slice_denom_batch = pos_slice_denom_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... + ] + + if pos_mask_denom is not None: + pos_mask_denom_batch = pos_mask_denom_chunk[ + bi * batch_size : (bi + 1) * batch_size, ... 
+ ] + + grad_batch = ( + self.gradients_func( + model, + seq_1hot_batch, + target_slice_batch, + pos_slice_batch, + pos_mask_batch, + pos_slice_denom_batch, + pos_mask_denom_batch, + track_scale, + track_transform, + clip_soft, + pseudo_count, + no_transform, + use_mean, + use_ratio, + use_logodds, + subtract_avg, + input_gate, + ) + .numpy() + .astype(dtype) + ) + + grad_batches.append(grad_batch) + + # concat gradient batches + grads = np.concatenate(grad_batches, axis=0) + + # aggregate noisy gradient perturbations + if smooth_grad: + grads_smoothed = np.zeros( + (grads.shape[0] // n_samples, grads.shape[1], grads.shape[2]), + dtype="float32", + ) + + for example_ix in range(grads_smoothed.shape[0]): + for sample_ix in range(n_samples): + grads_smoothed[example_ix, ...] += grads[ + example_ix * n_samples + sample_ix, ... + ] + + grads = grads_smoothed / float(n_samples) + grads = grads.astype(dtype) + + grad_chunks.append(grads) + + # collect garbage + gc.collect() + + # concat gradient chunks + grads = np.concatenate(grad_chunks, axis=0) + + # aggregate and broadcast to original input pattern + if input_gate: + grads = np.sum(grads, axis=-1, keepdims=True) * seq_1hot + + print("Completed gradient computation in %ds" % (time.time() - t0)) + + return grads
+ + +
+[docs] + @tf.function + def gradients_func( + self, + model, + seq_1hot, + target_slice, + pos_slice, + pos_mask=None, + pos_slice_denom=None, + pos_mask_denom=True, + track_scale=1.0, + track_transform=1.0, + clip_soft=None, + pseudo_count=0.0, + no_transform=False, + use_mean=False, + use_ratio=False, + use_logodds=False, + subtract_avg=True, + input_gate=True, + ): + with tf.GradientTape() as tape: + tape.watch(seq_1hot) + + # predict + preds = tf.gather( + model(seq_1hot, training=False), target_slice, axis=-1, batch_dims=1 + ) + + if not no_transform: + # undo scale + preds = preds / track_scale + + # undo soft_clip + if clip_soft is not None: + preds = tf.where( + preds > clip_soft, (preds - clip_soft) ** 2 + clip_soft, preds + ) + + # undo sqrt + preds = preds ** (1.0 / track_transform) + + # aggregate over tracks (average) + preds = tf.reduce_mean(preds, axis=-1) + + # slice specified positions + preds_slice = tf.gather(preds, pos_slice, axis=-1, batch_dims=1) + if pos_mask is not None: + preds_slice = preds_slice * pos_mask + + # slice denominator positions + if use_ratio and pos_slice_denom is not None: + preds_slice_denom = tf.gather( + preds, pos_slice_denom, axis=-1, batch_dims=1 + ) + if pos_mask_denom is not None: + preds_slice_denom = preds_slice_denom * pos_mask_denom + + # aggregate over positions + if not use_mean: + preds_agg = tf.reduce_sum(preds_slice, axis=-1) + if use_ratio and pos_slice_denom is not None: + preds_agg_denom = tf.reduce_sum(preds_slice_denom, axis=-1) + else: + if pos_mask is not None: + preds_agg = tf.reduce_sum(preds_slice, axis=-1) / tf.reduce_sum( + pos_mask, axis=-1 + ) + else: + preds_agg = tf.reduce_mean(preds_slice, axis=-1) + + if use_ratio and pos_slice_denom is not None: + if pos_mask_denom is not None: + preds_agg_denom = tf.reduce_sum( + preds_slice_denom, axis=-1 + ) / tf.reduce_sum(pos_mask_denom, axis=-1) + else: + preds_agg_denom = tf.reduce_mean(preds_slice_denom, axis=-1) + + # compute final statistic to take gradient of + if no_transform: + score_ratios = preds_agg + elif not use_ratio: + score_ratios = tf.math.log(preds_agg + pseudo_count + 1e-6) + else: + if not use_logodds: + score_ratios = tf.math.log( + (preds_agg + pseudo_count) / (preds_agg_denom + pseudo_count) + + 1e-6 + ) + else: + score_ratios = tf.math.log( + ((preds_agg + pseudo_count) / (preds_agg_denom + pseudo_count)) + / ( + 1.0 + - ( + (preds_agg + pseudo_count) + / (preds_agg_denom + pseudo_count) + ) + ) + + 1e-6 + ) + + # compute gradient + grads = tape.gradient(score_ratios, seq_1hot) + + # zero mean each position + if subtract_avg: + grads = grads - tf.reduce_mean(grads, axis=-1, keepdims=True) + + # multiply by input + if input_gate: + grads = grads * seq_1hot + + return grads
+ + +
+[docs]
+    def gradients_orig(
+        self, seq_1hot, head_i=None, pos_slice=None, batch_size=8, dtype="float16"
+    ):
+        """Compute input gradients for each task.
+
+        Args:
+          seq_1hot (np.array): 1-hot encoded sequence.
+          head_i (int): Model head index.
+          pos_slice ([int]): Sequence positions to consider.
+          batch_size (int): number of tasks to compute gradients for at once.
+          dtype: Returned data type.
+        Returns:
+          Gradients for each task.
+        """
+        # choose model
+        if self.ensemble is not None:
+            model = self.ensemble
+        elif head_i is not None:
+            model = self.models[head_i]
+        else:
+            model = self.model
+
+        # verify tensor shape
+        seq_1hot = seq_1hot.astype("float32")
+        seq_1hot = tf.convert_to_tensor(seq_1hot, dtype=tf.float32)
+        if len(seq_1hot.shape) < 3:
+            seq_1hot = tf.expand_dims(seq_1hot, axis=0)
+
+        # batching parameters
+        num_targets = model.output_shape[-1]
+        num_batches = int(np.ceil(num_targets / batch_size))
+
+        ti_start = 0
+        grads = []
+        for bi in range(num_batches):
+            # sequence input
+            sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence")
+
+            # predict
+            predictions = model(sequence)
+
+            # slice
+            ti_end = min(num_targets, ti_start + batch_size)
+            target_slice = np.arange(ti_start, ti_end)
+            predictions_slice = tf.gather(predictions, target_slice, axis=-1)
+
+            # replace model
+            model_batch = tf.keras.Model(inputs=sequence, outputs=predictions_slice)
+
+            # compute gradients
+            t0 = time.time()
+            grads_batch = self.gradients_func_orig(model_batch, seq_1hot, pos_slice)
+            print("Batch gradient computation in %ds" % (time.time() - t0))
+
+            # convert numpy dtype
+            grads_batch = grads_batch.numpy().astype(dtype)
+            grads.append(grads_batch)
+
+            # next batch
+            ti_start += batch_size
+
+        # concat target batches
+        grads = np.concatenate(grads, axis=-1)
+
+        return grads
+ + +
+[docs]
+    @tf.function
+    def gradients_func_orig(self, model, seq_1hot, pos_slice):
+        """Compute input gradients for each task.
+
+        Args:
+          model (tf.keras.Model): Model to compute gradients for.
+          seq_1hot (tf.Tensor): 1-hot encoded sequence.
+          pos_slice ([int]): Sequence positions to consider.
+
+        Returns:
+          grads (tf.Tensor): Gradients for each task.
+        """
+        with tf.GradientTape() as tape:
+            tape.watch(seq_1hot)
+
+            # predict
+            preds = model(seq_1hot, training=False)
+
+            if pos_slice is not None:
+                # slice specified positions
+                preds = tf.gather(preds, pos_slice, axis=-2)
+
+            # sum across positions
+            preds = tf.reduce_sum(preds, axis=-2)
+
+        # compute jacobian
+        grads = tape.jacobian(preds, seq_1hot)
+        grads = tf.squeeze(grads)
+        grads = tf.transpose(grads, [1, 2, 0])
+
+        # zero mean each position
+        grads = grads - tf.reduce_mean(grads, axis=-2, keepdims=True)
+
+        return grads
+ + +
+[docs] + def num_targets(self, head_i=None): + """Return number of targets.""" + if head_i is None: + return self.model.output_shape[-1] + else: + return self.models[head_i].output_shape[-1]
+ + + def __call__(self, x, head_i=None, dtype="float32"): + """Predict targets for single batch.""" + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + return model(x).numpy().astype(dtype) + +
+[docs] + def predict( + self, + seq_data, + head_i: int = None, + generator: bool = False, + stream: bool = False, + step: int = 1, + dtype: str = "float32", + **kwargs, + ): + """Predict targets for SeqDataset, with more options. + + Args: + seq_data (SeqDataset): Dataset to predict on. + head_i (int): Model head index. + generator (bool): Use generator to predict on dataset. + stream (bool): Stream predictions from dataset. + step (int): Step size. + dtype (str): Data type to return. + """ + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + dataset = getattr(seq_data, "dataset", None) + if dataset is None: + dataset = seq_data + + # step slice + preds_len = model.outputs[0].shape[1] + step_i = np.arange(0, preds_len, step) + + # predict + if generator: + preds = model.predict_generator(dataset, **kwargs).astype(dtype) + elif stream: + preds = [] + for x, y in seq_data.dataset: + yh = model.predict(x, **kwargs) + if step > 1: + yh = yh[:, step_i, :] + preds.append(yh.astype(dtype)) + preds = np.concatenate(preds, axis=0, dtype=dtype) + else: + preds = model.predict(dataset, **kwargs).astype(dtype) + + if not stream and step > 1: + preds = preds[:, step_i, :] + + return preds
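+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# streaming prediction with position subsampling (seq_data stands for a
+# SeqDataset):
+#
+#   preds = seqnn_model.predict(seq_data, stream=True, step=2)
+#   # preds: (num_seqs, ceil(preds_len / 2), num_targets), positions 0, 2, 4, ...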
+ + +
+[docs] + def restore(self, model_file, head_i=0, trunk=False): + """Restore weights from saved model.""" + if trunk: + self.model_trunk.load_weights(model_file) + else: + self.models[head_i].load_weights(model_file) + self.model = self.models[head_i]
+ + +
+[docs] + def save(self, model_file, trunk=False): + """Save model weights to file. + + Args: + model_file (str): Path to save model weights. + trunk (bool): Save trunk weights only. + """ + if trunk: + self.model_trunk.save(model_file, include_optimizer=False) + else: + self.model.save(model_file, include_optimizer=False)
+ + +
+[docs] + def step(self, step=2, head_i=None): + """Create new model to step positions across sequence. + + Args: + step (int): Step size. + head_i (int): Model head index. + """ + # choose model + if self.ensemble is not None: + model = self.ensemble + elif head_i is not None: + model = self.models[head_i] + else: + model = self.model + + # sequence input + sequence = tf.keras.Input(shape=(self.seq_length, 4), name="sequence") + + # predict and step across positions + preds = model(sequence) + step_positions = np.arange(preds.shape[1], step=step) + preds_step = tf.gather(preds, step_positions, axis=-2) + model_step = tf.keras.Model(inputs=sequence, outputs=preds_step) + + # replace model + if self.ensemble is not None: + self.ensemble = model_step + elif head_i is not None: + self.models[head_i] = model_step + else: + self.model = model_step
+ + +
+[docs]
+    def track_sequence(self, sequence):
+        """Track pooling, striding, and cropping of sequence.
+
+        Args:
+          sequence (tf.Tensor): Sequence input.
+        """
+        self.model_strides = []
+        self.target_lengths = []
+        self.target_crops = []
+        for model in self.models:
+            # determine model stride
+            self.model_strides.append(1)
+            for layer in model.layers:
+                if hasattr(layer, "strides") or hasattr(layer, "size"):
+                    stride_factor = layer.input_shape[1] / layer.output_shape[1]
+                    self.model_strides[-1] *= stride_factor
+            self.model_strides[-1] = int(self.model_strides[-1])
+
+            # determine predictions length before cropping
+            if type(sequence.shape[1]) == tf.compat.v1.Dimension:
+                target_full_length = sequence.shape[1].value // self.model_strides[-1]
+            else:
+                target_full_length = sequence.shape[1] // self.model_strides[-1]
+
+            # determine predictions length after cropping
+            self.target_lengths.append(model.outputs[0].shape[1])
+            if type(self.target_lengths[-1]) == tf.compat.v1.Dimension:
+                self.target_lengths[-1] = self.target_lengths[-1].value
+            self.target_crops.append(
+                (target_full_length - self.target_lengths[-1]) // 2
+            )
+
+        if self.verbose:
+            print("model_strides", self.model_strides)
+            print("target_lengths", self.target_lengths)
+            print("target_crops", self.target_crops)
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/baskerville/snps.html b/_modules/baskerville/snps.html new file mode 100644 index 0000000..89f12ae --- /dev/null +++ b/_modules/baskerville/snps.html @@ -0,0 +1,811 @@ + + + + + + baskerville.snps — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for baskerville.snps

+import json
+import pdb
+import sys
+
+import h5py
+import numpy as np
+import pandas as pd
+import pysam
+from scipy.sparse import dok_matrix
+from scipy.special import rel_entr
+from tqdm import tqdm
+
+from baskerville import dna
+from baskerville import dataset
+from baskerville import seqnn
+from baskerville import vcf as bvcf
+
+
+
+[docs]
+def score_snps(params_file, model_file, vcf_file, worker_index, options):
+    """
+    Score SNPs in a VCF file with a SeqNN model.
+
+    :param params_file: Model parameters
+    :param model_file: Saved model weights
+    :param vcf_file: VCF file
+    :param worker_index: Worker index for parallel processing
+    :param options: options from cmd args
+    :return:
+    """
+
+    #################################################################
+    # read parameters and targets
+
+    # read model parameters
+    with open(params_file) as params_open:
+        params = json.load(params_open)
+    params_model = params["model"]
+
+    # read targets
+    targets_df = pd.read_csv(options.targets_file, sep="\t", index_col=0)
+
+    # handle strand pairs
+    if "strand_pair" in targets_df.columns:
+        # prep strand
+        targets_strand_df = dataset.targets_prep_strand(targets_df)
+
+        # set strand pairs (using new indexing)
+        orig_new_index = dict(zip(targets_df.index, np.arange(targets_df.shape[0])))
+        targets_strand_pair = np.array(
+            [orig_new_index[ti] for ti in targets_df.strand_pair]
+        )
+        params_model["strand_pair"] = [targets_strand_pair]
+
+        # construct strand sum transform
+        strand_transform = make_strand_transform(targets_df, targets_strand_df)
+    else:
+        targets_strand_df = targets_df
+        strand_transform = None
+
+    #################################################################
+    # setup model
+
+    # can we sum on GPU?
+    sum_length = options.snp_stats == ["SAD"]
+
+    seqnn_model = seqnn.SeqNN(params_model)
+    seqnn_model.restore(model_file)
+    seqnn_model.build_slice(targets_df.index)
+    if sum_length:
+        seqnn_model.build_sad()
+    seqnn_model.build_ensemble(options.rc)
+
+    # shift outside seqnn
+    num_shifts = len(options.shifts)
+
+    targets_length = seqnn_model.target_lengths[0]
+    num_targets = seqnn_model.num_targets()
+    if options.targets_file is None:
+        target_ids = ["t%d" % ti for ti in range(num_targets)]
+        target_labels = [""] * len(target_ids)
+        targets_strand_df = pd.DataFrame(
+            {"identifier": target_ids, "description": target_labels}
+        )
+
+    #################################################################
+    # load SNPs
+
+    # clustering SNPs requires sorted VCF and no reference flips
+    snps_clustered = options.cluster_snps_pct > 0
+
+    # filter for worker SNPs
+    if options.processes is None:
+        start_i = None
+        end_i = None
+    else:
+        # determine boundaries
+        num_snps = bvcf.vcf_count(vcf_file)
+        worker_bounds = np.linspace(0, num_snps, options.processes + 1, dtype="int")
+        start_i = worker_bounds[worker_index]
+        end_i = worker_bounds[worker_index + 1]
+
+    # read SNPs
+    snps = bvcf.vcf_snps(
+        vcf_file,
+        require_sorted=snps_clustered,
+        flip_ref=not snps_clustered,
+        validate_ref_fasta=options.genome_fasta,
+        start_i=start_i,
+        end_i=end_i,
+    )
+
+    # cluster SNPs
+    if snps_clustered:
+        snp_clusters = cluster_snps(
+            snps, params_model["seq_length"], options.cluster_snps_pct
+        )
+    else:
+        snp_clusters = []
+        for snp in snps:
+            snp_clusters.append(SNPCluster())
+            snp_clusters[-1].add_snp(snp)
+
+    # delimit sequence boundaries
+    [sc.delimit(params_model["seq_length"]) for sc in snp_clusters]
+
+    # open genome FASTA
+    genome_open = pysam.Fastafile(options.genome_fasta)
+
+    #################################################################
+    # predict SNP scores, write output
+
+    # setup output
+    scores_out = initialize_output_h5(
+        options.out_dir,
+        options.snp_stats,
+        snps,
+        targets_length,
+        targets_strand_df,
+        num_shifts,
+    )
+
+    # SNP index
+    si = 0
+
+    for sc in tqdm(snp_clusters):
+        snp_1hot_list = sc.get_1hots(genome_open)
+        ref_1hot = np.expand_dims(snp_1hot_list[0],
axis=0) + + # predict reference + ref_preds = [] + for shift in options.shifts: + ref_1hot_shift = dna.hot1_augment(ref_1hot, shift=shift) + ref_preds_shift = seqnn_model(ref_1hot_shift)[0] + + # untransform predictions + if options.targets_file is not None: + if options.untransform_old: + ref_preds_shift = dataset.untransform_preds1( + ref_preds_shift, targets_df + ) + else: + ref_preds_shift = dataset.untransform_preds( + ref_preds_shift, targets_df + ) + + # sum strand pairs + if strand_transform is not None: + ref_preds_shift = ref_preds_shift * strand_transform + + # save shift prediction + ref_preds.append(ref_preds_shift) + ref_preds = np.array(ref_preds) + + ai = 0 + for alt_1hot in snp_1hot_list[1:]: + alt_1hot = np.expand_dims(alt_1hot, axis=0) + + # add compensation shifts for indels + indel_size = sc.snps[ai].indel_size() + if indel_size == 0: + alt_shifts = options.shifts + else: + # repeat reference predictions + ref_preds = np.repeat(ref_preds, 2, axis=0) + + # add compensation shifts + alt_shifts = [] + for shift in options.shifts: + alt_shifts.append(shift) + alt_shifts.append(shift - indel_size) + + # predict alternate + alt_preds = [] + for shift in alt_shifts: + alt_1hot_shift = dna.hot1_augment(alt_1hot, shift=shift) + alt_preds_shift = seqnn_model(alt_1hot_shift)[0] + + # untransform predictions + if options.targets_file is not None: + if options.untransform_old: + alt_preds_shift = dataset.untransform_preds1( + alt_preds_shift, targets_df + ) + else: + alt_preds_shift = dataset.untransform_preds( + alt_preds_shift, targets_df + ) + + # sum strand pairs + if strand_transform is not None: + alt_preds_shift = alt_preds_shift * strand_transform + + # save shift prediction + alt_preds.append(alt_preds_shift) + + # flip reference and alternate + if snps[si].flipped: + rp_snp = np.array(alt_preds) + ap_snp = np.array(ref_preds) + else: + rp_snp = np.array(ref_preds) + ap_snp = np.array(alt_preds) + + # write SNP + if sum_length: + write_snp(rp_snp, ap_snp, scores_out, si, options.snp_stats) + else: + write_snp_len(rp_snp, ap_snp, scores_out, si, options.snp_stats) + + # update SNP index + si += 1 + + # close genome + genome_open.close() + + # compute SAD distributions across variants + write_pct(scores_out, options.snp_stats) + scores_out.close()
+ + + +
+[docs] +def cluster_snps(snps, seq_len: int, center_pct: float): + """Cluster a sorted list of SNPs into regions that will satisfy + the required center_pct. + + Args: + snps [SNP]: List of SNPs. + seq_len (int): Sequence length. + center_pct (float): Percent of sequence length to cluster SNPs. + """ + valid_snp_distance = int(seq_len * center_pct) + + snp_clusters = [] + cluster_chr = None + + for snp in snps: + if snp.chr == cluster_chr and snp.pos < cluster_pos0 + valid_snp_distance: + # append to latest cluster + snp_clusters[-1].add_snp(snp) + else: + # initialize new cluster + snp_clusters.append(SNPCluster()) + snp_clusters[-1].add_snp(snp) + cluster_chr = snp.chr + cluster_pos0 = snp.pos + + return snp_clusters
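+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# consecutive SNPs join a cluster while they sit within seq_len*center_pct
+# of the cluster's first SNP (minimal stand-in SNP objects, same chromosome):
+from types import SimpleNamespace
+
+toy_snps = [SimpleNamespace(chr="chr1", pos=p) for p in [100, 150, 5000]]
+clusters = cluster_snps(toy_snps, seq_len=1000, center_pct=0.25)
+print([len(c.snps) for c in clusters])  # [2, 1]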
+ + + +
+[docs] +def initialize_output_h5( + out_dir, snp_stats, snps, targets_length, targets_df, num_shifts +): + """Initialize an output HDF5 file for SAD stats. + + Args: + out_dir (str): Output directory. + snp_stats [str]: List of SAD stats to compute. + snps [SNP]: List of SNPs. + targets_length (int): Targets' sequence length + targets_df (pd.DataFrame): Targets DataFrame. + num_shifts (int): Number of shifts. + """ + + num_targets = targets_df.shape[0] + num_snps = len(snps) + + scores_out = h5py.File("%s/scores.h5" % out_dir, "w") + + # write SNPs + snp_ids = np.array([snp.rsid for snp in snps], "S") + scores_out.create_dataset("snp", data=snp_ids) + + # write SNP chr + snp_chr = np.array([snp.chr for snp in snps], "S") + scores_out.create_dataset("chr", data=snp_chr) + + # write SNP pos + snp_pos = np.array([snp.pos for snp in snps], dtype="uint32") + scores_out.create_dataset("pos", data=snp_pos) + + # write SNP reference allele + snp_refs = [] + snp_alts = [] + for snp in snps: + if snp.flipped: + snp_refs.append(snp.alt_alleles[0]) + snp_alts.append(snp.ref_allele) + else: + snp_refs.append(snp.ref_allele) + snp_alts.append(snp.alt_alleles[0]) + snp_refs = np.array(snp_refs, "S") + snp_alts = np.array(snp_alts, "S") + scores_out.create_dataset("ref_allele", data=snp_refs) + scores_out.create_dataset("alt_allele", data=snp_alts) + + # write targets + scores_out.create_dataset("target_ids", data=np.array(targets_df.identifier, "S")) + scores_out.create_dataset( + "target_labels", data=np.array(targets_df.description, "S") + ) + + # initialize SAD stats + for snp_stat in snp_stats: + if snp_stat in ["REF", "ALT"]: + scores_out.create_dataset( + snp_stat, + shape=(num_snps, num_shifts, targets_length, num_targets), + dtype="float16", + ) + else: + scores_out.create_dataset( + snp_stat, shape=(num_snps, num_targets), dtype="float16" + ) + + return scores_out
+ + + +
+[docs] +def make_alt_1hot(ref_1hot, snp_seq_pos, ref_allele, alt_allele): + """Return alternative allele one hot coding. + + Args: + ref_1hot (np.array): Reference allele one hot coding. + snp_seq_pos (int): SNP position in sequence. + ref_allele (str): Reference allele. + alt_allele (str): Alternative allele. + + Returns: + np.array: Alternative allele one hot coding. + """ + ref_n = len(ref_allele) + alt_n = len(alt_allele) + + # copy reference + alt_1hot = np.copy(ref_1hot) + + if alt_n == ref_n: + # SNP + dna.hot1_set(alt_1hot, snp_seq_pos, alt_allele) + + elif ref_n > alt_n: + # deletion + delete_len = ref_n - alt_n + if ref_allele[0] == alt_allele[0]: + dna.hot1_delete(alt_1hot, snp_seq_pos + 1, delete_len) + else: + print( + "WARNING: Deletion first nt does not match: %s %s" + % (ref_allele, alt_allele), + file=sys.stderr, + ) + + else: + # insertion + if ref_allele[0] == alt_allele[0]: + dna.hot1_insert(alt_1hot, snp_seq_pos + 1, alt_allele[1:]) + else: + print( + "WARNING: Insertion first nt does not match: %s %s" + % (ref_allele, alt_allele), + file=sys.stderr, + ) + + return alt_1hot
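+# [Editor's note] Illustrative sketch, not part of the baskerville source
+# (assumes dna.dna_1hot/dna.hot1_dna helpers from baskerville.dna): a 1 bp
+# substitution writes the alt allele into a copy of the reference one-hot.
+from baskerville import dna
+
+ref_1hot = dna.dna_1hot("ACGTACGT")
+alt_1hot = make_alt_1hot(ref_1hot, snp_seq_pos=4, ref_allele="A", alt_allele="G")
+print(dna.hot1_dna(alt_1hot))  # ACGTGCGT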
+ + + +
+[docs] +def make_strand_transform(targets_df, targets_strand_df): + """Make a sparse matrix to sum strand pairs. + + Args: + targets_df (pd.DataFrame): Targets DataFrame. + targets_strand_df (pd.DataFrame): Targets DataFrame, with strand pairs collapsed. + + Returns: + scipy.sparse.csr_matrix: Sparse matrix to sum strand pairs. + """ + + # initialize sparse matrix + strand_transform = dok_matrix((targets_df.shape[0], targets_strand_df.shape[0])) + + # fill in matrix + ti = 0 + sti = 0 + for _, target in targets_df.iterrows(): + strand_transform[ti, sti] = True + if target.strand_pair == target.name: + sti += 1 + else: + if target.identifier[-1] == "-": + sti += 1 + ti += 1 + strand_transform = strand_transform.tocsr() + + return strand_transform
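+# [Editor's note] Illustrative sketch, not part of the baskerville source:
+# for a +/- strand pair (t0, t1) plus one unstranded track t2, the transform
+# is a 3x2 indicator matrix; `preds * transform` (a sparse matrix product)
+# sums each pair into a single column:
+#
+#   [[1, 0],
+#    [1, 0],      preds (L, 3) * transform -> (L, 2)
+#    [0, 1]]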
+ + + +
+[docs] +def write_pct(scores_out, snp_stats): + """Compute percentile values for each target and write to HDF5. + + Args: + scores_out (h5py.File): Output HDF5 file. + snp_stats [str]: List of SAD stats to compute. + """ + # define percentiles + d_fine = 0.001 + d_coarse = 0.01 + percentiles_neg = np.arange(d_fine, 0.1, d_fine) + percentiles_base = np.arange(0.1, 0.9, d_coarse) + percentiles_pos = np.arange(0.9, 1, d_fine) + + percentiles = np.concatenate([percentiles_neg, percentiles_base, percentiles_pos]) + scores_out.create_dataset("percentiles", data=percentiles) + + for snp_stat in snp_stats: + if snp_stat not in ["REF", "ALT"]: + snp_stat_pct = "%s_pct" % snp_stat + + # compute + sad_pct = np.percentile(scores_out[snp_stat], 100 * percentiles, axis=0).T + sad_pct = sad_pct.astype("float16") + + # save + scores_out.create_dataset(snp_stat_pct, data=sad_pct, dtype="float16")
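A sketch of how the stored grid might be used downstream to place a new score on the percentile scale; the sorted random values stand in for one target's column of a *_pct dataset:

    import numpy as np

    d_fine, d_coarse = 0.001, 0.01
    percentiles = np.concatenate(
        [
            np.arange(d_fine, 0.1, d_fine),
            np.arange(0.1, 0.9, d_coarse),
            np.arange(0.9, 1, d_fine),
        ]
    )
    sad_pct_target = np.sort(np.random.randn(len(percentiles)))  # stand-in values
    score = 0.25
    i = min(np.searchsorted(sad_pct_target, score), len(percentiles) - 1)
    print("score falls near the %.3f percentile" % percentiles[i])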
+ + + +
+[docs] +def write_snp(ref_preds_sum, alt_preds_sum, scores_out, si, snp_stats): + """Write SNP predictions to HDF, assuming the length dimension has + been collapsed. + + Args: + ref_preds_sum (np.array): Reference allele predictions. + alt_preds_sum (np.array): Alternative allele predictions. + scores_out (h5py.File): Output HDF5 file. + si (int): SNP index. + snp_stats [str]: List of SAD stats to compute. + """ + + # compare reference to alternative via mean subtraction + if "SAD" in snp_stats: + sad = alt_preds_sum - ref_preds_sum + sad = sad.mean(axis=0) + scores_out["SAD"][si] = sad.astype("float16")
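A tiny numeric example of the computation above, with 2 shifts and 3 targets of already length-collapsed predictions:

    import numpy as np

    ref_preds_sum = np.array([[10.0, 5.0, 2.0], [11.0, 4.0, 2.0]])
    alt_preds_sum = np.array([[12.0, 5.0, 1.0], [13.0, 4.0, 1.0]])
    sad = (alt_preds_sum - ref_preds_sum).mean(axis=0)  # average over shifts
    print(sad)  # [ 2.  0. -1.]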
+ + + +
+[docs]
+def write_snp_len(ref_preds, alt_preds, scores_out, si, snp_stats):
+    """Write SNP predictions to HDF, assuming the length dimension has
+    been maintained.
+
+    Args:
+        ref_preds (np.array): Reference allele predictions.
+        alt_preds (np.array): Alternative allele predictions.
+        scores_out (h5py.File): Output HDF5 file.
+        si (int): SNP index.
+        snp_stats [str]: List of SAD stats to compute.
+    """
+    num_shifts, seq_length, num_targets = ref_preds.shape
+
+    # log/sqrt
+    ref_preds_log = np.log2(ref_preds + 1)
+    alt_preds_log = np.log2(alt_preds + 1)
+    ref_preds_sqrt = np.sqrt(ref_preds)
+    alt_preds_sqrt = np.sqrt(alt_preds)
+
+    # sum across length
+    ref_preds_sum = ref_preds.sum(axis=(0, 1))
+    alt_preds_sum = alt_preds.sum(axis=(0, 1))
+    ref_preds_log_sum = ref_preds_log.sum(axis=(0, 1))
+    alt_preds_log_sum = alt_preds_log.sum(axis=(0, 1))
+    ref_preds_sqrt_sum = ref_preds_sqrt.sum(axis=(0, 1))
+    alt_preds_sqrt_sum = alt_preds_sqrt.sum(axis=(0, 1))
+
+    # difference
+    altref_diff = alt_preds - ref_preds
+    altref_adiff = np.abs(altref_diff)
+    altref_log_diff = alt_preds_log - ref_preds_log
+    altref_log_adiff = np.abs(altref_log_diff)
+    altref_sqrt_diff = alt_preds_sqrt - ref_preds_sqrt
+    altref_sqrt_adiff = np.abs(altref_sqrt_diff)
+
+    # compare reference to alternative via sum subtraction
+    if "SAD" in snp_stats:
+        sad = alt_preds_sum - ref_preds_sum
+        sad = np.clip(sad, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        scores_out["SAD"][si] = sad.astype("float16")
+    if "logSAD" in snp_stats:
+        log_sad = alt_preds_log_sum - ref_preds_log_sum
+        log_sad = np.clip(log_sad, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        scores_out["logSAD"][si] = log_sad.astype("float16")
+    if "sqrtSAD" in snp_stats:
+        sqrt_sad = alt_preds_sqrt_sum - ref_preds_sqrt_sum
+        sqrt_sad = np.clip(sqrt_sad, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        scores_out["sqrtSAD"][si] = sqrt_sad.astype("float16")
+
+    # compare reference to alternative via max subtraction
+    if "SAX" in snp_stats:
+        sax = []
+        for s in range(num_shifts):
+            max_i = np.argmax(altref_adiff[s], axis=0)
+            sax.append(altref_diff[s, max_i, np.arange(num_targets)])
+        sax = np.array(sax).mean(axis=0)
+        scores_out["SAX"][si] = sax.astype("float16")
+
+    # L1 norm of difference vector
+    if "D1" in snp_stats:
+        sad_d1 = altref_adiff.sum(axis=1)
+        sad_d1 = np.clip(sad_d1, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        sad_d1 = sad_d1.mean(axis=0)
+        scores_out["D1"][si] = sad_d1.astype("float16")
+    if "logD1" in snp_stats:
+        log_d1 = altref_log_adiff.sum(axis=1)
+        log_d1 = np.clip(log_d1, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        log_d1 = log_d1.mean(axis=0)
+        scores_out["logD1"][si] = log_d1.astype("float16")
+    if "sqrtD1" in snp_stats:
+        sqrt_d1 = altref_sqrt_adiff.sum(axis=1)
+        sqrt_d1 = np.clip(sqrt_d1, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        sqrt_d1 = sqrt_d1.mean(axis=0)
+        scores_out["sqrtD1"][si] = sqrt_d1.astype("float16")
+
+    # L2 norm of difference vector
+    if "D2" in snp_stats:
+        altref_diff2 = np.power(altref_diff, 2)
+        sad_d2 = np.sqrt(altref_diff2.sum(axis=1))
+        sad_d2 = np.clip(sad_d2, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        sad_d2 = sad_d2.mean(axis=0)
+        scores_out["D2"][si] = sad_d2.astype("float16")
+    if "logD2" in snp_stats:
+        altref_log_diff2 = np.power(altref_log_diff, 2)
+        log_d2 = np.sqrt(altref_log_diff2.sum(axis=1))
+        log_d2 = np.clip(log_d2, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        log_d2 = log_d2.mean(axis=0)
+        scores_out["logD2"][si] = log_d2.astype("float16")
+    if "sqrtD2" in snp_stats:
+        altref_sqrt_diff2 = np.power(altref_sqrt_diff, 2)
+        sqrt_d2 = np.sqrt(altref_sqrt_diff2.sum(axis=1))
+        sqrt_d2 = np.clip(sqrt_d2, np.finfo(np.float16).min, np.finfo(np.float16).max)
+        sqrt_d2 = sqrt_d2.mean(axis=0)
+        scores_out["sqrtD2"][si] = sqrt_d2.astype("float16")
+
+    if "JS" in snp_stats:
+        # normalized scores
+        pseudocounts = np.percentile(ref_preds, 25, axis=1)
+        ref_preds_norm = ref_preds + pseudocounts
+        ref_preds_norm /= ref_preds_norm.sum(axis=1)
+        alt_preds_norm = alt_preds + pseudocounts
+        alt_preds_norm /= alt_preds_norm.sum(axis=1)
+
+        # compare normalized JS
+        js_dist = []
+        for s in range(num_shifts):
+            ref_alt_entr = rel_entr(ref_preds_norm[s], alt_preds_norm[s]).sum(axis=0)
+            alt_ref_entr = rel_entr(alt_preds_norm[s], ref_preds_norm[s]).sum(axis=0)
+            js_dist.append((ref_alt_entr + alt_ref_entr) / 2)
+        js_dist = np.mean(js_dist, axis=0)
+        scores_out["JS"][si] = js_dist.astype("float16")
+    if "logJS" in snp_stats:
+        # normalized scores
+        pseudocounts = np.percentile(ref_preds_log, 25, axis=0)
+        ref_preds_log_norm = ref_preds_log + pseudocounts
+        ref_preds_log_norm /= ref_preds_log_norm.sum(axis=0)
+        alt_preds_log_norm = alt_preds_log + pseudocounts
+        alt_preds_log_norm /= alt_preds_log_norm.sum(axis=0)
+
+        # compare normalized JS
+        log_js_dist = []
+        for s in range(num_shifts):
+            ref_alt_entr = rel_entr(ref_preds_log_norm[s], alt_preds_log_norm[s]).sum(
+                axis=0
+            )
+            alt_ref_entr = rel_entr(alt_preds_log_norm[s], ref_preds_log_norm[s]).sum(
+                axis=0
+            )
+            log_js_dist.append((ref_alt_entr + alt_ref_entr) / 2)
+        log_js_dist = np.mean(log_js_dist, axis=0)
+        scores_out["logJS"][si] = log_js_dist.astype("float16")
+
+    # predictions
+    if "REF" in snp_stats:
+        ref_preds = np.clip(
+            ref_preds, np.finfo(np.float16).min, np.finfo(np.float16).max
+        )
+        scores_out["REF"][si] = ref_preds.astype("float16")
+    if "ALT" in snp_stats:
+        alt_preds = np.clip(
+            alt_preds, np.finfo(np.float16).min, np.finfo(np.float16).max
+        )
+        scores_out["ALT"][si] = alt_preds.astype("float16")
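For shape intuition, a sketch computing a few of the statistics above on random arrays; every per-SNP output reduces to shape (num_targets,):

    import numpy as np

    num_shifts, seq_length, num_targets = 2, 8, 3
    rng = np.random.default_rng(0)
    ref_preds = rng.random((num_shifts, seq_length, num_targets))
    alt_preds = rng.random((num_shifts, seq_length, num_targets))

    sad = alt_preds.sum(axis=(0, 1)) - ref_preds.sum(axis=(0, 1))           # SAD
    d1 = np.abs(alt_preds - ref_preds).sum(axis=1).mean(axis=0)             # D1
    d2 = np.sqrt(((alt_preds - ref_preds) ** 2).sum(axis=1)).mean(axis=0)   # D2
    print(sad.shape, d1.shape, d2.shape)  # (3,) (3,) (3,)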
+ + + +
+[docs] +class SNPCluster: + def __init__(self): + self.snps = [] + self.chr = None + self.start = None + self.end = None + +
+[docs] + def add_snp(self, snp): + """Add SNP to cluster.""" + self.snps.append(snp)
+ + +
+[docs] + def delimit(self, seq_len): + """Delimit sequence boundaries.""" + positions = [snp.pos for snp in self.snps] + pos_min = np.min(positions) + pos_max = np.max(positions) + pos_mid = (pos_min + pos_max) // 2 + + self.chr = self.snps[0].chr + self.start = pos_mid - seq_len // 2 + self.end = self.start + seq_len + + for snp in self.snps: + snp.seq_pos = snp.pos - 1 - self.start
+ + +
+[docs] + def get_1hots(self, genome_open): + """Get list of one hot coded sequences.""" + seqs1_list = [] + + # extract reference + if self.start < 0: + ref_seq = ( + "N" * (-self.start) + genome_open.fetch(self.chr, 0, self.end).upper() + ) + else: + ref_seq = genome_open.fetch(self.chr, self.start, self.end).upper() + + # extend to full length + if len(ref_seq) < self.end - self.start: + ref_seq += "N" * (self.end - self.start - len(ref_seq)) + + # verify reference alleles + for snp in self.snps: + ref_n = len(snp.ref_allele) + ref_snp = ref_seq[snp.seq_pos : snp.seq_pos + ref_n] + if snp.ref_allele != ref_snp: + print( + "ERROR: %s does not match reference %s" % (snp, ref_snp), + file=sys.stderr, + ) + exit(1) + + # 1 hot code reference sequence + ref_1hot = dna.dna_1hot(ref_seq) + seqs1_list = [ref_1hot] + + # make alternative 1 hot coded sequences + # (assuming SNP is 1-based indexed) + for snp in self.snps: + alt_1hot = make_alt_1hot( + ref_1hot, snp.seq_pos, snp.ref_allele, snp.alt_alleles[0] + ) + seqs1_list.append(alt_1hot) + + return seqs1_list
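Putting the class together, a sketch that clusters two nearby SNPs and builds their one-hot sequences. The VCF records and FASTA path are illustrative, and the reference alleles must match the genome at those positions (get_1hots exits otherwise):

    import pysam
    from baskerville.vcf import SNP

    cluster = SNPCluster()
    cluster.add_snp(SNP("chr1\t1000100\trs1\tA\tG"))
    cluster.add_snp(SNP("chr1\t1000150\trs2\tC\tT"))
    cluster.delimit(seq_len=131072)

    genome_open = pysam.Fastafile("hg38.fa")      # hypothetical path
    seqs1_list = cluster.get_1hots(genome_open)   # [ref_1hot, alt1_1hot, alt2_1hot]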
+
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/baskerville/trainer.html b/_modules/baskerville/trainer.html new file mode 100644 index 0000000..ebeb530 --- /dev/null +++ b/_modules/baskerville/trainer.html @@ -0,0 +1,1120 @@ + + + + + + baskerville.trainer — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for baskerville.trainer

+# Copyright 2023 Calico LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import time
+import pdb
+import sys
+
+import numpy as np
+import tensorflow as tf
+
+from baskerville import metrics
+
+
+
+[docs]
+def parse_loss(
+    loss_label,
+    strategy=None,
+    keras_fit: bool = True,
+    spec_weight: float = 1,
+    total_weight: float = 1,
+):
+    """Parse loss function from label, strategy, and fitting method.
+
+    Args:
+        loss_label (str): Loss function label.
+        strategy: tf.distribute.Strategy object.
+        keras_fit (bool): Use Keras fit method instead of custom loop.
+        spec_weight (float): Specificity weight for PoissonKL.
+        total_weight (float): Total weight for PoissonMultinomial.
+
+    Returns:
+        loss_fn: tf.keras.losses.Loss object.
+    """
+    if strategy is not None and not keras_fit:
+        if loss_label == "mse":
+            loss_fn = tf.keras.losses.MeanSquaredError(
+                reduction=tf.keras.losses.Reduction.NONE
+            )
+        elif loss_label == "bce":
+            loss_fn = tf.keras.losses.BinaryCrossentropy(
+                reduction=tf.keras.losses.Reduction.NONE
+            )
+        elif loss_label == "poisson_mn":
+            loss_fn = metrics.PoissonMultinomial(
+                total_weight, reduction=tf.keras.losses.Reduction.NONE
+            )
+        else:
+            loss_fn = tf.keras.losses.Poisson(reduction=tf.keras.losses.Reduction.NONE)
+    else:
+        if loss_label == "mse":
+            loss_fn = tf.keras.losses.MeanSquaredError()
+        elif loss_label == "mse_udot":
+            loss_fn = metrics.MeanSquaredErrorUDot(spec_weight)
+        elif loss_label == "bce":
+            loss_fn = tf.keras.losses.BinaryCrossentropy()
+        elif loss_label == "poisson_kl":
+            loss_fn = metrics.PoissonKL(spec_weight)
+        elif loss_label == "poisson_mn":
+            loss_fn = metrics.PoissonMultinomial(total_weight)
+        else:
+            loss_fn = tf.keras.losses.Poisson()
+
+    return loss_fn
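For example, under the default single-device, Keras-fit path:

    loss_fn = parse_loss("poisson")                       # tf.keras.losses.Poisson
    mn_loss = parse_loss("poisson_mn", total_weight=0.2)  # metrics.PoissonMultinomial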
+ + + +
+[docs] +class Trainer: + """Model training class. + + Args: + params (dict): Training parameters dictionary. + train_data: Dataset object or list of Dataset objects. + eval_data: Dataset object or list of Dataset objects. + out_dir (str): Output directory name. + strategy: tf.distribute.Strategy object. + num_gpu (int): Number of GPUs to use. Default: 1. + keras_fit (bool): Use Keras fit method instead of custom loop. + """ + + def __init__( + self, + params: dict, + train_data, + eval_data, + out_dir: str, + strategy=None, + num_gpu: int = 1, + keras_fit: bool = False, + ): + self.params = params + self.train_data = train_data + if type(self.train_data) is not list: + self.train_data = [self.train_data] + self.eval_data = eval_data + if type(self.eval_data) is not list: + self.eval_data = [self.eval_data] + self.out_dir = out_dir + self.strategy = strategy + self.num_gpu = num_gpu + self.batch_size = self.train_data[0].batch_size + self.compiled = False + + # early stopping + self.patience = self.params.get("patience", 20) + + # compute batches/epoch + self.train_epoch_batches = [td.batches_per_epoch() for td in self.train_data] + self.eval_epoch_batches = [ed.batches_per_epoch() for ed in self.eval_data] + self.train_epochs_min = self.params.get("train_epochs_min", 1) + self.train_epochs_max = self.params.get("train_epochs_max", 10000) + + # dataset + self.num_datasets = len(self.train_data) + self.dataset_indexes = [] + for di in range(self.num_datasets): + self.dataset_indexes += [di] * self.train_epoch_batches[di] + self.dataset_indexes = np.array(self.dataset_indexes) + + # loss + self.spec_weight = self.params.get("spec_weight", 1) + self.total_weight = self.params.get("total_weight", 1) + self.loss = self.params.get("loss", "poisson").lower() + self.loss_fn = parse_loss( + self.loss, self.strategy, keras_fit, self.spec_weight, self.total_weight + ) + + # optimizer + self.make_optimizer() + +
+[docs] + def compile(self, seqnn_model): + for model in seqnn_model.models: + if self.loss == "bce": + model_metrics = [ + metrics.SeqAUC(curve="ROC"), + metrics.SeqAUC(curve="PR"), + ] + else: + num_targets = model.output_shape[-1] + model_metrics = [metrics.PearsonR(num_targets), metrics.R2(num_targets)] + + model.compile( + loss=self.loss_fn, optimizer=self.optimizer, metrics=model_metrics + ) + self.compiled = True
+ + +
+[docs] + def fit_keras(self, seqnn_model): + if not self.compiled: + self.compile(seqnn_model) + + if self.loss == "bce": + early_stop = EarlyStoppingMin( + monitor="val_loss", + mode="min", + verbose=1, + patience=self.patience, + min_epoch=self.train_epochs_min, + ) + save_best = tf.keras.callbacks.ModelCheckpoint( + "%s/model_best.h5" % self.out_dir, + save_best_only=True, + mode="min", + monitor="val_loss", + verbose=1, + ) + else: + early_stop = EarlyStoppingMin( + monitor="val_pearsonr", + mode="max", + verbose=1, + patience=self.patience, + min_epoch=self.train_epochs_min, + ) + save_best = tf.keras.callbacks.ModelCheckpoint( + "%s/model_best.h5" % self.out_dir, + save_best_only=True, + mode="max", + monitor="val_pearsonr", + verbose=1, + ) + + callbacks = [ + early_stop, + tf.keras.callbacks.TensorBoard(self.out_dir), + tf.keras.callbacks.ModelCheckpoint("%s/model_check.h5" % self.out_dir), + save_best, + ] + + seqnn_model.model.fit( + self.train_data[0].dataset, + epochs=self.train_epochs_max, + steps_per_epoch=self.train_epoch_batches[0], + callbacks=callbacks, + validation_data=self.eval_data[0].dataset, + validation_steps=self.eval_epoch_batches[0], + )
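A minimal sketch of driving this method; train_data, eval_data, and seqnn_model are assumed to be already-constructed baskerville dataset and model objects:

    params = {
        "loss": "poisson",
        "learning_rate": 1e-4,
        "optimizer": "adam",
        "patience": 8,
    }
    trainer = Trainer(params, train_data, eval_data, "train_out", keras_fit=True)
    trainer.fit_keras(seqnn_model)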
+ + +
+[docs] + def fit2(self, seqnn_model): + """Train the model using a custom loop for two separate datasets.""" + if not self.compiled: + self.compile(seqnn_model) + + assert len(seqnn_model.models) >= self.num_datasets + + # inform optimizer about all trainable variables (v2.11-) + vars_set = set() + trainable_vars = [] + for di in range(self.num_datasets): + for v in seqnn_model.models[di].trainable_variables: + if v.name not in vars_set: + vars_set.add(v.name) + trainable_vars.append(v) + try: + self.optimizer.build(trainable_vars) + except AttributeError: + pass + + ################################################################ + # prep + + # metrics + train_loss, train_r, train_r2 = [], [], [] + valid_loss, valid_r, valid_r2 = [], [], [] + for di in range(self.num_datasets): + num_targets = seqnn_model.models[di].output_shape[-1] + train_loss.append(tf.keras.metrics.Mean(name="train%d_loss" % di)) + train_r.append(metrics.PearsonR(num_targets, name="train%d_r" % di)) + train_r2.append(metrics.R2(num_targets, name="train%d_r2" % di)) + valid_loss.append(tf.keras.metrics.Mean(name="valid%d_loss" % di)) + valid_r.append(metrics.PearsonR(num_targets, name="valid%d_r" % di)) + valid_r2.append(metrics.R2(num_targets, name="valid%d_r2" % di)) + + if self.strategy is None: + # generate decorated train steps + @tf.function + def train_step0(x, y): + with tf.GradientTape() as tape: + pred = seqnn_model.models[0](x, training=True) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[0].losses) + train_loss[0](loss) + train_r[0](y, pred) + train_r2[0](y, pred) + gradients = tape.gradient( + loss, seqnn_model.models[0].trainable_variables + ) + self.optimizer.apply_gradients( + zip(gradients, seqnn_model.models[0].trainable_variables) + ) + + @tf.function + def eval_step0(x, y): + pred = seqnn_model.models[0](x, training=False) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[0].losses) + valid_loss[0](loss) + valid_r[0](y, pred) + valid_r2[0](y, pred) + + if self.num_datasets > 1: + + @tf.function + def train_step1(x, y): + with tf.GradientTape() as tape: + pred = seqnn_model.models[1](x, training=True) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[1].losses) + train_loss[1](loss) + train_r[1](y, pred) + train_r2[1](y, pred) + gradients = tape.gradient( + loss, seqnn_model.models[1].trainable_variables + ) + self.optimizer.apply_gradients( + zip(gradients, seqnn_model.models[1].trainable_variables) + ) + + @tf.function + def eval_step1(x, y): + pred = seqnn_model.models[1](x, training=False) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[1].losses) + valid_loss[1](loss) + valid_r[1](y, pred) + valid_r2[1](y, pred) + + else: + + def train_step0(x, y): + with tf.GradientTape() as tape: + pred = seqnn_model.models[0](x, training=True) + loss_batch_len = self.loss_fn(y, pred) + loss_batch = tf.reduce_mean(loss_batch_len, axis=-1) + loss = tf.reduce_sum(loss_batch) / self.batch_size + loss += sum(seqnn_model.models[0].losses) / self.num_gpu + train_r[0](y, pred) + train_r2[0](y, pred) + gradients = tape.gradient( + loss, seqnn_model.models[0].trainable_variables + ) + self.optimizer.apply_gradients( + zip(gradients, seqnn_model.models[0].trainable_variables) + ) + return loss + + @tf.function + def train_step0_distr(xd, yd): + replica_losses = self.strategy.run(train_step0, args=(xd, yd)) + loss = self.strategy.reduce( + tf.distribute.ReduceOp.SUM, replica_losses, axis=None + ) + train_loss[0](loss) + + def eval_step0(x, y): + pred = seqnn_model.models[0](x, 
training=False) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[0].losses) + valid_loss[0](loss) + valid_r[0](y, pred) + valid_r2[0](y, pred) + + @tf.function + def eval_step0_distr(xd, yd): + return self.strategy.run(eval_step0, args=(xd, yd)) + + if self.num_datasets > 1: + + def train_step1(x, y): + with tf.GradientTape() as tape: + pred = seqnn_model.models[1](x, training=True) + loss_batch_len = self.loss_fn(y, pred) + loss_batch = tf.reduce_mean(loss_batch_len, axis=-1) + loss = tf.reduce_sum(loss_batch) / self.batch_size + loss += sum(seqnn_model.models[1].losses) / self.num_gpu + train_loss[1](loss) + train_r[1](y, pred) + train_r2[1](y, pred) + gradients = tape.gradient( + loss, seqnn_model.models[1].trainable_variables + ) + self.optimizer.apply_gradients( + zip(gradients, seqnn_model.models[1].trainable_variables) + ) + return loss + + @tf.function + def train_step1_distr(xd, yd): + replica_losses = self.strategy.run(train_step1, args=(xd, yd)) + loss = self.strategy.reduce( + tf.distribute.ReduceOp.SUM, replica_losses, axis=None + ) + train_loss[1](loss) + + def eval_step1(x, y): + pred = seqnn_model.models[1](x, training=False) + loss = self.loss_fn(y, pred) + sum(seqnn_model.models[1].losses) + valid_loss[1](loss) + valid_r[1](y, pred) + valid_r2[1](y, pred) + + @tf.function + def eval_step1_distr(xd, yd): + return self.strategy.run(eval_step1, args=(xd, yd)) + + # checkpoint manager + managers = [] + for di in range(self.num_datasets): + ckpt = tf.train.Checkpoint( + model=seqnn_model.models[di], optimizer=self.optimizer + ) + ckpt_dir = "%s/model%d" % (self.out_dir, di) + manager = tf.train.CheckpointManager(ckpt, ckpt_dir, max_to_keep=1) + if manager.latest_checkpoint: + ckpt.restore(manager.latest_checkpoint) + ckpt_end = 5 + manager.latest_checkpoint.find("ckpt-") + epoch_start = int(manager.latest_checkpoint[ckpt_end:]) + if self.strategy is None: + opt_iters = self.optimizer.iterations + else: + opt_iters = self.optimizer.iterations.values[0] + print( + "Checkpoint restored at epoch %d, optimizer iteration %d." 
+                    % (epoch_start, opt_iters)
+                )
+            else:
+                print("No checkpoints found.")
+                epoch_start = 0
+            managers.append(manager)
+
+        # improvement variables
+        valid_best = [-np.inf] * self.num_datasets
+        unimproved = [0] * self.num_datasets
+
+        ################################################################
+        # training loop
+
+        first_step = True
+        for ei in range(epoch_start, self.train_epochs_max):
+            if ei >= self.train_epochs_min and np.min(unimproved) > self.patience:
+                break
+            else:
+                # shuffle datasets
+                np.random.shuffle(self.dataset_indexes)
+
+                # get iterators
+                train_data_iters = [iter(td.dataset) for td in self.train_data]
+
+                # train
+                t0 = time.time()
+                for di in self.dataset_indexes:
+                    x, y = safe_next(train_data_iters[di])
+                    if self.strategy is None:
+                        if di == 0:
+                            train_step0(x, y)
+                        else:
+                            train_step1(x, y)
+                    else:
+                        if di == 0:
+                            train_step0_distr(x, y)
+                        else:
+                            train_step1_distr(x, y)
+                    if first_step:
+                        print("Successful first step!", flush=True)
+                        first_step = False
+
+                print("Epoch %d - %ds" % (ei, (time.time() - t0)))
+                for di in range(self.num_datasets):
+                    print(" Data %d" % di, end="")
+                    model = seqnn_model.models[di]
+
+                    # print training accuracy
+                    print(
+                        " - train_loss: %.4f" % train_loss[di].result().numpy(), end=""
+                    )
+                    print(" - train_r: %.4f" % train_r[di].result().numpy(), end="")
+                    print(" - train_r2: %.4f" % train_r2[di].result().numpy(), end="")
+
+                    # evaluate
+                    for x, y in self.eval_data[di].dataset:
+                        if self.strategy is None:
+                            if di == 0:
+                                eval_step0(x, y)
+                            else:
+                                eval_step1(x, y)
+                        else:
+                            if di == 0:
+                                eval_step0_distr(x, y)
+                            else:
+                                eval_step1_distr(x, y)
+
+                    # print validation accuracy
+                    print(
+                        " - valid_loss: %.4f" % valid_loss[di].result().numpy(), end=""
+                    )
+                    print(" - valid_r: %.4f" % valid_r[di].result().numpy(), end="")
+                    print(" - valid_r2: %.4f" % valid_r2[di].result().numpy(), end="")
+                    early_stop_stat = valid_r[di].result().numpy()
+
+                    # checkpoint
+                    managers[di].save()
+                    model.save(
+                        "%s/model%d_check.h5" % (self.out_dir, di),
+                        include_optimizer=False,
+                    )
+
+                    # check best
+                    if early_stop_stat > valid_best[di]:
+                        print(" - best!", end="")
+                        unimproved[di] = 0
+                        valid_best[di] = early_stop_stat
+                        model.save(
+                            "%s/model%d_best.h5" % (self.out_dir, di),
+                            include_optimizer=False,
+                        )
+                    else:
+                        unimproved[di] += 1
+                    print("", flush=True)
+
+                    # reset metrics
+                    train_loss[di].reset_states()
+                    train_r[di].reset_states()
+                    train_r2[di].reset_states()
+                    valid_loss[di].reset_states()
+                    valid_r[di].reset_states()
+                    valid_r2[di].reset_states()
+ + +
+[docs] + def fit_tape(self, seqnn_model): + """Train the model using a custom tf.GradientTape loop.""" + if not self.compiled: + self.compile(seqnn_model) + model = seqnn_model.model + + # metrics + num_targets = model.output_shape[-1] + train_loss = tf.keras.metrics.Mean(name="train_loss") + train_r = metrics.PearsonR(num_targets, name="train_r") + train_r2 = metrics.R2(num_targets, name="train_r2") + valid_loss = tf.keras.metrics.Mean(name="valid_loss") + valid_r = metrics.PearsonR(num_targets, name="valid_r") + valid_r2 = metrics.R2(num_targets, name="valid_r2") + + if self.strategy is None: + + @tf.function + def train_step(x, y): + with tf.GradientTape() as tape: + pred = model(x, training=True) + loss = self.loss_fn(y, pred) + sum(model.losses) + train_loss(loss) + train_r(y, pred) + train_r2(y, pred) + gradients = tape.gradient(loss, model.trainable_variables) + if self.agc_clip is not None: + gradients = adaptive_clip_grad( + model.trainable_variables, gradients, self.agc_clip + ) + self.optimizer.apply_gradients( + zip(gradients, model.trainable_variables) + ) + + @tf.function + def eval_step(x, y): + pred = model(x, training=False) + loss = self.loss_fn(y, pred) + sum(model.losses) + valid_loss(loss) + valid_r(y, pred) + valid_r2(y, pred) + + else: + + def train_step(x, y): + with tf.GradientTape() as tape: + pred = model(x, training=True) + loss_batch_len = self.loss_fn(y, pred) + loss_batch = tf.reduce_mean(loss_batch_len, axis=-1) + loss = tf.reduce_sum(loss_batch) / self.batch_size + loss += sum(model.losses) / self.num_gpu + train_r(y, pred) + train_r2(y, pred) + gradients = tape.gradient(loss, model.trainable_variables) + self.optimizer.apply_gradients( + zip(gradients, model.trainable_variables) + ) + return loss + + @tf.function + def train_step_distr(xd, yd): + replica_losses = self.strategy.run(train_step, args=(xd, yd)) + loss = self.strategy.reduce( + tf.distribute.ReduceOp.SUM, replica_losses, axis=None + ) + train_loss(loss) + + def eval_step(x, y): + pred = model(x, training=False) + loss = self.loss_fn(y, pred) + sum(model.losses) + valid_loss(loss) + valid_r(y, pred) + valid_r2(y, pred) + + @tf.function + def eval_step_distr(xd, yd): + return self.strategy.run(eval_step, args=(xd, yd)) + + # checkpoint manager + ckpt = tf.train.Checkpoint(model=seqnn_model.model, optimizer=self.optimizer) + manager = tf.train.CheckpointManager(ckpt, self.out_dir, max_to_keep=1) + if manager.latest_checkpoint: + ckpt.restore(manager.latest_checkpoint) + ckpt_end = 5 + manager.latest_checkpoint.find("ckpt-") + epoch_start = int(manager.latest_checkpoint[ckpt_end:]) + if self.strategy is None: + opt_iters = self.optimizer.iterations + else: + opt_iters = self.optimizer.iterations.values[0] + print( + "Checkpoint restored at epoch %d, optimizer iteration %d." 
+ % (epoch_start, opt_iters) + ) + else: + print("No checkpoints found.") + epoch_start = 0 + + # improvement variables + valid_best = -np.inf + unimproved = 0 + + # training loop + for ei in range(epoch_start, self.train_epochs_max): + if ei >= self.train_epochs_min and unimproved > self.patience: + break + else: + # train + t0 = time.time() + train_iter = iter(self.train_data[0].dataset) + for si in range(self.train_epoch_batches[0]): + x, y = safe_next(train_iter) + if self.strategy is not None: + train_step_distr(x, y) + else: + train_step(x, y) + if ei == epoch_start and si == 0: + print("Successful first step!", flush=True) + + # evaluate + for x, y in self.eval_data[0].dataset: + if self.strategy is not None: + eval_step_distr(x, y) + else: + eval_step(x, y) + + # print training accuracy + train_loss_epoch = train_loss.result().numpy() + train_r_epoch = train_r.result().numpy() + train_r2_epoch = train_r2.result().numpy() + print( + "Epoch %d - %ds - train_loss: %.4f - train_r: %.4f - train_r2: %.4f" + % ( + ei, + (time.time() - t0), + train_loss_epoch, + train_r_epoch, + train_r2_epoch, + ), + end="", + ) + + # print validation accuracy + valid_loss_epoch = valid_loss.result().numpy() + valid_r_epoch = valid_r.result().numpy() + valid_r2_epoch = valid_r2.result().numpy() + print( + " - valid_loss: %.4f - valid_r: %.4f - valid_r2: %.4f" + % (valid_loss_epoch, valid_r_epoch, valid_r2_epoch), + end="", + ) + + # checkpoint + manager.save() + seqnn_model.save("%s/model_check.h5" % self.out_dir) + + # check best + valid_best_epoch = valid_r_epoch + valid_r2_epoch / 4 + if valid_best_epoch > valid_best: + print(" - best!", end="") + unimproved = 0 + valid_best = valid_best_epoch + seqnn_model.save("%s/model_best.h5" % self.out_dir) + else: + unimproved += 1 + print("", flush=True) + + # reset metrics + train_loss.reset_states() + train_r.reset_states() + train_r2.reset_states() + valid_loss.reset_states() + valid_r.reset_states() + valid_r2.reset_states()
+ + +
+[docs] + def make_optimizer(self): + """Make optimizer object from given parameters.""" + cyclical1 = True + for lrs_param in [ + "initial_learning_rate", + "maximal_learning_rate", + "final_learning_rate", + "train_epochs_cycle1", + ]: + cyclical1 = cyclical1 & (lrs_param in self.params) + if cyclical1: + step_size = self.params["train_epochs_cycle1"] * sum( + self.train_epoch_batches + ) + initial_learning_rate = self.params.get("initial_learning_rate") + lr_schedule = Cyclical1LearningRate( + initial_learning_rate=self.params["initial_learning_rate"], + maximal_learning_rate=self.params["maximal_learning_rate"], + final_learning_rate=self.params["final_learning_rate"], + step_size=step_size, + ) + else: + # schedule (currently OFF) + initial_learning_rate = self.params.get("learning_rate", 0.01) + if False: + lr_schedule = keras.optimizers.schedules.ExponentialDecay( + initial_learning_rate, + decay_steps=self.params.get("decay_steps", 100000), + decay_rate=self.params.get("decay_rate", 0.96), + staircase=True, + ) + else: + lr_schedule = initial_learning_rate + + if "warmup_steps" in self.params: + lr_schedule = WarmUp( + initial_learning_rate=initial_learning_rate, + warmup_steps=self.params["warmup_steps"], + decay_schedule=lr_schedule, + ) + + global_clipnorm = self.params.get("global_clipnorm", None) + if "clip_norm" in self.params: + clip_norm = self.params["clip_norm"] + elif "clipnorm" in self.params: + clip_norm = self.params["clipnorm"] + else: + clip_norm = None + + # adaptive gradient clipping handled in fit method + self.agc_clip = self.params.get("agc_clip", None) + + # optimizer + optimizer_type = self.params.get("optimizer", "sgd").lower() + if optimizer_type == "adam": + self.optimizer = tf.keras.optimizers.Adam( + learning_rate=lr_schedule, + beta_1=self.params.get("adam_beta1", 0.9), + beta_2=self.params.get("adam_beta2", 0.999), + clipnorm=clip_norm, + global_clipnorm=global_clipnorm, + amsgrad=False, + ) # reduces performance in my experience + + elif optimizer_type == "adamw": + self.optimizer = tf.keras.optimizers.AdamW( + weight_decay=self.params.get("weight_decay", 0), + learning_rate=lr_schedule, + beta_1=self.params.get("adam_beta1", 0.9), + beta_2=self.params.get("adam_beta2", 0.999), + clipnorm=clip_norm, + global_clipnorm=global_clipnorm, + amsgrad=False, + ) # reduces performance in my experience + + elif optimizer_type in ["sgd", "momentum"]: + self.optimizer = tf.keras.optimizers.SGD( + learning_rate=lr_schedule, + momentum=self.params.get("momentum", 0.99), + clipnorm=clip_norm, + global_clipnorm=global_clipnorm, + ) + + else: + print("Cannot recognize optimization algorithm %s" % optimizer_type) + exit(1)
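The parameter keys this method consults, gathered into one illustrative dictionary (values are placeholders, not recommendations):

    params = {
        "optimizer": "adam",
        "initial_learning_rate": 1e-5,
        "maximal_learning_rate": 1e-4,
        "final_learning_rate": 1e-6,
        "train_epochs_cycle1": 20,  # with the three rates above, enables Cyclical1LearningRate
        "warmup_steps": 5000,       # wraps the schedule in WarmUp
        "global_clipnorm": 0.15,
        "adam_beta1": 0.9,
        "adam_beta2": 0.999,
    }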
+
+ + + +################################################################ +# AGC +# https://github.com/sayakpaul/Adaptive-Gradient-Clipping + + +
+[docs] +def compute_norm(x, axis, keepdims): + """Compute L2 norm of a tensor across an axis.""" + return tf.math.reduce_sum(x**2, axis=axis, keepdims=keepdims) ** 0.5
+ + + +
+[docs]
+def unitwise_norm(x):
+    """Compute a unit-wise L2 norm, choosing the reduction axes by tensor rank."""
+    if len(x.get_shape()) <= 1:  # Scalars and vectors
+        axis = None
+        keepdims = False
+    elif len(x.get_shape()) in [2, 3]:  # Linear layers of shape IO or multihead linear
+        axis = 0
+        keepdims = True
+    elif len(x.get_shape()) == 4:  # Conv kernels of shape HWIO
+        axis = [0, 1, 2]
+        keepdims = True
+    else:
+        raise ValueError(f"Got a parameter with shape not in [1, 2, 3, 4]! {x}")
+    return compute_norm(x, axis, keepdims)
+ + + +
+[docs] +def adaptive_clip_grad( + parameters, gradients, clip_factor: float = 0.1, eps: float = 1e-3 +): + """Adaptive gradient clipping.""" + new_grads = [] + for params, grads in zip(parameters, gradients): + p_norm = unitwise_norm(params) + max_norm = tf.math.maximum(p_norm, eps) * clip_factor + grad_norm = unitwise_norm(grads) + clipped_grad = grads * (max_norm / tf.math.maximum(grad_norm, 1e-6)) + new_grad = tf.where(grad_norm < max_norm, grads, clipped_grad) + new_grads.append(new_grad) + return new_grads
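A toy check of the clipping behavior: a deliberately large gradient is rescaled so its unit-wise norm is at most clip_factor times the parameter's norm:

    import tensorflow as tf

    params = [tf.ones((4, 4))]       # column norms = 2.0
    grads = [tf.fill((4, 4), 10.0)]  # column norms = 20.0 (too large)
    clipped = adaptive_clip_grad(params, grads, clip_factor=0.1)
    print(tf.reduce_max(clipped[0]).numpy())  # 0.1: norm capped at 0.1 * 2.0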
+ + + +
+[docs] +class EarlyStoppingMin(tf.keras.callbacks.EarlyStopping): + """Stop training when a monitored quantity has stopped improving. + + Args: + min_epoch: Minimum number of epochs before considering stopping. + """ + + def __init__(self, min_epoch: int = 0, **kwargs): + super(EarlyStoppingMin, self).__init__(**kwargs) + self.min_epoch = min_epoch + +
+[docs] + def on_epoch_end(self, epoch, logs=None): + current = self.get_monitor_value(logs) + if current is None: + return + if self.monitor_op(current - self.min_delta, self.best): + self.best = current + self.wait = 0 + if self.restore_best_weights: + self.best_weights = self.model.get_weights() + else: + self.wait += 1 + if epoch >= self.min_epoch and self.wait >= self.patience: + self.stopped_epoch = epoch + self.model.stop_training = True + if self.restore_best_weights: + if self.verbose > 0: + print("Restoring model weights from the end of the best epoch.") + self.model.set_weights(self.best_weights)
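For instance, to disallow stopping before epoch 10 regardless of patience:

    early_stop = EarlyStoppingMin(
        monitor="val_pearsonr", mode="max", patience=8, min_epoch=10
    )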
+
+ + + +
+[docs] +class Cyclical1LearningRate(tf.keras.optimizers.schedules.LearningRateSchedule): + """A LearningRateSchedule that uses cyclical schedule. + https://yashuseth.blog/2018/11/26/hyper-parameter-tuning-best-practices-learning-rate-batch-size-momentum-weight-decay/ + + Args: + initial_learning_rate (float): The initial learning rate. + maximal_learning_rate (float): The maximal learning rate after warm up. + final_learning_rate (float): The final learning rate after cycle. + step_size (int): Cycle step size. + name (str, optional): The name of the schedule. Defaults to "Cyclical1LearningRate". + """ + + def __init__( + self, + initial_learning_rate: float, + maximal_learning_rate: float, + final_learning_rate: float, + step_size, + name: str = "Cyclical1LearningRate", + ): + super().__init__() + self.initial_learning_rate = initial_learning_rate + self.maximal_learning_rate = maximal_learning_rate + self.final_learning_rate = final_learning_rate + self.step_size = step_size + self.name = name + + def __call__(self, step): + with tf.name_scope(self.name or "Cyclical1LearningRate"): + initial_learning_rate = tf.convert_to_tensor( + self.initial_learning_rate, name="initial_learning_rate" + ) + dtype = initial_learning_rate.dtype + maximal_learning_rate = tf.cast(self.maximal_learning_rate, dtype) + final_learning_rate = tf.cast(self.final_learning_rate, dtype) + + step_size = tf.cast(self.step_size, dtype) + cycle = tf.floor(1 + step / (2 * step_size)) + x = tf.abs(step / step_size - 2 * cycle + 1) + + lr = tf.where( + step > 2 * step_size, + final_learning_rate, + initial_learning_rate + + (maximal_learning_rate - initial_learning_rate) + * tf.maximum(tf.cast(0, dtype), (1 - x)), + ) + return lr + +
+[docs] + def get_config(self): + return { + "initial_learning_rate": self.initial_learning_rate, + "maximal_learning_rate": self.maximal_learning_rate, + "final_learning_rate": self.final_learning_rate, + "step_size": self.step_size, + }
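Sampling the schedule shows one triangular cycle over 2 * step_size steps, then the final rate:

    import tensorflow as tf

    lr = Cyclical1LearningRate(1e-5, 1e-4, 1e-6, step_size=1000)
    for step in [0, 500, 1000, 1500, 2000, 2001]:
        print(step, float(lr(tf.constant(step, tf.float32))))
    # rises 1e-5 -> 1e-4 by step 1000, falls back by step 2000, then 1e-6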
+
+ + + +
+[docs]
+class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
+    """
+    Applies a warmup schedule on a given learning rate decay schedule.
+    (h/t HuggingFace.)
+
+    Args:
+        initial_learning_rate (:obj:`float`): Initial learning rate after the warmup
+            (so this will be the learning rate at the end of the warmup).
+        decay_schedule (:obj:`Callable`): The learning rate or schedule function to
+            apply after the warmup for the rest of training.
+        warmup_steps (:obj:`int`): The number of steps for the warmup part of training.
+        power (:obj:`float`, `optional`): Power to use for the polynomial warmup
+            (default is a linear warmup).
+        name (:obj:`str`, `optional`): Optional name prefix for the returned tensors
+            during the schedule.
+    """
+
+    def __init__(
+        self,
+        initial_learning_rate: float,
+        warmup_steps: int,
+        decay_schedule: None,
+        power: float = 1.0,
+        name: str = None,
+    ):
+        super().__init__()
+        self.initial_learning_rate = initial_learning_rate
+        self.warmup_steps = warmup_steps
+        self.power = power
+        self.decay_schedule = decay_schedule
+        self.name = name
+
+    def __call__(self, step):
+        with tf.name_scope(self.name or "WarmUp") as name:
+            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
+            # learning rate will be `global_step/num_warmup_steps * init_lr`.
+            global_step_float = tf.cast(step, tf.float32)
+            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
+            warmup_percent_done = global_step_float / warmup_steps_float
+            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(
+                warmup_percent_done, self.power
+            )
+            if callable(self.decay_schedule):
+                warmed_learning_rate = self.decay_schedule(step - self.warmup_steps)
+            else:
+                warmed_learning_rate = self.decay_schedule
+            return tf.cond(
+                global_step_float < warmup_steps_float,
+                lambda: warmup_learning_rate,
+                lambda: warmed_learning_rate,
+                name=name,
+            )
+[docs] + def get_config(self): + return { + "initial_learning_rate": self.initial_learning_rate, + "decay_schedule": self.decay_schedule, + "warmup_steps": self.warmup_steps, + "power": self.power, + "name": self.name, + }
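For example, a linear ramp to a constant rate:

    lr = WarmUp(initial_learning_rate=1e-4, warmup_steps=5000, decay_schedule=1e-4)
    # lr(2500) -> 5e-05 (halfway through warmup); lr(10000) -> 1e-04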
+
+ + + +
+[docs]
+def safe_next(data_iter, retry=5, sleep=10):
+    """Fetch the next batch from an iterator, retrying on
+    tf.errors.AbortedError (seen previously on NFS daemon restarts)."""
+    attempts = 0
+    d = None
+    while d is None and attempts < retry:
+        try:
+            d = next(data_iter)
+        except tf.errors.AbortedError:
+            print(
+                "AbortedError, which has previously indicated NFS daemon restart.",
+                file=sys.stderr,
+            )
+            time.sleep(sleep)
+            attempts += 1
+
+    if d is None:
+        # let it crash
+        d = next(data_iter)
+
+    return d
+ +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/baskerville/vcf.html b/_modules/baskerville/vcf.html new file mode 100644 index 0000000..49e7b47 --- /dev/null +++ b/_modules/baskerville/vcf.html @@ -0,0 +1,864 @@ + + + + + + baskerville.vcf — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for baskerville.vcf

+# Copyright 2017 Calico LLC
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     https://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========================================================================
+import gzip
+import os
+import pdb
+import subprocess
+import sys
+import tempfile
+
+import numpy as np
+import pysam
+
+from baskerville import dna
+
+"""
+vcf.py
+
+Methods and classes to support .vcf SNP analysis.
+"""
+
+
+
+[docs] +def cap_allele(allele, cap=5): + """Cap the length of an allele in the figures.""" + if len(allele) > cap: + allele = allele[:cap] + "*" + return allele
+ + + +
+[docs] +def intersect_seqs_snps(vcf_file, seqs, vision_p=1): + """Intersect a VCF file with a list of sequence coordinates. + + In + vcf_file: + seqs: list of objects w/ chrom, start, end + vision_p: proportion of sequences visible to center genes. + + Out + seqs_snps: list of list mapping segment indexes to overlapping SNP indexes + """ + + # print segments to BED + # hash segments to indexes + seq_temp = tempfile.NamedTemporaryFile() + seq_bed_file = seq_temp.name + seq_bed_out = open(seq_bed_file, "w") + seq_indexes = {} + for si in range(len(seqs)): + sstart = max(0, seqs[si].start) + print("%s\t%d\t%d" % (seqs[si].chrom, sstart, seqs[si].end), file=seq_bed_out) + seq_key = (seqs[si].chrom, sstart, seqs[si].end) + seq_indexes[seq_key] = si + seq_bed_out.close() + + # hash SNPs to indexes + snp_indexes = {} + si = 0 + + vcf_in = open(vcf_file) + line = vcf_in.readline() + while line[0] == "#": + line = vcf_in.readline() + while line: + a = line.split() + snp_id = a[2] + if snp_id in snp_indexes: + raise Exception("Duplicate SNP id %s will break the script" % snp_id) + snp_indexes[snp_id] = si + si += 1 + line = vcf_in.readline() + vcf_in.close() + + # initialize list of lists + seqs_snps = [] + for _ in range(len(seqs)): + seqs_snps.append([]) + + # intersect + p = subprocess.Popen( + "bedtools intersect -wo -a %s -b %s" % (vcf_file, seq_bed_file), + shell=True, + stdout=subprocess.PIPE, + ) + for line in p.stdout: + line = line.decode("UTF-8") + a = line.split() + pos = int(a[1]) + snp_id = a[2] + seq_chrom = a[-4] + seq_start = int(a[-3]) + seq_end = int(a[-2]) + seq_key = (seq_chrom, seq_start, seq_end) + + vision_buffer = (seq_end - seq_start) * (1 - vision_p) // 2 + if seq_start + vision_buffer < pos < seq_end - vision_buffer: + seqs_snps[seq_indexes[seq_key]].append(snp_indexes[snp_id]) + + p.communicate() + + return seqs_snps
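A usage sketch; seqs is assumed to be a list of objects with chrom/start/end attributes, and bedtools must be on the PATH:

    seqs_snps = intersect_seqs_snps("variants.vcf", seqs, vision_p=0.8)
    # seqs_snps[i] lists the indexes of SNPs in the central 80% of sequence i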
+ + + +
+[docs] +def intersect_snps_seqs(vcf_file, seq_coords, vision_p=1): + """Intersect a VCF file with a list of sequence coordinates. + + In + vcf_file: + seq_coords: list of sequence coordinates + vision_p: proportion of sequences visible to center genes. + + Out + snp_segs: list of list mapping SNP indexes to overlapping sequence indexes + """ + # print segments to BED + # hash segments to indexes + seg_temp = tempfile.NamedTemporaryFile() + seg_bed_file = seg_temp.name + seg_bed_out = open(seg_bed_file, "w") + segment_indexes = {} + + for si in range(len(seq_coords)): + segment_indexes[seq_coords[si]] = si + print("%s\t%d\t%d" % seq_coords[si], file=seg_bed_out) + + seg_bed_out.close() + + # hash SNPs to indexes + snp_indexes = {} + si = 0 + + vcf_in = open(vcf_file) + line = vcf_in.readline() + while line[0] == "#": + line = vcf_in.readline() + while line: + a = line.split() + snp_id = a[2] + if snp_id in snp_indexes: + raise Exception("Duplicate SNP id %s will break the script" % snp_id) + snp_indexes[snp_id] = si + si += 1 + line = vcf_in.readline() + vcf_in.close() + + # initialize list of lists + snp_segs = [] + for i in range(len(snp_indexes)): + snp_segs.append([]) + + # intersect + p = subprocess.Popen( + "bedtools intersect -wo -a %s -b %s" % (vcf_file, seg_bed_file), + shell=True, + stdout=subprocess.PIPE, + ) + for line in p.stdout: + line = line.decode("UTF-8") + a = line.split() + pos = int(a[1]) + snp_id = a[2] + seg_chrom = a[-4] + seg_start = int(a[-3]) + seg_end = int(a[-2]) + seg_key = (seg_chrom, seg_start, seg_end) + + vision_buffer = (seg_end - seg_start) * (1 - vision_p) // 2 + if seg_start + vision_buffer < pos < seg_end - vision_buffer: + snp_segs[snp_indexes[snp_id]].append(segment_indexes[seg_key]) + + p.communicate() + + return snp_segs
+ + + +
+[docs] +def snp_seq1(snp, seq_len, genome_open): + """Produce one hot coded sequences for a SNP. + + Attrs: + snp [SNP] : + seq_len (int) : sequence length to code + genome_open (File) : open genome FASTA file + + Return: + seq_vecs_list [array] : list of one hot coded sequences surrounding the + SNP + """ + left_len = seq_len // 2 - 1 + right_len = seq_len // 2 + + # initialize one hot coded vector list + seq_vecs_list = [] + + # specify positions in GFF-style 1-based + seq_start = snp.pos - left_len + seq_end = snp.pos + right_len + max(0, len(snp.ref_allele) - snp.longest_alt()) + + # extract sequence as BED style + if seq_start < 0: + seq = "N" * (1 - seq_start) + genome_open.fetch(snp.chr, 0, seq_end).upper() + else: + seq = genome_open.fetch(snp.chr, seq_start - 1, seq_end).upper() + + # extend to full length + if len(seq) < seq_end - seq_start: + seq += "N" * (seq_end - seq_start - len(seq)) + + # verify that ref allele matches ref sequence + seq_ref = seq[left_len : left_len + len(snp.ref_allele)] + ref_found = True + if seq_ref != snp.ref_allele: + # search for reference allele in alternatives + ref_found = False + + # for each alternative allele + for alt_al in snp.alt_alleles: + # grab reference sequence matching alt length + seq_ref_alt = seq[left_len : left_len + len(alt_al)] + if seq_ref_alt == alt_al: + # found it! + ref_found = True + + # warn user + print( + "WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match." + % (snp.rsid), + file=sys.stderr, + ) + + # remove alt allele and include ref allele + seq = seq[:left_len] + snp.ref_allele + seq[left_len + len(alt_al) :] + break + + if not ref_found: + print( + "WARNING: %s - reference genome does not match any allele" % snp.rsid, + file=sys.stderr, + ) + + else: + # one hot code ref allele + seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len) + seq_vecs_list.append(seq_vecs_ref) + + for alt_al in snp.alt_alleles: + # remove ref allele and include alt allele + seq_alt = seq[:left_len] + alt_al + seq[left_len + len(snp.ref_allele) :] + + # one hot code + seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len) + seq_vecs_list.append(seq_vecs_alt) + + return seq_vecs_list
+ + + +
+[docs]
+def snps_seq1(snps, seq_len, genome_fasta, return_seqs=False):
+    """Produce an array of one hot coded sequences for a list of SNPs.
+
+    Attrs:
+        snps [SNP] : list of SNPs
+        seq_len (int) : sequence length to code
+        genome_fasta (str) : genome FASTA file
+
+    Return:
+        seq_vecs (array) : one hot coded sequences surrounding the SNPs
+        seq_headers [str] : headers for sequences
+        seq_snps [SNP] : list of used SNPs
+    """
+    left_len = seq_len // 2 - 1
+    right_len = seq_len // 2
+
+    # initialize one hot coded vector list
+    seq_vecs_list = []
+
+    # save successful SNPs
+    seq_snps = []
+
+    # save sequence strings, too
+    seqs = []
+
+    # name sequences
+    seq_headers = []
+
+    # open genome FASTA
+    genome_open = pysam.Fastafile(genome_fasta)
+
+    for snp in snps:
+        # specify positions in GFF-style 1-based
+        seq_start = snp.pos - left_len
+        seq_end = snp.pos + right_len + max(0, len(snp.ref_allele) - snp.longest_alt())
+
+        # extract sequence as BED style
+        if seq_start < 0:
+            seq = "N" * (-seq_start) + genome_open.fetch(snp.chr, 0, seq_end).upper()
+        else:
+            seq = genome_open.fetch(snp.chr, seq_start - 1, seq_end).upper()
+
+        # extend to full length
+        if len(seq) < seq_end - seq_start:
+            seq += "N" * (seq_end - seq_start - len(seq))
+
+        # verify that ref allele matches ref sequence
+        seq_ref = seq[left_len : left_len + len(snp.ref_allele)]
+        if seq_ref != snp.ref_allele:
+            # search for reference allele in alternatives
+            ref_found = False
+
+            # for each alternative allele
+            for alt_al in snp.alt_alleles:
+                # grab reference sequence matching alt length
+                seq_ref_alt = seq[left_len : left_len + len(alt_al)]
+                if seq_ref_alt == alt_al:
+                    # found it!
+                    ref_found = True
+
+                    # warn user
+                    print(
+                        "WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match."
+                        % (snp.rsid),
+                        file=sys.stderr,
+                    )
+
+                    # remove alt allele and include ref allele
+                    seq = (
+                        seq[:left_len] + snp.ref_allele + seq[left_len + len(alt_al) :]
+                    )
+                    break
+
+            if not ref_found:
+                print(
+                    "WARNING: %s - reference genome %s does not match any allele; skipping"
+                    % (snp.rsid, seq_ref),
+                    file=sys.stderr,
+                )
+                continue
+
+        seq_snps.append(snp)
+
+        # one hot code ref allele
+        seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len)
+        seq_vecs_list.append(seq_vecs_ref)
+        if return_seqs:
+            seqs.append(seq_ref)
+
+        # name ref allele
+        seq_headers.append("%s_%s" % (snp.rsid, cap_allele(snp.ref_allele)))
+
+        for alt_al in snp.alt_alleles:
+            # remove ref allele and include alt allele
+            seq_alt = seq[:left_len] + alt_al + seq[left_len + len(snp.ref_allele) :]
+
+            # one hot code
+            seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
+            seq_vecs_list.append(seq_vecs_alt)
+            if return_seqs:
+                seqs.append(seq_alt)  # not using right now
+
+            # name
+            seq_headers.append("%s_%s" % (snp.rsid, cap_allele(alt_al)))
+
+    # convert to array
+    seq_vecs = np.array(seq_vecs_list)
+
+    if return_seqs:
+        return seq_vecs, seq_headers, seq_snps, seqs
+    else:
+        return seq_vecs, seq_headers, seq_snps
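Typical usage, with illustrative file paths:

    snps = vcf_snps("variants.vcf")
    seq_vecs, seq_headers, seq_snps = snps_seq1(
        snps, seq_len=131072, genome_fasta="hg38.fa"
    )
    print(seq_vecs.shape)  # (ref + alt alleles across SNPs, 131072, 4)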
+ + + +
+[docs]
+def snps2_seq1(snps, seq_len, genome1_fasta, genome2_fasta, return_seqs=False):
+    """Produce an array of one hot coded sequences for a list of SNPs.
+
+    Attrs:
+        snps [SNP] : list of SNPs
+        seq_len (int) : sequence length to code
+        genome1_fasta (str) : major allele genome FASTA file
+        genome2_fasta (str) : minor allele genome FASTA file
+
+    Return:
+        seq_vecs (array) : one hot coded sequences surrounding the SNPs
+        seq_headers [str] : headers for sequences
+        seq_snps [SNP] : list of used SNPs
+    """
+    left_len = seq_len // 2 - 1
+    right_len = seq_len // 2
+
+    # open genome FASTA
+    genome1 = pysam.Fastafile(genome1_fasta)
+    genome2 = pysam.Fastafile(genome2_fasta)
+
+    # initialize one hot coded vector list
+    seq_vecs_list = []
+
+    # save successful SNPs
+    seq_snps = []
+
+    # save sequence strings, too
+    seqs = []
+
+    # name sequences
+    seq_headers = []
+
+    for snp in snps:
+        if len(snp.alt_alleles) > 1:
+            raise Exception(
+                "Major/minor genome mode requires only two alleles: %s" % snp.rsid
+            )
+
+        alt_al = snp.alt_alleles[0]
+
+        # specify positions in GFF-style 1-based
+        seq_start = snp.pos - left_len
+        seq_end = snp.pos + right_len + len(snp.ref_allele)
+
+        # extract sequence as BED style
+        if seq_start < 0:
+            seq_ref = "N" * (-seq_start) + genome1.fetch(snp.chr, 0, seq_end).upper()
+        else:
+            seq_ref = genome1.fetch(snp.chr, seq_start - 1, seq_end).upper()
+
+        # extend to full length
+        if len(seq_ref) < seq_end - seq_start:
+            seq_ref += "N" * (seq_end - seq_start - len(seq_ref))
+
+        # verify that ref allele matches ref sequence
+        seq_ref_snp = seq_ref[left_len : left_len + len(snp.ref_allele)]
+        if seq_ref_snp != snp.ref_allele:
+            raise Exception(
+                "WARNING: Major allele SNP %s doesn't match reference genome: %s vs %s"
+                % (snp.rsid, snp.ref_allele, seq_ref_snp)
+            )
+
+        # specify positions in GFF-style 1-based
+        seq_start = snp.pos2 - left_len
+        seq_end = snp.pos2 + right_len + len(alt_al)
+
+        # extract sequence as BED style
+        if seq_start < 0:
+            seq_alt = "N" * (-seq_start) + genome2.fetch(snp.chr, 0, seq_end).upper()
+        else:
+            seq_alt = genome2.fetch(snp.chr, seq_start - 1, seq_end).upper()
+
+        # extend to full length
+        if len(seq_alt) < seq_end - seq_start:
+            seq_alt += "N" * (seq_end - seq_start - len(seq_alt))
+
+        # verify that alt allele matches alt sequence
+        seq_alt_snp = seq_alt[left_len : left_len + len(alt_al)]
+        if seq_alt_snp != alt_al:
+            raise Exception(
+                "WARNING: Minor allele SNP %s doesn't match reference genome: %s vs %s"
+                % (snp.rsid, snp.alt_alleles[0], seq_alt_snp)
+            )
+
+        seq_snps.append(snp)
+
+        # one hot code ref allele
+        seq_vecs_ref, seq_ref = dna_length_1hot(seq_ref, seq_len)
+        seq_vecs_list.append(seq_vecs_ref)
+        if return_seqs:
+            seqs.append(seq_ref)
+
+        # name ref allele
+        seq_headers.append("%s_%s" % (snp.rsid, cap_allele(snp.ref_allele)))
+
+        # one hot code alt allele
+        seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
+        seq_vecs_list.append(seq_vecs_alt)
+        if return_seqs:
+            seqs.append(seq_alt)
+
+        # name
+        seq_headers.append("%s_%s" % (snp.rsid, cap_allele(alt_al)))
+
+    # convert to array
+    seq_vecs = np.array(seq_vecs_list)
+
+    if return_seqs:
+        return seq_vecs, seq_headers, seq_snps, seqs
+    else:
+        return seq_vecs, seq_headers, seq_snps
+ + + +
+[docs] +def dna_length_1hot(seq, length): + """Adjust the sequence length and compute + a 1hot coding.""" + + if length < len(seq): + # trim the sequence + seq_trim = (len(seq) - length) // 2 + seq = seq[seq_trim : seq_trim + length] + + elif length > len(seq): + # extend with N's + nfront = (length - len(seq)) // 2 + nback = length - len(seq) - nfront + seq = "N" * nfront + seq + "N" * nback + + # n_uniform required to avoid different + # random nucleotides for each allele + seq_1hot = dna.dna_1hot(seq, n_uniform=True) + + return seq_1hot, seq
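For example, padding a short sequence (assuming dna.dna_1hot returns an (L, 4) array):

    seq_1hot, seq = dna_length_1hot("ACGT", 10)
    print(seq)             # NNNACGTNNN
    print(seq_1hot.shape)  # (10, 4)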
+ + + +
+[docs] +def vcf_count(vcf_file): + """Count SNPs in a VCF file""" + if vcf_file[-3:] == ".gz": + vcf_in = gzip.open(vcf_file, "rt") + else: + vcf_in = open(vcf_file) + + # read through header + line = vcf_in.readline() + while line[0] == "#": + line = vcf_in.readline() + + # count SNPs + num_snps = 0 + while line: + num_snps += 1 + line = vcf_in.readline() + + vcf_in.close() + + return num_snps
+ + + +
+[docs] +def vcf_snps( + vcf_file, + require_sorted=False, + validate_ref_fasta=None, + flip_ref=False, + pos2=False, + start_i=None, + end_i=None, +): + """Load SNPs from a VCF file""" + if vcf_file[-3:] == ".gz": + vcf_in = gzip.open(vcf_file, "rt") + else: + vcf_in = open(vcf_file) + + # read through header + line = vcf_in.readline() + while line[0] == "#": + line = vcf_in.readline() + + # to check sorted + if require_sorted: + seen_chrs = set() + prev_chr = None + prev_pos = -1 + + # to check reference + if validate_ref_fasta is not None: + genome_open = pysam.Fastafile(validate_ref_fasta) + + # read in SNPs + snps = [] + si = 0 + while line: + if start_i is None or start_i <= si < end_i: + snps.append(SNP(line, pos2)) + + if require_sorted: + if prev_chr is not None: + # same chromosome + if prev_chr == snps[-1].chr: + if snps[-1].pos < prev_pos: + print( + "Sorted VCF required. Mis-ordered position: %s" + % line.rstrip(), + file=sys.stderr, + ) + exit(1) + elif snps[-1].chr in seen_chrs: + print( + "Sorted VCF required. Mis-ordered chromosome: %s" + % line.rstrip(), + file=sys.stderr, + ) + exit(1) + + seen_chrs.add(snps[-1].chr) + prev_chr = snps[-1].chr + prev_pos = snps[-1].pos + + if validate_ref_fasta is not None: + ref_n = len(snps[-1].ref_allele) + snp_pos = snps[-1].pos - 1 + ref_snp = genome_open.fetch( + snps[-1].chr, snp_pos, snp_pos + ref_n + ).upper() + if snps[-1].ref_allele != ref_snp: + if not flip_ref: + # bail + print( + "ERROR: %s does not match reference %s" + % (snps[-1], ref_snp), + file=sys.stderr, + ) + exit(1) + + else: + alt_n = len(snps[-1].alt_alleles[0]) + ref_snp = genome_open.fetch( + snps[-1].chr, snp_pos, snp_pos + alt_n + ).upper() + + # if alt matches fasta reference + if snps[-1].alt_alleles[0] == ref_snp: + # flip alleles + snps[-1].flip_alleles() + + else: + # bail + print( + "ERROR: %s does not match reference %s" + % (snps[-1], ref_snp), + file=sys.stderr, + ) + exit(1) + + si += 1 + line = vcf_in.readline() + + vcf_in.close() + + return snps
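For example, enforcing sort order and flipping alleles whose ALT matches the reference genome (paths illustrative):

    snps = vcf_snps(
        "variants.vcf",
        require_sorted=True,
        validate_ref_fasta="hg38.fa",
        flip_ref=True,
    )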
+ + + +
+[docs] +def vcf_sort(vcf_file): + # move + os.rename(vcf_file, "%s.tmp" % vcf_file) + + # print header + vcf_out = open(vcf_file, "w") + print("##fileformat=VCFv4.0", file=vcf_out) + vcf_out.close() + + # sort + subprocess.call("bedtools sort -i %s.tmp >> %s" % (vcf_file, vcf_file), shell=True) + + # clean + os.remove("%s.tmp" % vcf_file)
+ + + +
+[docs]
+class SNP:
+    """SNP
+
+    Represents a SNP read in from a VCF file.
+
+    Attributes:
+        chr (str): Chromosome, prefixed with "chr" if absent.
+        pos (int): 1-based position.
+        rsid (str): SNP id, or "chr:pos" when missing.
+        ref_allele (str): Reference allele.
+        alt_alleles ([str]): Alternative alleles.
+        alt_allele (str): First alternative allele.
+        flipped (bool): Whether ref/alt have been flipped.
+        pos2 (int): Second genome's position, when pos2=True.
+    """
+
+    def __init__(self, vcf_line, pos2=False):
+        a = vcf_line.split()
+        if a[0].startswith("chr"):
+            self.chr = a[0]
+        else:
+            self.chr = "chr%s" % a[0]
+        self.pos = int(a[1])
+        self.rsid = a[2]
+        self.ref_allele = a[3]
+        self.alt_alleles = a[4].split(",")
+        self.alt_allele = self.alt_alleles[0]
+        self.flipped = False
+
+        if self.rsid == ".":
+            self.rsid = "%s:%d" % (self.chr, self.pos)
+
+        self.pos2 = None
+        if pos2:
+            self.pos2 = int(a[5])
+[docs] + def flip_alleles(self): + """Flip reference and first alt allele.""" + assert len(self.alt_alleles) == 1 + self.ref_allele, self.alt_alleles[0] = self.alt_alleles[0], self.ref_allele + self.alt_allele = self.alt_alleles[0] + self.flipped = True
+ + +
+[docs] + def get_alleles(self): + """Return a list of all alleles""" + alleles = [self.ref_allele] + self.alt_alleles + return alleles
+ + +
+[docs] + def indel_size(self): + """Return the size of the indel.""" + return len(self.alt_allele) - len(self.ref_allele)
+ + +
+[docs] + def longest_alt(self): + """Return the longest alt allele.""" + return max([len(al) for al in self.alt_alleles])
+ + + def __str__(self): + return "SNP(%s, %s:%d, %s/%s)" % ( + self.rsid, + self.chr, + self.pos, + self.ref_allele, + ",".join(self.alt_alleles), + )
+ +
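Parsing a single whitespace-delimited VCF record, for illustration:

    snp = SNP("1\t1000\t.\tA\tG,T")
    print(snp)                # SNP(chr1:1000, chr1:1000, A/G,T)
    print(snp.get_alleles())  # ['A', 'G', 'T']
    print(snp.indel_size())   # 0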
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 0000000..2668e51 --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,113 @@ + + + + + + Overview: module code — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/_sources/baskerville.rst.txt b/_sources/baskerville.rst.txt new file mode 100644 index 0000000..e22d88e --- /dev/null +++ b/_sources/baskerville.rst.txt @@ -0,0 +1,101 @@ +baskerville package +=================== + +Submodules +---------- + +baskerville.bed module +---------------------- + +.. automodule:: baskerville.bed + :members: + :undoc-members: + :show-inheritance: + +baskerville.blocks module +------------------------- + +.. automodule:: baskerville.blocks + :members: + :undoc-members: + :show-inheritance: + +baskerville.dataset module +-------------------------- + +.. automodule:: baskerville.dataset + :members: + :undoc-members: + :show-inheritance: + +baskerville.dna module +---------------------- + +.. automodule:: baskerville.dna + :members: + :undoc-members: + :show-inheritance: + +baskerville.gene module +----------------------- + +.. automodule:: baskerville.gene + :members: + :undoc-members: + :show-inheritance: + +baskerville.layers module +------------------------- + +.. automodule:: baskerville.layers + :members: + :undoc-members: + :show-inheritance: + +baskerville.metrics module +-------------------------- + +.. automodule:: baskerville.metrics + :members: + :undoc-members: + :show-inheritance: + +baskerville.seqnn module +------------------------ + +.. automodule:: baskerville.seqnn + :members: + :undoc-members: + :show-inheritance: + +baskerville.snps module +----------------------- + +.. automodule:: baskerville.snps + :members: + :undoc-members: + :show-inheritance: + +baskerville.trainer module +-------------------------- + +.. automodule:: baskerville.trainer + :members: + :undoc-members: + :show-inheritance: + +baskerville.vcf module +---------------------- + +.. automodule:: baskerville.vcf + :members: + :undoc-members: + :show-inheritance: + +Module contents +--------------- + +.. automodule:: baskerville + :members: + :undoc-members: + :show-inheritance: diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 0000000..0ad687f --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,21 @@ +.. baskerville documentation master file, created by + sphinx-quickstart on Mon Dec 11 21:48:46 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to baskerville's documentation! +======================================= + +.. toctree:: + :maxdepth: 4 + :caption: Contents: + + baskerville + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 0000000..8141580 --- /dev/null +++ b/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. 
Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 0000000..30fee9d --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,925 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 
8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + 
+dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} 
+ +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/css/badge_only.css b/_static/css/badge_only.css new file mode 100644 index 0000000..c718cee --- /dev/null +++ b/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd 
a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff b/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 0000000..6cb6000 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff2 b/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 0000000..7059e23 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff b/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 0000000..f815f63 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff2 b/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 0000000..f2c76e5 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/_static/css/fonts/fontawesome-webfont.eot b/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000..e9f60ca Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.eot differ diff --git a/_static/css/fonts/fontawesome-webfont.svg b/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000..855c845 --- /dev/null +++ b/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
[fontawesome-webfont.svg: ~2,671 lines of SVG glyph markup stripped during extraction; only the '+' diff markers survived]
diff --git a/_static/css/fonts/fontawesome-webfont.ttf b/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.ttf differ diff --git a/_static/css/fonts/fontawesome-webfont.woff b/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/_static/css/fonts/fontawesome-webfont.woff2 b/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/_static/css/fonts/lato-bold-italic.woff b/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 0000000..88ad05b Binary files /dev/null and b/_static/css/fonts/lato-bold-italic.woff differ diff --git a/_static/css/fonts/lato-bold-italic.woff2 b/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 0000000..c4e3d80 Binary files /dev/null and b/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/_static/css/fonts/lato-bold.woff b/_static/css/fonts/lato-bold.woff new file mode 100644 index 0000000..c6dff51 Binary files /dev/null and b/_static/css/fonts/lato-bold.woff differ diff --git a/_static/css/fonts/lato-bold.woff2 b/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 0000000..bb19504 Binary files /dev/null and b/_static/css/fonts/lato-bold.woff2 differ diff --git a/_static/css/fonts/lato-normal-italic.woff b/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 0000000..76114bc Binary files /dev/null and b/_static/css/fonts/lato-normal-italic.woff differ diff --git a/_static/css/fonts/lato-normal-italic.woff2 b/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 0000000..3404f37 Binary files /dev/null and b/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/_static/css/fonts/lato-normal.woff b/_static/css/fonts/lato-normal.woff new file mode 100644 index 0000000..ae1307f Binary files /dev/null and
b/_static/css/fonts/lato-normal.woff differ diff --git a/_static/css/fonts/lato-normal.woff2 b/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 0000000..3bf9843 Binary files /dev/null and b/_static/css/fonts/lato-normal.woff2 differ diff --git a/_static/css/theme.css b/_static/css/theme.css new file mode 100644 index 0000000..19a446a --- /dev/null +++ b/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content 
.attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 
FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt 
.pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown 
.caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-elli
psis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li 
button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.f
a-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-trip
advisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:b
efore,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption .headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a 
.admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a .rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn .headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content 
table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav .fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content 
.fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download .btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s 
ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content 
.wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content 
.wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content 
.wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen 
and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search 
input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 .3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group 
.wy-form-halves input[type=url],.wy-control-group .wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em 
.625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch 
span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form 
input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive 
table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol 
li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 
8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s 
ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions 
.rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and 
(max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content .toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso 
.last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li 
ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content 
.footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 
.rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content 
.wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 .rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content 
code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content .sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 0000000..d06a71d --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. 
+ */
+const Documentation = {
+  init: () => {
+    Documentation.initDomainIndexTable();
+    Documentation.initOnKeyListeners();
+  },
+
+  /**
+   * i18n support
+   */
+  TRANSLATIONS: {},
+  PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
+  LOCALE: "unknown",
+
+  // gettext and ngettext don't access this so that the functions
+  // can safely be bound to a different name (_ = Documentation.gettext)
+  gettext: (string) => {
+    const translated = Documentation.TRANSLATIONS[string];
+    switch (typeof translated) {
+      case "undefined":
+        return string; // no translation
+      case "string":
+        return translated; // translation exists
+      default:
+        return translated[0]; // (singular, plural) translation tuple exists
+    }
+  },
+
+  ngettext: (singular, plural, n) => {
+    const translated = Documentation.TRANSLATIONS[singular];
+    if (typeof translated !== "undefined")
+      return translated[Documentation.PLURAL_EXPR(n)];
+    return n === 1 ? singular : plural;
+  },
+
+  addTranslations: (catalog) => {
+    Object.assign(Documentation.TRANSLATIONS, catalog.messages);
+    Documentation.PLURAL_EXPR = new Function(
+      "n",
+      `return (${catalog.plural_expr})`
+    );
+    Documentation.LOCALE = catalog.locale;
+  },
+
+  /**
+   * helper function to focus on search bar
+   */
+  focusSearchBar: () => {
+    document.querySelectorAll("input[name=q]")[0]?.focus();
+  },
+
+  /**
+   * Initialise the domain index toggle buttons
+   */
+  initDomainIndexTable: () => {
+    const toggler = (el) => {
+      const idNumber = el.id.substr(7);
+      const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
+      if (el.src.substr(-9) === "minus.png") {
+        el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
+        toggledRows.forEach((el) => (el.style.display = "none"));
+      } else {
+        el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
+        toggledRows.forEach((el) => (el.style.display = ""));
+      }
+    };
+
+    const togglerElements = document.querySelectorAll("img.toggler");
+    togglerElements.forEach((el) =>
+      el.addEventListener("click", (event) => toggler(event.currentTarget))
+    );
+    togglerElements.forEach((el) => (el.style.display = ""));
+    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
+  },
+
+  initOnKeyListeners: () => {
+    // only install a listener if it is really needed
+    if (
+      !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
+      !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
+    )
+      return;
+
+    document.addEventListener("keydown", (event) => {
+      // bail for input elements
+      if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName))
+        return;
+      // bail with special keys
+      if (event.altKey || event.ctrlKey || event.metaKey) return;
+
+      if (!event.shiftKey) {
+        switch (event.key) {
+          case "ArrowLeft":
+            if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+            const prevLink = document.querySelector('link[rel="prev"]');
+            if (prevLink && prevLink.href) {
+              window.location.href = prevLink.href;
+              event.preventDefault();
+            }
+            break;
+          case "ArrowRight":
+            if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+            const nextLink = document.querySelector('link[rel="next"]');
+            if (nextLink && nextLink.href) {
+              window.location.href = nextLink.href;
+              event.preventDefault();
+            }
+            break;
+        }
+      }
+
+      // some keyboard layouts may need Shift to get /
+      switch (event.key) {
+        case "/":
+          if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
+          Documentation.focusSearchBar();
+          event.preventDefault();
+      }
+    });
+  },
+};
+
+// quick alias for translations
+const _ = Documentation.gettext;
+
+_ready(Documentation.init);
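For orientation, `Documentation.addTranslations` above consumes the translation catalog that Sphinx emits for localized builds (written as `_static/translations.js`); only the catalog's shape (`messages`, `plural_expr`, `locale`) is taken from the code itself. A minimal sketch with invented German entries, assuming a numeric plural expression:

// Hypothetical catalog; field names follow addTranslations above,
// the entries and locale are invented for illustration.
Documentation.addTranslations({
  messages: {
    "Search": "Suche",                              // simple string translation
    "%s result": ["%s Ergebnis", "%s Ergebnisse"],  // (singular, plural) tuple
  },
  plural_expr: "n === 1 ? 0 : 1",  // compiled via new Function("n", ...)
  locale: "de",
});

Documentation.gettext("Search");                       // -> "Suche"
Documentation.ngettext("%s result", "%s results", 2);  // -> "%s Ergebnisse"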
diff --git a/_static/documentation_options.js b/_static/documentation_options.js
new file mode 100644
index 0000000..d1f2291
--- /dev/null
+++ b/_static/documentation_options.js
@@ -0,0 +1,13 @@
+const DOCUMENTATION_OPTIONS = {
+    VERSION: '0.0.1',
+    LANGUAGE: 'en',
+    COLLAPSE_INDEX: false,
+    BUILDER: 'html',
+    FILE_SUFFIX: '.html',
+    LINK_SUFFIX: '.html',
+    HAS_SOURCE: true,
+    SOURCELINK_SUFFIX: '.txt',
+    NAVIGATION_WITH_KEYS: false,
+    SHOW_SEARCH_SUMMARY: true,
+    ENABLE_SEARCH_SHORTCUTS: true,
+};
\ No newline at end of file
diff --git a/_static/file.png b/_static/file.png
new file mode 100644
index 0000000..a858a41
Binary files /dev/null and b/_static/file.png differ
diff --git a/_static/jquery.js b/_static/jquery.js
new file mode 100644
index 0000000..c4c6022
--- /dev/null
+++ b/_static/jquery.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */
[... minified jQuery 3.6.0 source, corrupted in extraction (HTML tags inside its string literals were stripped); omitted ...]
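Stepping back to `_static/documentation_options.js` above: these flags are read by the scripts loaded after it. For example, `doctools.js` installs its `keydown` listener only when `NAVIGATION_WITH_KEYS` or `ENABLE_SEARCH_SHORTCUTS` is set, and `COLLAPSE_INDEX` drives the domain-index togglers. A minimal sketch of flipping a flag client-side; overriding via an extra inline script is an assumption for illustration, not something this build ships:

// Hypothetical snippet placed after documentation_options.js but before
// doctools.js: turns on prev/next page navigation with the arrow keys.
DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS = true;

In a real Sphinx project the same effect would normally be configured at build time (many themes expose a `navigation_with_keys` option) rather than by patching the generated file.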
this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof 
a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/_static/js/html5shiv.min.js b/_static/js/html5shiv.min.js new file mode 100644 index 0000000..cd1c674 --- /dev/null +++ b/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/_static/js/theme.js b/_static/js/theme.js new file mode 100644 index 0000000..1fddb6e --- /dev/null +++ b/_static/js/theme.js @@ -0,0 +1 @@ +!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 0000000..d96755f Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 0000000..7107cec Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 0000000..84ab303 --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; 
padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #008000; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #9C6500 } /* Comment.Preproc */ +.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #E40000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #008400 } /* Generic.Inserted */ +.highlight .go { color: #717171 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #008000 } /* Keyword.Pseudo */ +.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #B00040 } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BA2121 } /* Literal.String */ +.highlight .na { color: #687822 } /* Name.Attribute */ +.highlight .nb { color: #008000 } /* Name.Builtin */ +.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #0000FF } /* Name.Function */ +.highlight .nl { color: #767600 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #19177C } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #666666 } /* Literal.Number.Bin */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sa { color: #BA2121 } /* Literal.String.Affix */ +.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */ +.highlight .sc { color: #BA2121 } /* Literal.String.Char */ +.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.highlight .sd { color: #BA2121; font-style: italic } /* 
Literal.String.Doc */ +.highlight .s2 { color: #BA2121 } /* Literal.String.Double */ +.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #A45A77 } /* Literal.String.Regex */ +.highlight .s1 { color: #BA2121 } /* Literal.String.Single */ +.highlight .ss { color: #19177C } /* Literal.String.Symbol */ +.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #0000FF } /* Name.Function.Magic */ +.highlight .vc { color: #19177C } /* Name.Variable.Class */ +.highlight .vg { color: #19177C } /* Name.Variable.Global */ +.highlight .vi { color: #19177C } /* Name.Variable.Instance */ +.highlight .vm { color: #19177C } /* Name.Variable.Magic */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/searchtools.js b/_static/searchtools.js new file mode 100644 index 0000000..7918c3f --- /dev/null +++ b/_static/searchtools.js @@ -0,0 +1,574 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. 
+ * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && 
!terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js new file mode 100644 index 0000000..8a96c69 --- /dev/null +++ b/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/baskerville.html b/baskerville.html new file mode 100644 index 0000000..4efb173 --- /dev/null +++ b/baskerville.html @@ -0,0 +1,4449 @@ + + + + + + + baskerville package — baskerville 0.0.1 documentation + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

baskerville package

Submodules

baskerville.bed module

baskerville.bed.make_bed_seqs(bed_file, fasta_file, seq_len, stranded=False)[source]

Return BED regions as sequences and as a list of coordinate tuples, extended to a specified length.

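For example, a minimal usage sketch; the BED and FASTA paths below are placeholders, not files shipped with the package:

# Hypothetical usage; peaks.bed and hg38.fa are placeholder paths.
from baskerville.bed import make_bed_seqs

seqs_dna, seqs_coords = make_bed_seqs(
    "peaks.bed", "hg38.fa", seq_len=1024, stranded=False
)
# seqs_dna[i] is a 1024 bp sequence string;
# seqs_coords[i] is the (chrom, start, end) tuple of the extended region.
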
baskerville.bed.read_bed_coords(bed_file, seq_len)[source]

Return BED regions as a list of coordinate tuples, extended to a specified length.

baskerville.bed.write_bedgraph(preds, targets, data_dir: str, out_dir: str, split_label: str, bedgraph_indexes=None)[source]

Write BEDgraph files for predictions and targets from a dataset.

Parameters:
  • preds (np.array) – Predictions.
  • targets (np.array) – Targets.
  • data_dir (str) – Data directory, for identifying sequences and statistics.
  • out_dir (str) – Output directory.
  • split_label (str) – Split label.
  • bedgraph_indexes (list) – List of target indexes to write.

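An illustrative call is sketched below; the array shapes, paths, and split label are assumptions, and data_dir is expected to hold the dataset metadata the function reads:

# Illustrative only: shapes, paths, and split label are assumptions.
import numpy as np
from baskerville.bed import write_bedgraph

preds = np.random.rand(8, 1024, 4)      # (sequences, positions, targets)
targets = np.random.rand(8, 1024, 4)
write_bedgraph(preds, targets, data_dir="data", out_dir="bedgraph_out",
               split_label="test", bedgraph_indexes=[0, 1])
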
baskerville.blocks module

baskerville.blocks.center_average(inputs, center, **kwargs)[source]

baskerville.blocks.center_slice(inputs, center, **kwargs)[source]

baskerville.blocks.concat_dist_2d(inputs, **kwargs)[source]

baskerville.blocks.concat_position(inputs, transform='abs', power=1, **kwargs)[source]

baskerville.blocks.conv_block(inputs, filters=None, kernel_size=1, activation='relu', activation_end=None, stride=1, dilation_rate=1, l2_scale=0, dropout=0, conv_type='standard', pool_size=1, pool_type='max', norm_type=None, bn_momentum=0.99, norm_gamma=None, residual=False, kernel_initializer='he_normal', padding='same')[source]

Construct a single convolution block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters – Conv1D filters
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – Conv1D stride
  • dilation_rate – Conv1D dilation rate
  • l2_scale – L2 regularization weight.
  • dropout – Dropout rate probability
  • conv_type – Conv1D layer type
  • residual – Residual connection boolean
  • pool_size – Max pool width
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum
  • norm_gamma – BatchNorm gamma (defaults according to residual)

Returns:
  [batch_size, seq_length, features] output sequence

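The shapes above suggest a Keras functional-style pipeline; a minimal sketch follows, in which the sequence length, filter count, and pooling are illustrative rather than defaults:

# Illustrative composition; sequence length and filters are assumptions.
import tensorflow as tf
from baskerville import blocks

seq = tf.keras.Input(shape=(16384, 4))   # one-hot DNA: [seq_length, 4]
x = blocks.conv_block(seq, filters=64, kernel_size=15,
                      pool_size=2, norm_type='batch')
# x: [batch_size, 8192, 64] after the width-2 max pool
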
baskerville.blocks.conv_block_2d(inputs, filters=128, activation='relu', conv_type='standard', kernel_size=1, stride=1, dilation_rate=1, l2_scale=0, dropout=0, pool_size=1, norm_type=None, bn_momentum=0.99, norm_gamma='ones', kernel_initializer='he_normal', symmetric=False)[source]

Construct a single 2D convolution block.

baskerville.blocks.conv_dna(inputs, filters=None, kernel_size=15, activation='relu', stride=1, l2_scale=0, residual=False, dropout=0, dropout_residual=0, pool_size=1, pool_type='max', norm_type=None, bn_momentum=0.99, norm_gamma=None, use_bias=None, se=False, conv_type='standard', kernel_initializer='he_normal', padding='same')[source]

Construct a single convolution block, assumed to be operating on DNA.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters – Conv1D filters
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – Conv1D stride
  • l2_scale – L2 regularization weight.
  • dropout – Dropout rate probability
  • conv_type – Conv1D layer type
  • pool_size – Max pool width
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.conv_nac(inputs, filters=None, kernel_size=1, activation='relu', stride=1, dilation_rate=1, l2_scale=0, dropout=0, conv_type='standard', residual=False, pool_size=1, pool_type='max', norm_type=None, bn_momentum=0.99, norm_gamma=None, kernel_initializer='he_normal', padding='same', se=False)[source]

Construct a single convolution block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters – Conv1D filters
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – Conv1D stride
  • dilation_rate – Conv1D dilation rate
  • l2_scale – L2 regularization weight.
  • dropout – Dropout rate probability
  • conv_type – Conv1D layer type
  • residual – Residual connection boolean
  • pool_size – Max pool width
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.conv_next(inputs, filters=None, kernel_size=7, activation='relu', dense_expansion=2.0, dilation_rate=1, l2_scale=0, dropout=0, residual=False, pool_size=1, pool_type='max', kernel_initializer='he_normal', padding='same', norm_type=None, bn_momentum=0.99)[source]

Construct a single ConvNeXt-style convolution block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters – Conv1D filters
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • dilation_rate – Conv1D dilation rate
  • l2_scale – L2 regularization weight.
  • dropout – Dropout rate probability
  • residual – Residual connection boolean
  • pool_size – Max pool width
  • bn_momentum – BatchNorm momentum

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.conv_tower(inputs, filters_init, filters_end=None, filters_mult=None, divisible_by=1, repeat=1, reprs=[], **kwargs)[source]

Construct a reducing tower of convolution blocks.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters_init – Initial Conv1D filters
  • filters_end – End Conv1D filters
  • filters_mult – Multiplier for Conv1D filters
  • divisible_by – Round filters to be divisible by (e.g., a power of two)
  • repeat – Tower repetitions

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.conv_tower_nac(inputs, filters_init, filters_end=None, filters_mult=None, divisible_by=1, repeat=1, reprs=[], **kwargs)[source]

Construct a reducing tower of convolution blocks.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters_init – Initial Conv1D filters
  • filters_end – End Conv1D filters
  • filters_mult – Multiplier for Conv1D filters
  • divisible_by – Round filters to be divisible by (e.g., a power of two)
  • repeat – Tower repetitions
  • reprs – Append representations.

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.conv_tower_v1(inputs, filters_init, filters_mult=1, repeat=1, **kwargs)[source]

Construct a reducing tower of convolution blocks.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters_init – Initial Conv1D filters
  • filters_mult – Multiplier for Conv1D filters
  • repeat – Conv block repetitions

Returns:
  [batch_size, seq_length, features] output sequence

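For the tower variants above, filters presumably grow geometrically with filters_mult across repetitions; a sketch with illustrative values, assuming **kwargs (e.g., pool_size) pass through to the underlying conv blocks:

# Illustrative tower; parameter values and kwargs passthrough are assumptions.
import tensorflow as tf
from baskerville import blocks

x = tf.keras.Input(shape=(16384, 64))
x = blocks.conv_tower(x, filters_init=64, filters_mult=1.5,
                      divisible_by=8, repeat=4, pool_size=2)
# Presumed filter schedule: 64, 96, 144, 216 (each rounded to a multiple
# of divisible_by), with seq_length halved by pooling at each repetition.
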
baskerville.blocks.convnext_tower(inputs, filters_init, filters_end=None, filters_mult=None, kernel_size=1, dropout=0, pool_size=2, pool_type='max', divisible_by=1, repeat=1, num_convs=2, reprs=[], **kwargs)[source]

Construct a tower of residual ConvNeXt-style convolution blocks.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters_init – Initial Conv1D filters
  • filters_end – End Conv1D filters
  • filters_mult – Multiplier for Conv1D filters
  • kernel_size – Conv1D kernel_size
  • dropout – Dropout on subsequent convolution blocks.
  • pool_size – Pool width.
  • repeat – Residual block repetitions
  • num_convs – Conv blocks per residual layer

Returns:
  [batch_size, seq_length, features] output sequence

baskerville.blocks.cropping_2d(inputs, cropping, **kwargs)[source]

+
+baskerville.blocks.dense_block(inputs, units=None, activation='relu', activation_end=None, flatten=False, dropout=0, l2_scale=0, l1_scale=0, residual=False, norm_type=None, bn_momentum=0.99, norm_gamma=None, kernel_initializer='he_normal', **kwargs)[source]

Construct a single dense block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • units – Dense units
  • activation – relu/gelu/etc
  • activation_end – Compute activation after the other operations
  • flatten – Flatten across positional axis
  • dropout – Dropout rate probability
  • l2_scale – L2 regularization weight
  • l1_scale – L1 regularization weight
  • residual – Residual connection boolean
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum
  • norm_gamma – BatchNorm gamma (defaults according to residual)

Returns:
  [batch_size, seq_length(?), features] output sequence

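A short sketch of stacking a dense block on a pooled representation (illustrative values; `current` is assumed to be a [batch, length, features] tensor such as the conv tower output above):

from baskerville import blocks

# Project each position to 256 units with gelu and light dropout.
current = blocks.dense_block(
    current,
    units=256,
    activation="gelu",
    dropout=0.1,
)
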
+baskerville.blocks.dense_nac(inputs, units=None, activation='relu', flatten=False, dropout=0, l2_scale=0, l1_scale=0, residual=False, norm_type=None, bn_momentum=0.99, norm_gamma=None, kernel_initializer='he_normal', **kwargs)[source]

Construct a single dense block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • units – Dense units
  • activation – relu/gelu/etc
  • flatten – Flatten across positional axis
  • dropout – Dropout rate probability
  • l2_scale – L2 regularization weight
  • l1_scale – L1 regularization weight
  • residual – Residual connection boolean
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum
  • norm_gamma – BatchNorm gamma (defaults according to residual)

Returns:
  [batch_size, seq_length(?), features] output sequence

+baskerville.blocks.dilated_dense(inputs, filters, kernel_size=3, rate_mult=2, conv_type='standard', dropout=0, repeat=1, **kwargs)[source]

Construct a residual dilated dense block.

+baskerville.blocks.dilated_residual(inputs, filters, kernel_size=3, rate_mult=2, dropout=0, repeat=1, conv_type='standard', norm_type=None, round=False, **kwargs)[source]

Construct a residual dilated convolution block.

+baskerville.blocks.dilated_residual_2d(inputs, filters, kernel_size=3, rate_mult=2, dropout=0, repeat=1, symmetric=True, **kwargs)[source]

Construct a residual dilated convolution block.

+baskerville.blocks.dilated_residual_nac(inputs, filters, kernel_size=3, rate_mult=2, dropout=0, repeat=1, **kwargs)[source]

Construct a residual dilated convolution block.

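The dilated blocks are typically repeated with a geometrically growing dilation rate; a hedged sketch (parameter values illustrative):

from baskerville import blocks

# Four residual dilated conv blocks; rate_mult=2 doubles the dilation
# rate at each repetition, widening the receptive field.
current = blocks.dilated_residual(
    current,
    filters=96,
    kernel_size=3,
    rate_mult=2,
    dropout=0.25,
    repeat=4,
)
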
+baskerville.blocks.factor_inverse(inputs, components_file, **kwargs)[source]

+baskerville.blocks.final(inputs, units, activation='linear', flatten=False, kernel_initializer='he_normal', l2_scale=0, l1_scale=0, **kwargs)[source]

Final simple transformation before comparison to targets.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • units – Dense units
  • activation – relu/gelu/etc
  • flatten – Flatten positional axis.
  • l2_scale – L2 regularization weight
  • l1_scale – L1 regularization weight

Returns:
  [batch_size, seq_length(?), units] output sequence

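A sketch of the final head (the unit count is illustrative; passing activation="softplus" assumes the Softplus layer in baskerville.layers is reachable through the activation string, which the docs above do not confirm):

from baskerville import blocks

# Map the trunk representation to per-position, per-track predictions.
predictions = blocks.final(current, units=1024, activation="softplus")
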
+baskerville.blocks.global_context(inputs, **kwargs)[source]

+baskerville.blocks.one_to_two(inputs, operation='mean', **kwargs)[source]

+baskerville.blocks.res_tower(inputs, filters_init, filters_end=None, filters_mult=None, kernel_size=1, dropout=0, pool_size=2, pool_type='max', divisible_by=1, repeat=1, num_convs=2, reprs=[], **kwargs)[source]

Construct a reducing convolution tower.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters_init – Initial Conv1D filters
  • filters_end – End Conv1D filters
  • filters_mult – Multiplier for Conv1D filters
  • kernel_size – Conv1D kernel_size
  • dropout – Dropout on subsequent convolution blocks.
  • pool_size – Pool width.
  • repeat – Residual block repetitions
  • num_convs – Conv blocks per residual layer

Returns:
  [batch_size, seq_length, features] output sequence

+baskerville.blocks.squeeze_excite(inputs, activation='relu', bottleneck_ratio=8, additive=False, norm_type=None, bn_momentum=0.9, **kwargs)[source]

+baskerville.blocks.swin_transformer(inputs, **kwargs)[source]

+baskerville.blocks.symmetrize_2d(inputs, **kwargs)[source]

+baskerville.blocks.tconv_nac(inputs, filters=None, kernel_size=1, activation='relu', stride=1, l2_scale=0, dropout=0, conv_type='standard', norm_type=None, bn_momentum=0.99, norm_gamma=None, kernel_initializer='he_normal', padding='same')[source]

Construct a single transposed convolution block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • filters – Conv1D filters
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – UpSample stride
  • l2_scale – L2 regularization weight
  • dropout – Dropout rate probability
  • conv_type – Conv1D layer type
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum

Returns:
  [batch_size, stride*seq_length, features] output sequence

+baskerville.blocks.transformer(inputs, key_size=None, heads=1, out_size=None, activation='relu', dense_expansion=2.0, content_position_bias=True, dropout=0.25, attention_dropout=0.05, position_dropout=0.01, l2_scale=0, mha_l2_scale=0, num_position_features=None, qkv_width=1, mha_initializer='he_normal', kernel_initializer='he_normal', **kwargs)[source]

Construct a transformer block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • key_size – Attention key size

Returns:
  [batch_size, seq_length, features] output sequence

+baskerville.blocks.transformer2(inputs, key_size=None, heads=1, out_size=None, activation='relu', num_position_features=None, attention_dropout=0.05, position_dropout=0.01, dropout=0.25, dense_expansion=2.0, qkv_width=1, **kwargs)[source]

Construct a transformer block, with length-wise pooling before returning to full length.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • key_size – Attention key size

Returns:
  [batch_size, seq_length, features] output sequence

+baskerville.blocks.transformer_dense(inputs, out_size, dense_expansion, l2_scale, dropout, kernel_initializer)[source]

Transformer block dense portion.

+baskerville.blocks.transformer_split(inputs, splits=2, key_size=None, heads=1, out_size=None, activation='relu', dense_expansion=2.0, content_position_bias=True, dropout=0.25, attention_dropout=0.05, position_dropout=0.01, l2_scale=0, mha_l2_scale=0, num_position_features=None, qkv_width=1, mha_initializer='he_normal', kernel_initializer='he_normal', **kwargs)[source]

Construct a transformer block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • key_size – Attention key size

Returns:
  [batch_size, seq_length, features] output sequence

+baskerville.blocks.transformer_tower(inputs, repeat=2, block_type='transformer', **kwargs)[source]

Construct a tower of repeated transformer blocks.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • repeat – Transformer block repetitions

Returns:
  [batch_size, seq_length, features] output sequence

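A hedged sketch of adding a transformer tower on top of a convolutional trunk (key_size/heads/dropout values are illustrative, and the extra keyword arguments are assumed to be forwarded to each transformer block via **kwargs):

from baskerville import blocks

# Eight transformer blocks over the pooled sequence representation.
current = blocks.transformer_tower(
    current,
    repeat=8,
    key_size=64,
    heads=8,
    dropout=0.2,
)
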
+baskerville.blocks.unet_concat(inputs, unet_repr, activation='relu', stride=2, l2_scale=0, dropout=0, norm_type=None, bn_momentum=0.99, kernel_size=1, kernel_initializer='he_normal')[source]

Construct a U-net upsampling and concatenation block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • unet_repr – U-net representation to concatenate
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – UpSample stride
  • l2_scale – L2 regularization weight
  • dropout – Dropout rate probability
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum

Returns:
  [batch_size, stride*seq_length, features] output sequence

+baskerville.blocks.unet_conv(inputs, unet_repr, activation='relu', stride=2, l2_scale=0, dropout=0, norm_type=None, bn_momentum=0.99, kernel_size=1, kernel_initializer='he_normal', upsample_conv=False)[source]

Construct a feature pyramid network block.

Parameters:
  • inputs – [batch_size, seq_length, features] input sequence
  • kernel_size – Conv1D kernel_size
  • activation – relu/gelu/etc
  • stride – UpSample stride
  • l2_scale – L2 regularization weight
  • dropout – Dropout rate probability
  • norm_type – Apply batch or layer normalization
  • bn_momentum – BatchNorm momentum
  • upsample_conv – Conv1D the upsampled input path

Returns:
  [batch_size, seq_length, features] output sequence

+baskerville.blocks.upper_tri(inputs, diagonal_offset=2, **kwargs)[source]

+baskerville.blocks.wheeze_excite(inputs, pool_size, **kwargs)[source]


baskerville.dataset module

+class baskerville.dataset.SeqDataset(data_dir: str, split_label: str, batch_size: int, shuffle_buffer: int = 128, seq_length_crop: int = 0, mode: str = 'eval', tfr_pattern: str | None = None, targets_slice_file: str | None = None)[source]

Bases: object

Labeled sequence dataset for Tensorflow.

Parameters:
  • data_dir (str) – Dataset directory.
  • split_label (str) – Dataset split, e.g. train, valid, test.
  • batch_size (int) – Batch size.
  • shuffle_buffer (int) – Shuffle buffer size. Defaults to 128.
  • seq_length_crop (int) – Sequence length to crop from sides. Defaults to 0.
  • mode (str) – Dataset mode, e.g. train/eval. Defaults to 'eval'.
  • tfr_pattern (str) – TFRecord pattern to glob. Defaults to split_label.
  • targets_slice_file (str) – Targets table from which to slice a target subset.

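A minimal construction sketch (the directory path is hypothetical and assumed to follow the layout produced by the baskerville preprocessing scripts):

from baskerville.dataset import SeqDataset

# Hypothetical data directory produced by the baskerville data pipeline.
train_data = SeqDataset(
    "data/hg38",
    split_label="train",
    batch_size=4,
    shuffle_buffer=256,
    mode="train",
)
print(train_data.batches_per_epoch())
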
+batches_per_epoch()[source]

Compute number of batches per epoch.

+compute_stats()[source]

Iterate over the TFRecords to count sequences, and infer seq_depth and num_targets.

+distribute(strategy)[source]

Wrap Dataset to distribute across devices.

+generate_parser(raw: bool = False)[source]

Generate parser function for TFRecordDataset.

+make_dataset(cycle_length=4)[source]

Make tf.data.Dataset w/ transformations.

+numpy(return_inputs=True, return_outputs=True, step=1, target_slice=None, dtype='float16')[source]

Convert TFR inputs and/or outputs to numpy arrays.

+baskerville.dataset.file_to_records(filename: str)[source]

Read TFRecord file into tf.data.Dataset.

+baskerville.dataset.targets_prep_strand(targets_df)[source]

Adjust targets table for merged stranded datasets.

Parameters:
  targets_df – pandas DataFrame of targets

Returns:
  pandas DataFrame of targets, with stranded targets collapsed into a single row

Return type:
  targets_df

+baskerville.dataset.untransform_preds(preds, targets_df, unscale=False)[source]

Undo the squashing transformations performed for the tasks.

Parameters:
  • preds (np.array) – Predictions LxT.
  • targets_df (pd.DataFrame) – Targets information table.

Returns:
  Untransformed predictions LxT.

Return type:
  preds (np.array)

+baskerville.dataset.untransform_preds1(preds, targets_df, unscale=False)[source]

Undo the squashing transformations performed for the tasks.

Parameters:
  • preds (np.array) – Predictions LxT.
  • targets_df (pd.DataFrame) – Targets information table.

Returns:
  Untransformed predictions LxT.

Return type:
  preds (np.array)


baskerville.dna module

+baskerville.dna.dna_1hot(seq: str, seq_len: int | None = None, n_uniform: bool = False, n_sample: bool = False)[source]

Convert a DNA sequence to a 1-hot encoding.

Parameters:
  • seq (str) – DNA sequence.
  • seq_len (int) – Length to extend/trim sequences to.
  • n_uniform (bool) – Represent N's as 0.25, forcing float16.
  • n_sample (bool) – Sample ACGT for N's.

Returns:
  1-hot encoding of DNA sequence.

Return type:
  seq_code (np.array)

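For example (the all-zero default encoding of N is an assumption inferred from the n_uniform description above):

from baskerville import dna

code = dna.dna_1hot("ACGTN")
# code has shape (5, 4); the N row is assumed all-zero by default.

code_u = dna.dna_1hot("ACGTN", n_uniform=True)
# With n_uniform=True the N row becomes 0.25s and the dtype is float16.
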
+baskerville.dna.dna_1hot_index(seq: str, n_sample: bool = False)[source]

Convert a DNA sequence to an index encoding.

Parameters:
  • seq (str) – DNA sequence.
  • n_sample (bool) – Sample ACGT for N's.

Returns:
  Index encoding of DNA sequence.

Return type:
  seq_code (np.array)

+baskerville.dna.dna_rc(seq: str)[source]

Reverse complement a DNA sequence.

Parameters:
  seq (str) – DNA sequence.

Returns:
  Reverse complement of the input sequence.

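For example:

from baskerville import dna

print(dna.dna_rc("AACGT"))  # ACGTT
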
+baskerville.dna.hot1_augment(Xb, fwdrc: bool = True, shift: int = 0)[source]

Transform a batch of one hot coded sequences to augment training.

Parameters:
  • Xb (np.array) – Batch x Length x 4 one hot coded sequences.
  • fwdrc (bool) – Represent the forward (True) versus reverse complement (False) strand.
  • shift (int) – Shift sequences by this many positions.

Returns:
  Transformed batch of sequences.

Return type:
  Xbt (np.array)

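A small sketch of building an augmented batch (the shift direction is assumed from the parameter description):

import numpy as np
from baskerville import dna

# Batch of one sequence: 1 x L x 4.
Xb = np.expand_dims(dna.dna_1hot("ACGTACGT"), axis=0)

Xb_rc = dna.hot1_augment(Xb, fwdrc=False, shift=0)  # reverse complement
Xb_sh = dna.hot1_augment(Xb, fwdrc=True, shift=1)   # shifted by one position
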
+baskerville.dna.hot1_delete(seq_1hot, pos: int, delete_len: int, pad_value=None)[source]

Delete nucleotides starting at a given position in the Lx4 1-hot encoded sequence.

Parameters:
  • seq_1hot (np.array) – 1-hot encoded sequence.
  • pos (int) – Position to start deleting.
  • delete_len (int) – Number of nucleotides to delete.
  • pad_value (float) – Value to pad the end with.

Returns:
  In-place transformed sequence.

Return type:
  seq_1hot (np.array)

+baskerville.dna.hot1_dna(seqs_1hot)[source]

Convert 1-hot coded sequences to ACGTN.

Parameters:
  seqs_1hot (np.array) – 1-hot encoded sequences.

Returns:
  List of DNA sequences.

Return type:
  seqs [str]

+baskerville.dna.hot1_get(seqs_1hot, pos: int)[source]

Return the nucleotide corresponding to the one hot coding of position "pos" in the Lx4 array seqs_1hot.

Parameters:
  • seqs_1hot (np.array) – 1-hot encoded sequences.
  • pos (int) – Position to get nucleotide.

Returns:
  Nucleotide.

Return type:
  nt (str)

+baskerville.dna.hot1_insert(seq_1hot, pos: int, insert_seq: str)[source]

Insert sequence at a given position in the 1-hot encoded sequence.

Parameters:
  • seq_1hot (np.array) – 1-hot encoded sequence.
  • pos (int) – Position to insert sequence.
  • insert_seq (str) – Sequence to insert.

Returns:
  In-place transformed sequence.

Return type:
  seq_1hot (np.array)

+baskerville.dna.hot1_rc(seqs_1hot)[source]

Reverse complement a batch of one hot coded sequences, while being robust to additional tracks beyond the four nucleotides.

Parameters:
  seqs_1hot (np.array) – 1-hot encoded sequences.

Returns:
  Reverse complemented sequences.

Return type:
  seqs_1hot_rc (np.array)

+baskerville.dna.hot1_set(seq_1hot, pos: int, nt: str)[source]

Set position in a 1-hot encoded sequence to given nucleotide.

Parameters:
  • seq_1hot (np.array) – 1-hot encoded sequence.
  • pos (int) – Position to set nucleotide.
  • nt (str) – Nucleotide to set.

Returns:
  In-place transformed sequence.

Return type:
  seq_1hot (np.array)

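The in-place editors compose naturally; a short sketch:

from baskerville import dna

seq_1hot = dna.dna_1hot("ACGT")
dna.hot1_set(seq_1hot, 1, "A")      # in place: position 1 now encodes A
print(dna.hot1_get(seq_1hot, 1))    # A
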

baskerville.gene module

+class baskerville.gene.Gene(chrom, strand, kv)[source]

Bases: object

Class for managing genes in an isoform-agnostic way, taking the union of exons across isoforms.

+add_exon(start, end)[source]

BED 0-indexing assumed.

+get_exons()[source]

+midpoint()[source]

+output_slice(seq_start, seq_len, model_stride, span=False)[source]

+span()[source]

+class baskerville.gene.GenomicInterval(start, end, chrom=None, strand=None)[source]

Bases: object

+class baskerville.gene.Transcriptome(gtf_file)[source]

Bases: object

+bedtool_exon()[source]

+bedtool_span()[source]

+read_gtf(gtf_file)[source]

+write_bed_exon(bed_file)[source]

+write_bed_span(bed_file)[source]

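A usage sketch relying only on the methods listed above (the GTF path is hypothetical):

from baskerville.gene import Transcriptome

transcriptome = Transcriptome("annotation/genes.gtf")  # hypothetical path
transcriptome.write_bed_exon("exons.bed")  # union of exons per gene
transcriptome.write_bed_span("spans.bed")  # full gene spans
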
+baskerville.gene.gtf_kv(s)[source]

Convert the last gtf section of key/value pairs into a dict.


baskerville.layers module

+class baskerville.layers.CenterAverage(*args, **kwargs)[source]

Bases: Layer

Average the center of the input.

Parameters:
  center (int) – Length of the center slice.

+call(x)[source]

This is where the layer's logic lives.

The call() method may not create state (except in its first invocation, wrapping the creation of variables or other resources in tf.init_scope()). It is recommended to create state, including tf.Variable instances and nested Layer instances, in __init__(), or in the build() method that is called automatically before call() executes for the first time.

Parameters:
  • inputs – Input tensor, or dict/list/tuple of input tensors. The first positional inputs argument is subject to special rules:
    • inputs must be explicitly passed. A layer cannot have zero arguments, and inputs cannot be provided via the default value of a keyword argument.
    • NumPy array or Python scalar values in inputs get cast as tensors.
    • Keras mask metadata is only collected from inputs.
    • Layers are built (build(input_shape) method) using shape info from inputs only.
    • input_spec compatibility is only checked against inputs.
    • Mixed precision input casting is only applied to inputs. If a layer has tensor arguments in *args or **kwargs, their casting behavior in mixed precision should be handled manually.
    • The SavedModel input specification is generated using inputs only.
    • Integration with various ecosystem packages like TFMOT, TFLite, TF.js, etc. is only supported for inputs and not for tensors in positional and keyword arguments.
  • *args – Additional positional arguments. May contain tensors, although this is not recommended, for the reasons above.
  • **kwargs – Additional keyword arguments. May contain tensors, although this is not recommended, for the reasons above. The following optional keyword arguments are reserved:
    • training – Boolean scalar tensor or Python boolean indicating whether the call is meant for training or inference.
    • mask – Boolean input mask. If the layer's call() method takes a mask argument, its default value will be set to the mask generated for inputs by the previous layer (if input did come from a layer that generated a corresponding mask, i.e. if it came from a Keras layer with masking support).

Returns:
  A tensor or list/tuple of tensors.

+get_config()[source]

Returns the config of the layer.

A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration.

The config of a layer does not include connectivity information, nor the layer class name. These are handled by Network (one layer of abstraction above).

Note that get_config() does not guarantee to return a fresh copy of dict every time it is called. The callers should make a copy of the returned dict if they want to modify it.

Returns:
  Python dictionary.

+class baskerville.layers.CenterSlice(*args, **kwargs)[source]

Bases: Layer

Slice the center of the input.

Parameters:
  center (int) – Length of the center slice.

+call(x)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.ConcatDist2D(*args, **kwargs)[source]

Bases: Layer

Concatenate the pairwise distance to 2d feature matrix.

+call(inputs)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+class baskerville.layers.ConcatPosition(*args, **kwargs)[source]

Bases: Layer

Concatenate position to 1d feature vectors.

+call(inputs)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.EnsembleReverseComplement(*args, **kwargs)[source]

Bases: Layer

Expand tensor to include reverse complement of one hot encoded DNA sequence.

+call(seqs_1hot)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+class baskerville.layers.EnsembleShift(*args, **kwargs)[source]

Bases: Layer

Expand tensor to include shifts of one hot encoded DNA sequence.

+call(seqs_1hot)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.FactorInverse(*args, **kwargs)[source]

Bases: Layer

Invert a target matrix factorization.

+call(W)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.GlobalContext(*args, **kwargs)[source]

Bases: Layer

+build(input_shape)[source]

Creates the variables of the layer (for subclass implementers).

This is a method that implementers of subclasses of Layer or Model can override if they need a state-creation step in-between layer instantiation and layer call. It is invoked automatically before the first execution of call().

This is typically used to create the weights of Layer subclasses (at the discretion of the subclass implementer).

Parameters:
  input_shape – Instance of TensorShape, or list of instances of TensorShape if the layer expects a list of inputs (one instance per input).

+call(x)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+class baskerville.layers.LengthAverage(*args, **kwargs)[source]

Bases: Layer

Average across a variable length sequence.

+call(x, seq)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+class baskerville.layers.MultiheadAttention(*args, **kwargs)[source]

Bases: Layer

Multi-head attention.

+call(inputs, training=False)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.OneToTwo(*args, **kwargs)[source]

Bases: Layer

Transform 1d to 2d with i,j vectors operated on.

+call(oned)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.Scale(*args, **kwargs)[source]

Bases: Layer

Scale the input by a learned value.

Parameters:
  • axis (int or [int]) – Axis/axes along which to scale.
  • initializer – Initializer for the scale weight.

+build(input_shape)[source]

Creates the variables of the layer (for subclass implementers). Inherited from Layer; see GlobalContext.build() above.

+call(x)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]

Returns the config of the layer. Inherited from Layer; see CenterAverage.get_config() above.

+class baskerville.layers.SoftmaxPool1D(*args, **kwargs)[source]

Bases: Layer

Pooling operation with optional weights.

+build(input_shape)[source]

Creates the variables of the layer (for subclass implementers). Inherited from Layer; see GlobalContext.build() above.

+call(inputs)[source]

This is where the layer's logic lives. Inherited from Layer; see CenterAverage.call() above for the full description of inputs, *args, and **kwargs.

+get_config()[source]
+

Returns the config of the layer.

+

A layer config is a Python dictionary (serializable) +containing the configuration of a layer. +The same layer can be reinstantiated later +(without its trained weights) from this configuration.

+

The config of a layer does not include connectivity +information, nor the layer class name. These are handled +by Network (one layer of abstraction above).

+

Note that get_config() does not guarantee to return a fresh copy of +dict every time it is called. The callers should make a copy of the +returned dict if they want to modify it.

+
+
Returns:
+

Python dictionary.

+
+
+
+ +
+ +
+
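As an illustration of what a softmax pooling operation computes, here is a minimal sketch in plain TensorFlow. This is not the library’s implementation: the window size of 2 and the optional per-channel scale are assumptions for the example.

import tensorflow as tf

def softmax_pool_1d(x, pool_size=2, scale=None):
    """Illustrative softmax pooling over non-overlapping windows of a
    [batch, length, channels] tensor: each window's values are combined
    with weights given by a softmax over the (optionally scaled) values."""
    length, channels = x.shape[1], x.shape[2]
    windows = tf.reshape(x, (-1, length // pool_size, pool_size, channels))
    logits = windows if scale is None else windows * scale
    weights = tf.nn.softmax(logits, axis=2)  # attention within each window
    return tf.reduce_sum(windows * weights, axis=2)

x = tf.random.normal((1, 8, 4))
print(softmax_pool_1d(x).shape)  # (1, 4, 4)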
class baskerville.layers.Softplus(*args, **kwargs)[source]

Bases: Layer

Safe softplus, clipping large values.

call(x)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
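A minimal sketch of a “safe” softplus: the input is clipped before exponentiation so exp() cannot overflow. The clip threshold (exp_max=10000.0 here) is an assumption for illustration; the library’s exact threshold may differ.

import tensorflow as tf

def safe_softplus(x, exp_max=10000.0):
    """Softplus with the input clipped first, so exp() cannot overflow;
    above the clip, softplus is effectively the identity anyway."""
    x = tf.clip_by_value(x, -exp_max, exp_max)
    return tf.math.softplus(x)

print(safe_softplus(tf.constant([-2.0, 0.0, 20.0])).numpy())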
class baskerville.layers.SqueezeExcite(*args, **kwargs)[source]

Bases: Layer

build(input_shape)[source]

Creates the variables of the layer (for subclass implementers); follows the standard Keras build() contract described above.

call(x)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
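For intuition, a minimal functional sketch of squeeze-and-excitation over a [batch, length, channels] activation map. The bottleneck ratio of 8 and the inline Dense layers are assumptions for illustration; a real Keras layer would create these weights in build().

import tensorflow as tf

def squeeze_excite(x, bottleneck_ratio=8):
    """Illustrative squeeze-and-excite: global-average 'squeeze', a
    two-layer 'excite' MLP, then a per-channel sigmoid gate."""
    channels = x.shape[-1]
    squeeze = tf.reduce_mean(x, axis=1)  # [batch, channels]
    excite = tf.keras.layers.Dense(
        max(channels // bottleneck_ratio, 1), activation="relu")(squeeze)
    gate = tf.keras.layers.Dense(channels, activation="sigmoid")(excite)
    return x * gate[:, tf.newaxis, :]  # broadcast gate over the length axis

x = tf.random.normal((2, 16, 32))
print(squeeze_excite(x).shape)  # (2, 16, 32)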
class baskerville.layers.StochasticReverseComplement(*args, **kwargs)[source]

Bases: Layer

Stochastically reverse complement a one hot encoded DNA sequence.

call(seq_1hot, training=None)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.
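A minimal sketch of stochastic reverse complementation for a one-hot [batch, length, 4] DNA tensor, assuming A/C/G/T channel order: reversing both the length and nucleotide axes implements reverse complement, and the returned boolean flag lets a downstream SwitchReverse-style layer un-flip predictions.

import tensorflow as tf

def stochastic_rc(seq_1hot, training=True):
    """Illustrative stochastic reverse complement. With probability 0.5
    during training, reverse the length axis and the ACGT axis (the
    latter maps A<->T and C<->G); at inference, pass through."""
    if not training:
        return seq_1hot, tf.constant(False)
    flip = tf.random.uniform(()) > 0.5
    rc = tf.reverse(seq_1hot, axis=[1, 2])  # reverse + complement
    seq_out = tf.cond(flip, lambda: rc, lambda: seq_1hot)
    return seq_out, flip

seq = tf.one_hot([[0, 1, 2, 3, 0, 0]], depth=4)  # one-hot "ACGTAA"
out, flipped = stochastic_rc(seq)
print(out.shape, bool(flipped))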
class baskerville.layers.StochasticShift(*args, **kwargs)[source]

Bases: Layer

Stochastically shift a one hot encoded DNA sequence.

call(seq_1hot, training=None)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
class baskerville.layers.SwitchReverse(*args, **kwargs)[source]

Bases: Layer

Reverse predictions if the inputs were reverse complemented.

call(x_reverse)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
class baskerville.layers.SwitchReverseTriu(*args, **kwargs)[source]

Bases: Layer

call(x_reverse)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
class baskerville.layers.Symmetrize2D(*args, **kwargs)[source]

Bases: Layer

Take the average of a matrix and its transpose to enforce symmetry.

call(x)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.
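The symmetrization itself is one line; a sketch assuming a [batch, length, length, channels] input:

import tensorflow as tf

def symmetrize_2d(x):
    # average with the transpose over the two length axes so that
    # x_sym[:, i, j] == x_sym[:, j, i]
    return 0.5 * (x + tf.transpose(x, perm=[0, 2, 1, 3]))

x = tf.random.normal((1, 4, 4, 2))
x_sym = symmetrize_2d(x)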
class baskerville.layers.UpperTri(*args, **kwargs)[source]

Bases: Layer

Unroll matrix to its upper triangular portion.

call(inputs)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
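A sketch of the upper-triangular unroll, assuming a [batch, length, length, channels] input; the diagonal_offset=2 default here is an assumption for illustration.

import numpy as np
import tensorflow as tf

def upper_tri(x, diagonal_offset=2):
    """Illustrative unroll of a square contact map to its upper triangle
    (excluding the first diagonal_offset diagonals), giving a tensor of
    shape [batch, n_pairs, channels]."""
    seq_len = x.shape[1]
    iu = np.triu_indices(seq_len, k=diagonal_offset)
    flat_idx = iu[0] * seq_len + iu[1]  # indices into the flattened L*L grid
    x_flat = tf.reshape(x, (-1, seq_len * seq_len, x.shape[-1]))
    return tf.gather(x_flat, flat_idx, axis=1)

x = tf.random.normal((1, 8, 8, 3))
print(upper_tri(x).shape)  # (1, 21, 3)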
class baskerville.layers.WheezeExcite(*args, **kwargs)[source]

Bases: Layer

build(input_shape)[source]

Creates the variables of the layer (for subclass implementers); follows the standard Keras build() contract described above.

call(x)[source]

This is where the layer’s logic lives. Arguments and the return value follow the standard Keras Layer.call() contract described above.

get_config()[source]

Returns the config of the layer, following the standard Keras get_config() contract described above.
baskerville.layers.activate(current, activation, verbose=False)[source]

baskerville.layers.positional_features(positions: Tensor, feature_size: int, seq_length: int, symmetric=False)[source]

Compute relative positional encodings/features.

Each positional feature function will compute/provide the same fraction of features, making up the total of feature_size.

Parameters:

  • positions – Tensor of relative positions of arbitrary shape.

  • feature_size – Total number of basis functions.

  • seq_length – Sequence length denoting the characteristic length that the individual positional features can use. This is required since the parametrization of the input features should be independent of positions while it could still require to use the total number of features.

  • symmetric – If True, the resulting features will be symmetric across the relative position of 0 (i.e. only the absolute value of positions will matter). If False, then both the symmetric and asymmetric versions (symmetric multiplied by sign(positions)) of the features will be used.

Returns:

Tensor of shape positions.shape + (feature_size,).
baskerville.layers.positional_features_central_mask(positions: Tensor, feature_size: int, seq_length: int)[source]

Positional features using a central mask (allow only central features).
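For intuition, a toy version of a central-mask positional feature. The doubling window widths are an assumption for illustration, not the library’s exact basis, but the output obeys the documented shape contract positions.shape + (feature_size,).

import tensorflow as tf

def central_mask_features(positions, feature_size):
    # feature k is 1 where |position| <= 2**(k+1), else 0
    widths = tf.pow(2.0, tf.range(1, feature_size + 1, dtype=tf.float32))
    pos = tf.cast(tf.abs(positions), tf.float32)[..., tf.newaxis]
    return tf.cast(pos <= widths, tf.float32)

L = 8
rel = tf.range(L)[tf.newaxis, :] - tf.range(L)[:, tf.newaxis]  # pairwise offsets
print(central_mask_features(rel, feature_size=4).shape)  # (8, 8, 4)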
baskerville.layers.relative_shift(x)[source]

Shift the relative logits like in TransformerXL.

baskerville.layers.shift_sequence(seq, shift, pad_value=0)[source]

Shift a sequence left or right by the signed shift amount.

Parameters:

  • seq – [batch_size, seq_length, seq_depth] sequence

  • shift – signed shift value (tf.int32 or int)

  • pad_value – value to fill the padding (primitive or scalar tf.Tensor)
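The pad-then-slice idea can be sketched as follows. This illustrative version handles only Python int shifts, whereas the documented function also accepts tf.int32 scalars.

import tensorflow as tf

def shift_sequence_sketch(seq, shift, pad_value=0):
    """Illustrative shift of a [batch, length, depth] sequence: pad
    |shift| positions of pad_value on one end, then keep the original
    length (positive shift moves the sequence to the right)."""
    if shift == 0:
        return seq
    pad = pad_value * tf.ones_like(seq[:, :abs(shift), :])
    if shift > 0:  # shift right: pad on the left, drop the right end
        return tf.concat([pad, seq[:, :-shift, :]], axis=1)
    # shift left: drop the left end, pad on the right
    return tf.concat([seq[:, -shift:, :], pad], axis=1)

seq = tf.one_hot([[0, 1, 2, 3]], depth=4)
print(shift_sequence_sketch(seq, 2).numpy())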
+
+

baskerville.metrics module

+
+
+class baskerville.metrics.MeanSquaredErrorUDot(udot_weight: float = 1, reduction='auto', name: str = 'mse_udot')[source]
+

Bases: LossFunctionWrapper

+

Mean squared error with mean-normalized specificity term.

+
+
Parameters:
+

udot_weight – Weight of the mean-normalized specificity term.

+
+
+
+ +
+
+class baskerville.metrics.PearsonR(*args, **kwargs)[source]
+

Bases: Metric

+

PearsonR metric for multi-task data.

+
+
Parameters:
+
    +
  • num_targets (int) – Number of tasks.

  • +
  • summarize (bool) – Whether to summarize over all tasks.

  • +
+
+
+
+
+reset_state()[source]
+

Reset metric state.

+
+ +
+
+result()[source]
+

Compute PearsonR result from state.

+
+ +
+
+update_state(y_true, y_pred, sample_weight=None)[source]
+

Update metric state for a batch.

+
+ +
+ +
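A hypothetical usage sketch: compiling a Keras model with the streaming PearsonR metric. The toy model here is an assumption; num_targets must match the model’s output dimension.

import tensorflow as tf
from baskerville import metrics

# multi-task regression head with 4 targets
model = tf.keras.Sequential([tf.keras.layers.Dense(4)])
model.compile(
    optimizer="adam",
    loss="poisson",
    metrics=[metrics.PearsonR(num_targets=4, summarize=True)],
)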
+
+class baskerville.metrics.PoissonKL(kl_weight: int = 1, reduction='auto', name='poisson_kl')[source]
+

Bases: LossFunctionWrapper

+

Poisson decomposition with KL specificity term.

+
+
Parameters:
+

kl_weight (float) – Weight of the KL specificity term.

+
+
+
+ +
+
+class baskerville.metrics.PoissonMultinomial(total_weight=1, reduction='auto', name: str = 'poisson_multinomial')[source]
+

Bases: LossFunctionWrapper

+

Poisson decomposition with multinomial specificity term.

+
+
Parameters:
+
    +
  • total_weight (float) – Weight of the Poisson total term.

  • +
  • epsilon (float) – Added small value to avoid log(0).

  • +
+
+
+
+ +
+
+class baskerville.metrics.R2(*args, **kwargs)[source]
+

Bases: Metric

+

R2 metric for multi-task data.

+
+
Parameters:
+
    +
  • num_targets (int) – Number of tasks.

  • +
  • summarize (bool) – Whether to summarize over all tasks.

  • +
+
+
+
+
+reset_state()[source]
+

Reset metric state.

+
+ +
+
+result()[source]
+

Compute R2 result from state.

+
+ +
+
+update_state(y_true, y_pred, sample_weight=None)[source]
+

Update metric state for a batch.

+
+ +
+ +
+
+class baskerville.metrics.SeqAUC(*args, **kwargs)[source]
+

Bases: AUC

+

AUC metric for multi-task sequence data.

+
+
Parameters:
+
    +
  • curve (str) – Metric type, either ‘ROC’ or ‘PR’.

  • +
  • summarize (bool) – Whether to summarize over all tasks.

  • +
+
+
+
+
+interpolate_pr_auc()[source]
+

Add option to remove summary.

+
+ +
+
+result()[source]
+

Add option to remove summary. It’s not clear why, but the metrics_utils comparisons aren’t working from tf2.6 on; this hacks a solution that compares the values instead.

+
+ +
+
+update_state(y_true, y_pred, **kwargs)[source]
+

Flatten sequence length before update.

+
+ +
+ +
+
+baskerville.metrics.mean_squared_error_udot(y_true, y_pred, udot_weight: float = 1)[source]
+

Mean squared error with mean-normalized specificity term.

+
+ +
+
+baskerville.metrics.poisson_kl(y_true, y_pred, kl_weight=1, epsilon=0.001)[source]
+
+ +
+
+baskerville.metrics.poisson_multinomial(y_true, y_pred, total_weight: float = 1, epsilon: float = 1e-06, rescale: bool = False)[source]
+

Poisson decomposition with multinomial specificity term.

+
+
Parameters:
+
    +
  • total_weight (float) – Weight of the Poisson total term.

  • +
  • epsilon (float) – Added small value to avoid log(0).

  • +
+
+
+
+ +
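To make the decomposition concrete, here is an illustrative computation (not the library’s exact arithmetic): a Poisson term on coverage summed over the length axis, plus a multinomial term on where that coverage falls along the sequence.

import tensorflow as tf

def poisson_multinomial_sketch(y_true, y_pred, total_weight=1.0, epsilon=1e-6):
    """Illustrative decomposition for [batch, length, targets] tracks."""
    seq_len = tf.cast(tf.shape(y_true)[1], tf.float32)
    # totals over the length axis: [batch, targets]
    s_true = tf.reduce_sum(y_true, axis=1)
    s_pred = tf.reduce_sum(y_pred, axis=1)
    # Poisson NLL on the totals (up to a constant in y_true)
    poisson_term = s_pred - s_true * tf.math.log(s_pred + epsilon)
    # multinomial NLL on the normalized positional distribution
    p_pred = y_pred / (s_pred[:, tf.newaxis, :] + epsilon)
    multinomial_term = -tf.reduce_sum(
        y_true * tf.math.log(p_pred + epsilon), axis=1)
    return (total_weight * poisson_term + multinomial_term) / seq_len

y_true = tf.random.uniform((2, 8, 3))
y_pred = tf.random.uniform((2, 8, 3)) + 0.1
print(poisson_multinomial_sketch(y_true, y_pred).shape)  # (2, 3)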
+
+

baskerville.seqnn module

+
+
+class baskerville.seqnn.SeqNN(params: dict)[source]
+

Bases: object

+

Sequence neural network model.

+
+
Parameters:
+

params (dict) – Model specification and parameters.

+
+
+
+
+build_block(current, block_params)[source]
+

Construct a SeqNN block.

+
+
Parameters:
+
    +
  • current – Current Tensor.

  • +
  • block_params (dict) – Block parameters.

  • +
+
+
Returns:
+

New current Tensor.

+
+
Return type:
+

current

+
+
+
+ +
+
+build_embed(conv_layer_i: int, batch_norm: bool = True)[source]
+

Build model to embed sequences into specific layer.

+
+ +
+
+build_ensemble(ensemble_rc: bool = False, ensemble_shifts=[0])[source]
+

Build ensemble of models computing on augmented input sequences.

+
+ +
+
+build_model(save_reprs: bool = True)[source]
+

Build the model.

+
+ +
+
+build_sad()[source]
+

Sum across length axis, in graph.

+
+ +
+
+build_slice(target_slice=None, target_sum: bool = False)[source]
+

Slice and/or sum across tasks, in graph.

+
+ +
+
+downcast(dtype=tf.float16, head_i=None)[source]
+

Downcast model output type.

+
+ +
+
+evaluate(seq_data, head_i=None, loss_label: str = 'poisson', loss_fn=None)[source]
+

Evaluate model on SeqDataset.

+
+ +
+
+get_bn_layer(bn_layer_i=0)[source]
+

Return specified batch normalization layer.

+
+ +
+
+get_conv_layer(conv_layer_i=0)[source]
+

Return specified convolution layer.

+
+ +
+
+get_conv_weights(conv_layer_i=0)[source]
+

Return kernel weights for specified convolution layer.

+
+ +
+
+get_dense_layer(layer_i=0)[source]
+

Return specified dense layer.

+
+ +
+
+gradients(seq_1hot, head_i=None, target_slice=None, pos_slice=None, pos_mask=None, pos_slice_denom=None, pos_mask_denom=None, chunk_size=None, batch_size=1, track_scale=1.0, track_transform=1.0, clip_soft=None, pseudo_count=0.0, no_transform=False, use_mean=False, use_ratio=False, use_logodds=False, subtract_avg=True, input_gate=True, smooth_grad=False, n_samples=5, sample_prob=0.875, dtype='float16')[source]
+

Compute input gradients for sequences (GPU-friendly).

+
+ +
+
+gradients_func(model, seq_1hot, target_slice, pos_slice, pos_mask=None, pos_slice_denom=None, pos_mask_denom=True, track_scale=1.0, track_transform=1.0, clip_soft=None, pseudo_count=0.0, no_transform=False, use_mean=False, use_ratio=False, use_logodds=False, subtract_avg=True, input_gate=True)[source]
+
+ +
+
+gradients_func_orig(model, seq_1hot, pos_slice)[source]
+

Compute input gradients for each task.

+
+
Parameters:
+
    +
  • model (tf.keras.Model) – Model to compute gradients for.

  • +
  • seq_1hot (tf.Tensor) – 1-hot encoded sequence.

  • +
  • pos_slice ([int]) – Sequence positions to consider.

  • +
+
+
Returns:
+

Gradients for each task.

+
+
Return type:
+

grads (tf.Tensor)

+
+
+
+ +
+
+gradients_orig(seq_1hot, head_i=None, pos_slice=None, batch_size=8, dtype='float16')[source]
+

Compute input gradients for each task.

+
+
Parameters:
+
    +
  • seq_1hot (np.array) – 1-hot encoded sequence.

  • +
  • head_i (int) – Model head index.

  • +
  • pos_slice ([int]) – Sequence positions to consider.

  • +
  • batch_size (int) – number of tasks to compute gradients for at once.

  • +
  • dtype – Returned data type.

  • +
+
+
Returns:
+

Gradients for each task.

+
+
+
+ +
+
+num_targets(head_i=None)[source]
+

Return number of targets.

+
+ +
+
+predict(seq_data, head_i: int | None = None, generator: bool = False, stream: bool = False, step: int = 1, dtype: str = 'float32', **kwargs)[source]
+

Predict targets for SeqDataset, with more options.

+
+
Parameters:
+
    +
  • seq_data (SeqDataset) – Dataset to predict on.

  • +
  • head_i (int) – Model head index.

  • +
  • generator (bool) – Use generator to predict on dataset.

  • +
  • stream (bool) – Stream predictions from dataset.

  • +
  • step (int) – Step size.

  • +
  • dtype (str) – Data type to return.

  • +
+
+
+
+ +
+
+restore(model_file, head_i=0, trunk=False)[source]
+

Restore weights from saved model.

+
+ +
+
+save(model_file, trunk=False)[source]
+

Save model weights to file.

+
+
Parameters:
+
    +
  • model_file (str) – Path to save model weights.

  • +
  • trunk (bool) – Save trunk weights only.

  • +
+
+
+
+ +
+
+set_defaults()[source]
+

Set default parameters.

+

Only necessary for my bespoke parameters. Others are best defaulted closer to the source.

+
+ +
+
+step(step=2, head_i=None)[source]
+

Create new model to step positions across sequence.

+
+
Parameters:
+
    +
  • step (int) – Step size.

  • +
  • head_i (int) – Model head index.

  • +
+
+
+
+ +
+
+track_sequence(sequence)[source]
+

Track pooling, striding, and cropping of sequence.

+
+
Parameters:
+

sequence (tf.Tensor) – Sequence input.

+
+
+
+ +
+ +
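A hypothetical end-to-end sketch of the SeqNN workflow: build the model from a params JSON, restore trained weights, and predict on a held-out dataset. The file names, the ‘model’ key layout, and the SeqDataset keyword arguments are assumptions for illustration.

import json
from baskerville import dataset, seqnn

# load model specification (assumed to live under a "model" key)
with open("params.json") as params_open:
    params = json.load(params_open)

model = seqnn.SeqNN(params["model"])
model.restore("model_best.h5", head_i=0)

# predict on the test split of a SeqDataset directory
eval_data = dataset.SeqDataset("data_dir", split_label="test", batch_size=4)
preds = model.predict(eval_data, head_i=0, dtype="float32")
print(preds.shape)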
+
+

baskerville.snps module

+
+
+class baskerville.snps.SNPCluster[source]
+

Bases: object

+
+
+add_snp(snp)[source]
+

Add SNP to cluster.

+
+ +
+
+delimit(seq_len)[source]
+

Delimit sequence boundaries.

+
+ +
+
+get_1hots(genome_open)[source]
+

Get list of one hot coded sequences.

+
+ +
+ +
+
+baskerville.snps.cluster_snps(snps, seq_len: int, center_pct: float)[source]
+
+
Cluster a sorted list of SNPs into regions that will satisfy the required center_pct.

+
+
+
+
Parameters:
+
    +
  • snps ([SNP]) – List of SNPs.

  • +
  • seq_len (int) – Sequence length.

  • +
  • center_pct (float) – Percent of sequence length to cluster SNPs.

  • +
+
+
+
+ +
+
+baskerville.snps.initialize_output_h5(out_dir, snp_stats, snps, targets_length, targets_df, num_shifts)[source]
+

Initialize an output HDF5 file for SAD stats.

+
+
Parameters:
+
    +
  • out_dir (str) – Output directory.

  • +
  • snp_stats ([str]) – List of SAD stats to compute.

  • +
  • snps ([SNP]) – List of SNPs.

  • +
  • targets_length (int) – Targets’ sequence length.

  • +
  • targets_df (pd.DataFrame) – Targets DataFrame.

  • +
  • num_shifts (int) – Number of shifts.

  • +
+
+
+
+ +
+
+baskerville.snps.make_alt_1hot(ref_1hot, snp_seq_pos, ref_allele, alt_allele)[source]
+

Return alternative allele one hot coding.

+
+
Parameters:
+
    +
  • ref_1hot (np.array) – Reference allele one hot coding.

  • +
  • snp_seq_pos (int) – SNP position in sequence.

  • +
  • ref_allele (str) – Reference allele.

  • +
  • alt_allele (str) – Alternative allele.

  • +
+
+
Returns:
+

Alternative allele one hot coding.

+
+
Return type:
+

np.array

+
+
+
+ +
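For the single-nucleotide case, the substitution reduces to swapping one column of the one-hot matrix; a NumPy sketch follows (indels require extra bookkeeping that is omitted here).

import numpy as np

def make_alt_1hot_sketch(ref_1hot, snp_seq_pos, ref_allele, alt_allele):
    """Illustrative SNV case of make_alt_1hot: copy the reference one-hot
    coding ([length, 4], A/C/G/T order assumed) and swap the SNP
    position's coding from the ref base to the alt base."""
    nt_index = {"A": 0, "C": 1, "G": 2, "T": 3}
    assert ref_1hot[snp_seq_pos, nt_index[ref_allele]] == 1, "ref mismatch"
    alt_1hot = np.copy(ref_1hot)
    alt_1hot[snp_seq_pos, :] = 0
    alt_1hot[snp_seq_pos, nt_index[alt_allele]] = 1
    return alt_1hot

ref = np.zeros((4, 4), dtype=np.float32)
ref[np.arange(4), [0, 1, 2, 3]] = 1           # one-hot "ACGT"
alt = make_alt_1hot_sketch(ref, 2, "G", "A")  # G->A at position 2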
+
+baskerville.snps.make_strand_transform(targets_df, targets_strand_df)[source]
+

Make a sparse matrix to sum strand pairs.

+
+
Parameters:
+
    +
  • targets_df (pd.DataFrame) – Targets DataFrame.

  • +
  • targets_strand_df (pd.DataFrame) – Targets DataFrame, with strand pairs collapsed.

  • +
+
+
Returns:
+

Sparse matrix to sum strand pairs.

+
+
Return type:
+

scipy.sparse.csr_matrix

+
+
+
+ +
+
+baskerville.snps.score_snps(params_file, model_file, vcf_file, worker_index, options)[source]
+

Score SNPs in a VCF file with a SeqNN model.

+
+
Parameters:
+
    +
  • params_file – Model parameters

  • +
  • model_file – Saved model weights

  • +
  • vcf_file – VCF file of SNPs to score.

  • worker_index – Worker index.

  • options – Options from command-line arguments.

+
+ +
+
+baskerville.snps.write_pct(scores_out, snp_stats)[source]
+

Compute percentile values for each target and write to HDF5.

+
+
Parameters:
+
    +
  • scores_out (h5py.File) – Output HDF5 file.

  • +
  • snp_stats ([str]) – List of SAD stats to compute.

  • +
+
+
+
+ +
+
+baskerville.snps.write_snp(ref_preds_sum, alt_preds_sum, scores_out, si, snp_stats)[source]
+

Write SNP predictions to HDF, assuming the length dimension has +been collapsed.

+
+
Parameters:
+
    +
  • ref_preds_sum (np.array) – Reference allele predictions.

  • +
  • alt_preds_sum (np.array) – Alternative allele predictions.

  • +
  • scores_out (h5py.File) – Output HDF5 file.

  • +
  • si (int) – SNP index.

  • +
  • snp_stats ([str]) – List of SAD stats to compute.

  • +
+
+
+
+ +
+
+baskerville.snps.write_snp_len(ref_preds, alt_preds, scores_out, si, snp_stats)[source]
+

Write SNP predictions to HDF, assuming the length dimension has +been maintained.

+
+
Parameters:
+
    +
  • ref_preds (np.array) – Reference allele predictions.

  • +
  • alt_preds (np.array) – Alternative allele predictions.

  • +
  • scores_out (h5py.File) – Output HDF5 file.

  • +
  • si (int) – SNP index.

  • +
  • snp_stats ([str]) – List of SAD stats to compute.

  • +
+
+
+
+ +
+
+

baskerville.trainer module

+
+
+class baskerville.trainer.Cyclical1LearningRate(initial_learning_rate: float, maximal_learning_rate: float, final_learning_rate: float, step_size, name: str = 'Cyclical1LearningRate')[source]
+

Bases: LearningRateSchedule

+

A LearningRateSchedule that uses a cyclical schedule. See https://yashuseth.blog/2018/11/26/hyper-parameter-tuning-best-practices-learning-rate-batch-size-momentum-weight-decay/ for background.

+
+
Parameters:
+
    +
  • initial_learning_rate (float) – The initial learning rate.

  • +
  • maximal_learning_rate (float) – The maximal learning rate after warm up.

  • +
  • final_learning_rate (float) – The final learning rate after cycle.

  • +
  • step_size (int) – Cycle step size.

  • +
  • name (str, optional) – The name of the schedule. Defaults to “Cyclical1LearningRate”.

  • +
+
+
+
+
+get_config()[source]
+
+ +
+ +
+
+class baskerville.trainer.EarlyStoppingMin(min_epoch: int = 0, **kwargs)[source]
+

Bases: EarlyStopping

+

Stop training when a monitored quantity has stopped improving.

+
+
Parameters:
+

min_epoch – Minimum number of epochs before considering stopping.

+
+
+
+
+on_epoch_end(epoch, logs=None)[source]
+

Called at the end of an epoch.

+

Subclasses should override for any actions to run. This function should +only be called during TRAIN mode.

+
+
Parameters:
+
    +
  • epoch – Integer, index of epoch.

  • +
  • logs – Dict, metric results for this training epoch, and for the +validation epoch if validation is performed. Validation result +keys are prefixed with val_. For training epoch, the values of +the Model’s metrics are returned. Example: +{‘loss’: 0.2, ‘accuracy’: 0.7}.

  • +
+
+
+
+ +
+ +
+
+class baskerville.trainer.Trainer(params: dict, train_data, eval_data, out_dir: str, strategy=None, num_gpu: int = 1, keras_fit: bool = False)[source]
+

Bases: object

+

Model training class.

+
+
Parameters:
+
    +
  • params (dict) – Training parameters dictionary.

  • +
  • train_data – Dataset object or list of Dataset objects.

  • +
  • eval_data – Dataset object or list of Dataset objects.

  • +
  • out_dir (str) – Output directory name.

  • +
  • strategy – tf.distribute.Strategy object.

  • +
  • num_gpu (int) – Number of GPUs to use. Default: 1.

  • +
  • keras_fit (bool) – Use Keras fit method instead of custom loop.

  • +
+
+
+
+
+compile(seqnn_model)[source]
+
+ +
+
+fit2(seqnn_model)[source]
+

Train the model using a custom loop for two separate datasets.

+
+ +
+
+fit_keras(seqnn_model)[source]
+
+ +
+
+fit_tape(seqnn_model)[source]
+

Train the model using a custom tf.GradientTape loop.

+
+ +
+
+make_optimizer()[source]
+

Make optimizer object from given parameters.

+
+ +
+ +
+
+class baskerville.trainer.WarmUp(initial_learning_rate: float, warmup_steps: int, decay_schedule: None, power: float = 1.0, name: str | None = None)[source]
+

Bases: LearningRateSchedule

+

Applies a warmup schedule on a given learning rate decay schedule. +(h/t HuggingFace.)

+
+
Parameters:
+
    +
  • initial_learning_rate (float) – Initial learning rate after the warmup +(so this will be the learning rate at the end of the warmup).

  • +
  • decay_schedule (Callable) – The learning rate or schedule function to +apply after the warmup for the rest of training.

  • +
  • warmup_steps (int) – The number of steps for the warmup part of training.

  • +
  • power (float, optional) – Power to use for the polynomial warmup +(defaults is a linear warmup).

  • +
  • name (str, optional) – Optional name prefix for the returned tensors +during the schedule.

  • +
+
+
+
+
+get_config()[source]
+
+ +
+ +
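A hypothetical usage sketch pairing WarmUp with a Keras decay schedule; the step counts and learning rates are arbitrary choices for illustration.

import tensorflow as tf
from baskerville import trainer

# linear warmup to 1e-4 over 5,000 steps, then cosine decay
decay = tf.keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=1e-4, decay_steps=100_000)
schedule = trainer.WarmUp(
    initial_learning_rate=1e-4,
    warmup_steps=5_000,
    decay_schedule=decay,
)
optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)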
+
+baskerville.trainer.adaptive_clip_grad(parameters, gradients, clip_factor: float = 0.1, eps: float = 0.001)[source]
+

Adaptive gradient clipping.

+
+ +
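The idea behind adaptive gradient clipping, sketched per whole tensor for brevity; the library version works unit-wise via unitwise_norm (see below), and the thresholds here are illustrative.

import tensorflow as tf

def adaptive_clip_grad_sketch(parameters, gradients,
                              clip_factor=0.1, eps=1e-3):
    """Illustrative AGC: rescale each gradient whose norm exceeds
    clip_factor times its parameter's norm, keeping update size
    proportional to weight size."""
    new_grads = []
    for p, g in zip(parameters, gradients):
        p_norm = tf.maximum(tf.norm(p), eps)  # floor tiny weights
        g_norm = tf.norm(g)
        max_norm = clip_factor * p_norm
        clipped = tf.where(
            g_norm > max_norm, g * (max_norm / (g_norm + 1e-12)), g)
        new_grads.append(clipped)
    return new_grads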
+
+baskerville.trainer.compute_norm(x, axis, keepdims)[source]
+

Compute L2 norm of a tensor across an axis.

+
+ +
+
+baskerville.trainer.parse_loss(loss_label, strategy=None, keras_fit: bool = True, spec_weight: float = 1, total_weight: float = 1)[source]
+

Parse loss function from label, strategy, and fitting method.

+
+
Parameters:
+
    +
  • loss_label (str) – Loss function label.

  • +
  • strategy – tf.distribute.Strategy object.

  • +
  • keras_fit (bool) – Use Keras fit method instead of custom loop.

  • +
  • spec_weight (float) – Specificity weight for PoissonKL.

  • +
  • total_weight (float) – Total weight for PoissonMultinomial.

  • +
+
+
Returns:
+

tf.keras.losses.Loss object.

+
+
Return type:
+

loss_fn

+
+
+
+ +
+
+baskerville.trainer.safe_next(data_iter, retry=5, sleep=10)[source]
+
+ +
+
+baskerville.trainer.unitwise_norm(x)[source]
+

Compute L2 norm of a tensor across its last dimension.

+
+ +
+
+

baskerville.vcf module

+
+
+class baskerville.vcf.SNP(vcf_line, pos2=False)[source]
+

Bases: object

+

Represent SNPs read in from a VCF file.

+
+
+vcf_line
+
+
Type:
+

str

+
+
+
+ +
+
+flip_alleles()[source]
+

Flip reference and first alt allele.

+
+ +
+
+get_alleles()[source]
+

Return a list of all alleles.

+
+ +
+
+indel_size()[source]
+

Return the size of the indel.

+
+ +
+
+longest_alt()[source]
+

Return the longest alt allele.

+
+ +
+ +
+
+baskerville.vcf.cap_allele(allele, cap=5)[source]
+

Cap the length of an allele in the figures.

+
+ +
+
+baskerville.vcf.dna_length_1hot(seq, length)[source]
+

Adjust the sequence length and compute a 1hot coding.

+
+ +
+
+baskerville.vcf.intersect_seqs_snps(vcf_file, seqs, vision_p=1)[source]
+

Intersect a VCF file with a list of sequence coordinates.

+
+
In

vcf_file: VCF file
seqs: list of objects with chrom, start, end
vision_p: proportion of sequences visible to center genes

+
+
Out

seqs_snps: list of lists mapping segment indexes to overlapping SNP indexes

+
+
+
+ +
+
+baskerville.vcf.intersect_snps_seqs(vcf_file, seq_coords, vision_p=1)[source]
+

Intersect a VCF file with a list of sequence coordinates.

+
+
In

vcf_file: VCF file
seq_coords: list of sequence coordinates
vision_p: proportion of sequences visible to center genes

+
+
Out

snp_segs: list of lists mapping SNP indexes to overlapping sequence indexes

+
+
+
+ +
+
+baskerville.vcf.snp_seq1(snp, seq_len, genome_open)[source]
+

Produce one hot coded sequences for a SNP.

+
+
Attrs:

snp (SNP): the SNP
seq_len (int): sequence length to code
genome_open (File): open genome FASTA file

+
+
+
+
Returns:
+

list of one hot coded sequences surrounding the SNP

+
+
Return type:
+

seq_vecs_list [array]

+
+
+
+ +
+
+baskerville.vcf.snps2_seq1(snps, seq_len, genome1_fasta, genome2_fasta, return_seqs=False)[source]
+

Produce an array of one hot coded sequences for a list of SNPs.

+
+
Attrs:

snps ([SNP]): list of SNPs
seq_len (int): sequence length to code
genome1_fasta (str): major allele genome FASTA file
genome2_fasta (str): minor allele genome FASTA file

+
+
+
+
Returns:
+

one hot coded sequences surrounding the SNPs
seq_headers ([str]): headers for sequences
seq_snps ([SNP]): list of used SNPs

+
+
Return type:
+

seq_vecs (array)

+
+
+
+ +
+
+baskerville.vcf.snps_seq1(snps, seq_len, genome_fasta, return_seqs=False)[source]
+

Produce an array of one hot coded sequences for a list of SNPs.

+
+
Attrs:

snps ([SNP]): list of SNPs
seq_len (int): sequence length to code
genome_fasta (str): genome FASTA file

+
+
+
+
Returns:
+

one hot coded sequences surrounding the SNPs
seq_headers ([str]): headers for sequences
seq_snps ([SNP]): list of used SNPs

+
+
Return type:
+

seq_vecs (array)

+
+
+
+ +
+
+baskerville.vcf.vcf_count(vcf_file)[source]
+

Count SNPs in a VCF file.

+
+ +
+
+baskerville.vcf.vcf_snps(vcf_file, require_sorted=False, validate_ref_fasta=None, flip_ref=False, pos2=False, start_i=None, end_i=None)[source]
+

Load SNPs from a VCF file.

+
+ +
+
+baskerville.vcf.vcf_sort(vcf_file)[source]
+
+ +
+
+

Module contents

"conv_layer_i": 0, "emb": 0, "build_ensembl": [0, 1], "ensemble_rc": 0, "ensemble_shift": 0, "ensembl": 0, "build_model": [0, 1], "save_repr": 0, "build_sad": [0, 1], "sum": 0, "graph": 0, "build_slic": [0, 1], "target_sum": 0, "downcast": [0, 1], "head_i": 0, "evalu": [0, 1], "seq_data": 0, "loss_label": 0, "loss_fn": 0, "get_bn_lay": [0, 1], "bn_layer_i": 0, "get_conv_lay": [0, 1], "get_conv_weight": [0, 1], "kernel": 0, "get_dense_lay": [0, 1], "layer_i": 0, "gradient": [0, 1], "pos_slic": 0, "pos_mask": 0, "pos_slice_denom": 0, "pos_mask_denom": 0, "chunk_siz": 0, "track_scal": 0, "track_transform": 0, "clip_soft": 0, "pseudo_count": 0, "no_transform": 0, "use_mean": 0, "use_ratio": 0, "use_logodd": 0, "subtract_avg": 0, "input_g": 0, "smooth_grad": 0, "5": 0, "sample_prob": 0, "875": 0, "gpu": 0, "friendli": 0, "gradients_func": [0, 1], "gradients_func_orig": [0, 1], "consid": 0, "grad": 0, "gradients_orig": [0, 1], "onc": 0, "stream": 0, "float32": 0, "more": 0, "restor": [0, 1], "model_fil": 0, "trunk": 0, "save": [0, 1], "set_default": [0, 1], "necessari": 0, "my": 0, "bespok": 0, "best": 0, "closer": 0, "track_sequ": [0, 1], "snpcluster": [0, 1], "add_snp": [0, 1], "cluster": 0, "delimit": [0, 1], "boundari": 0, "get_1hot": [0, 1], "genome_open": 0, "cluster_snp": [0, 1], "center_pct": 0, "sort": 0, "satisfi": 0, "percent": 0, "initialize_output_h5": [0, 1], "snp_stat": 0, "targets_length": 0, "num_shift": 0, "hdf5": 0, "sad": 0, "stat": 0, "make_alt_1hot": [0, 1], "ref_1hot": 0, "snp_seq_po": 0, "ref_allel": 0, "alt_allel": 0, "altern": 0, "allel": 0, "refer": 0, "make_strand_transform": [0, 1], "targets_strand_df": 0, "spars": 0, "scipi": 0, "csr_matrix": 0, "score_snp": [0, 1], "params_fil": 0, "vcf_file": 0, "worker_index": 0, "score": 0, "cmd": 0, "write_pct": [0, 1], "scores_out": 0, "percentil": 0, "h5py": 0, "write_snp": [0, 1], "ref_preds_sum": 0, "alt_preds_sum": 0, "si": 0, "hdf": 0, "dimens": 0, "been": 0, "write_snp_len": [0, 1], "ref_pr": 0, "alt_pr": 0, "maintain": 0, "cyclical1learningr": [0, 1], "initial_learning_r": 0, "maximal_learning_r": 0, "final_learning_r": 0, "step_siz": 0, "learningrateschedul": 0, "cyclic": 0, "schedul": 0, "http": 0, "yashuseth": 0, "blog": 0, "2018": 0, "11": 0, "26": 0, "hyper": 0, "tune": 0, "practic": 0, "decai": 0, "maxim": 0, "warm": 0, "cycl": 0, "earlystoppingmin": [0, 1], "min_epoch": 0, "earlystop": 0, "stop": 0, "when": 0, "monitor": 0, "quantiti": 0, "improv": 0, "minimum": 0, "on_epoch_end": [0, 1], "ani": 0, "action": 0, "run": 0, "dure": 0, "integ": 0, "prefix": 0, "val_": 0, "For": 0, "exampl": 0, "loss": 0, "accuraci": 0, "train_data": 0, "eval_data": 0, "num_gpu": 0, "keras_fit": 0, "fit": 0, "custom": 0, "loop": 0, "compil": [0, 1], "seqnn_model": 0, "fit2": [0, 1], "separ": 0, "fit_kera": [0, 1], "fit_tap": [0, 1], "gradienttap": 0, "make_optim": [0, 1], "optim": 0, "warmup": [0, 1], "warmup_step": 0, "decay_schedul": 0, "h": 0, "huggingfac": 0, "so": 0, "callabl": 0, "rest": 0, "part": 0, "polynomi": 0, "adaptive_clip_grad": [0, 1], "clip_factor": 0, "ep": 0, "adapt": 0, "compute_norm": [0, 1], "keepdim": 0, "norm": 0, "parse_loss": [0, 1], "spec_weight": 0, "pars": 0, "poissionmultinomi": 0, "safe_next": [0, 1], "data_it": 0, "retri": 0, "sleep": 0, "10": 0, "unitwise_norm": [0, 1], "vcf_line": [0, 1], "pos2": 0, "flip_allel": [0, 1], "flip": 0, "alt": 0, "get_allel": [0, 1], "indel_s": [0, 1], "indel": 0, "longest_alt": [0, 1], "longest": 0, "cap_allel": [0, 1], "cap": 0, "figur": 0, "dna_length_1hot": [0, 1], 
"1hot": 0, "intersect_seqs_snp": [0, 1], "vision_p": 0, "intersect": 0, "proport": 0, "visibl": 0, "out": 0, "seqs_snp": 0, "map": 0, "segment": 0, "overlap": 0, "intersect_snps_seq": [0, 1], "seq_coord": 0, "snp_seg": 0, "snp_seq1": [0, 1], "produc": 0, "attr": 0, "open": 0, "genom": 0, "fasta": 0, "surround": 0, "seq_vecs_list": 0, "snps2_seq1": [0, 1], "genome1_fasta": 0, "genome2_fasta": 0, "return_seq": 0, "genome_fasta": 0, "major": 0, "minor": 0, "seq_head": 0, "header": 0, "seq_snp": 0, "seq_vec": 0, "snps_seq1": [0, 1], "vcf_count": [0, 1], "vcf_snp": [0, 1], "require_sort": 0, "validate_ref_fasta": 0, "flip_ref": 0, "start_i": 0, "end_i": 0, "load": 0, "vcf_sort": [0, 1], "packag": 1, "submodul": 1, "bed": 1, "modul": 1, "block": 1, "dataset": 1, "dna": 1, "gene": 1, "layer": 1, "metric": 1, "seqnn": 1, "snp": 1, "trainer": 1, "vcf": 1, "search": 1, "page": 1}, "objects": {"": [[0, 0, 0, "-", "baskerville"]], "baskerville": [[0, 0, 0, "-", "bed"], [0, 0, 0, "-", "blocks"], [0, 0, 0, "-", "dataset"], [0, 0, 0, "-", "dna"], [0, 0, 0, "-", "gene"], [0, 0, 0, "-", "layers"], [0, 0, 0, "-", "metrics"], [0, 0, 0, "-", "seqnn"], [0, 0, 0, "-", "snps"], [0, 0, 0, "-", "trainer"], [0, 0, 0, "-", "vcf"]], "baskerville.bed": [[0, 1, 1, "", "make_bed_seqs"], [0, 1, 1, "", "read_bed_coords"], [0, 1, 1, "", "write_bedgraph"]], "baskerville.blocks": [[0, 1, 1, "", "center_average"], [0, 1, 1, "", "center_slice"], [0, 1, 1, "", "concat_dist_2d"], [0, 1, 1, "", "concat_position"], [0, 1, 1, "", "conv_block"], [0, 1, 1, "", "conv_block_2d"], [0, 1, 1, "", "conv_dna"], [0, 1, 1, "", "conv_nac"], [0, 1, 1, "", "conv_next"], [0, 1, 1, "", "conv_tower"], [0, 1, 1, "", "conv_tower_nac"], [0, 1, 1, "", "conv_tower_v1"], [0, 1, 1, "", "convnext_tower"], [0, 1, 1, "", "cropping_2d"], [0, 1, 1, "", "dense_block"], [0, 1, 1, "", "dense_nac"], [0, 1, 1, "", "dilated_dense"], [0, 1, 1, "", "dilated_residual"], [0, 1, 1, "", "dilated_residual_2d"], [0, 1, 1, "", "dilated_residual_nac"], [0, 1, 1, "", "factor_inverse"], [0, 1, 1, "", "final"], [0, 1, 1, "", "global_context"], [0, 1, 1, "", "one_to_two"], [0, 1, 1, "", "res_tower"], [0, 1, 1, "", "squeeze_excite"], [0, 1, 1, "", "swin_transformer"], [0, 1, 1, "", "symmetrize_2d"], [0, 1, 1, "", "tconv_nac"], [0, 1, 1, "", "transformer"], [0, 1, 1, "", "transformer2"], [0, 1, 1, "", "transformer_dense"], [0, 1, 1, "", "transformer_split"], [0, 1, 1, "", "transformer_tower"], [0, 1, 1, "", "unet_concat"], [0, 1, 1, "", "unet_conv"], [0, 1, 1, "", "upper_tri"], [0, 1, 1, "", "wheeze_excite"]], "baskerville.dataset": [[0, 2, 1, "", "SeqDataset"], [0, 1, 1, "", "file_to_records"], [0, 1, 1, "", "targets_prep_strand"], [0, 1, 1, "", "untransform_preds"], [0, 1, 1, "", "untransform_preds1"]], "baskerville.dataset.SeqDataset": [[0, 3, 1, "", "batches_per_epoch"], [0, 3, 1, "", "compute_stats"], [0, 3, 1, "", "distribute"], [0, 3, 1, "", "generate_parser"], [0, 3, 1, "", "make_dataset"], [0, 3, 1, "", "numpy"]], "baskerville.dna": [[0, 1, 1, "", "dna_1hot"], [0, 1, 1, "", "dna_1hot_index"], [0, 1, 1, "", "dna_rc"], [0, 1, 1, "", "hot1_augment"], [0, 1, 1, "", "hot1_delete"], [0, 1, 1, "", "hot1_dna"], [0, 1, 1, "", "hot1_get"], [0, 1, 1, "", "hot1_insert"], [0, 1, 1, "", "hot1_rc"], [0, 1, 1, "", "hot1_set"]], "baskerville.gene": [[0, 2, 1, "", "Gene"], [0, 2, 1, "", "GenomicInterval"], [0, 2, 1, "", "Transcriptome"], [0, 1, 1, "", "gtf_kv"]], "baskerville.gene.Gene": [[0, 3, 1, "", "add_exon"], [0, 3, 1, "", "get_exons"], [0, 3, 1, "", "midpoint"], [0, 3, 1, "", 
"output_slice"], [0, 3, 1, "", "span"]], "baskerville.gene.Transcriptome": [[0, 3, 1, "", "bedtool_exon"], [0, 3, 1, "", "bedtool_span"], [0, 3, 1, "", "read_gtf"], [0, 3, 1, "", "write_bed_exon"], [0, 3, 1, "", "write_bed_span"]], "baskerville.layers": [[0, 2, 1, "", "CenterAverage"], [0, 2, 1, "", "CenterSlice"], [0, 2, 1, "", "ConcatDist2D"], [0, 2, 1, "", "ConcatPosition"], [0, 2, 1, "", "EnsembleReverseComplement"], [0, 2, 1, "", "EnsembleShift"], [0, 2, 1, "", "FactorInverse"], [0, 2, 1, "", "GlobalContext"], [0, 2, 1, "", "LengthAverage"], [0, 2, 1, "", "MultiheadAttention"], [0, 2, 1, "", "OneToTwo"], [0, 2, 1, "", "Scale"], [0, 2, 1, "", "SoftmaxPool1D"], [0, 2, 1, "", "Softplus"], [0, 2, 1, "", "SqueezeExcite"], [0, 2, 1, "", "StochasticReverseComplement"], [0, 2, 1, "", "StochasticShift"], [0, 2, 1, "", "SwitchReverse"], [0, 2, 1, "", "SwitchReverseTriu"], [0, 2, 1, "", "Symmetrize2D"], [0, 2, 1, "", "UpperTri"], [0, 2, 1, "", "WheezeExcite"], [0, 1, 1, "", "activate"], [0, 1, 1, "", "positional_features"], [0, 1, 1, "", "positional_features_central_mask"], [0, 1, 1, "", "relative_shift"], [0, 1, 1, "", "shift_sequence"]], "baskerville.layers.CenterAverage": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.CenterSlice": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.ConcatDist2D": [[0, 3, 1, "", "call"]], "baskerville.layers.ConcatPosition": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.EnsembleReverseComplement": [[0, 3, 1, "", "call"]], "baskerville.layers.EnsembleShift": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.FactorInverse": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.GlobalContext": [[0, 3, 1, "", "build"], [0, 3, 1, "", "call"]], "baskerville.layers.LengthAverage": [[0, 3, 1, "", "call"]], "baskerville.layers.MultiheadAttention": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.OneToTwo": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.Scale": [[0, 3, 1, "", "build"], [0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.SoftmaxPool1D": [[0, 3, 1, "", "build"], [0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.Softplus": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.SqueezeExcite": [[0, 3, 1, "", "build"], [0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.StochasticReverseComplement": [[0, 3, 1, "", "call"]], "baskerville.layers.StochasticShift": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.SwitchReverse": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.SwitchReverseTriu": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.Symmetrize2D": [[0, 3, 1, "", "call"]], "baskerville.layers.UpperTri": [[0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.layers.WheezeExcite": [[0, 3, 1, "", "build"], [0, 3, 1, "", "call"], [0, 3, 1, "", "get_config"]], "baskerville.metrics": [[0, 2, 1, "", "MeanSquaredErrorUDot"], [0, 2, 1, "", "PearsonR"], [0, 2, 1, "", "PoissonKL"], [0, 2, 1, "", "PoissonMultinomial"], [0, 2, 1, "", "R2"], [0, 2, 1, "", "SeqAUC"], [0, 1, 1, "", "mean_squared_error_udot"], [0, 1, 1, "", "poisson_kl"], [0, 1, 1, "", "poisson_multinomial"]], "baskerville.metrics.PearsonR": [[0, 3, 1, "", "reset_state"], [0, 3, 1, "", "result"], [0, 3, 1, "", "update_state"]], "baskerville.metrics.R2": [[0, 3, 
1, "", "reset_state"], [0, 3, 1, "", "result"], [0, 3, 1, "", "update_state"]], "baskerville.metrics.SeqAUC": [[0, 3, 1, "", "interpolate_pr_auc"], [0, 3, 1, "", "result"], [0, 3, 1, "", "update_state"]], "baskerville.seqnn": [[0, 2, 1, "", "SeqNN"]], "baskerville.seqnn.SeqNN": [[0, 3, 1, "", "build_block"], [0, 3, 1, "", "build_embed"], [0, 3, 1, "", "build_ensemble"], [0, 3, 1, "", "build_model"], [0, 3, 1, "", "build_sad"], [0, 3, 1, "", "build_slice"], [0, 3, 1, "", "downcast"], [0, 3, 1, "", "evaluate"], [0, 3, 1, "", "get_bn_layer"], [0, 3, 1, "", "get_conv_layer"], [0, 3, 1, "", "get_conv_weights"], [0, 3, 1, "", "get_dense_layer"], [0, 3, 1, "", "gradients"], [0, 3, 1, "", "gradients_func"], [0, 3, 1, "", "gradients_func_orig"], [0, 3, 1, "", "gradients_orig"], [0, 3, 1, "", "num_targets"], [0, 3, 1, "", "predict"], [0, 3, 1, "", "restore"], [0, 3, 1, "", "save"], [0, 3, 1, "", "set_defaults"], [0, 3, 1, "", "step"], [0, 3, 1, "", "track_sequence"]], "baskerville.snps": [[0, 2, 1, "", "SNPCluster"], [0, 1, 1, "", "cluster_snps"], [0, 1, 1, "", "initialize_output_h5"], [0, 1, 1, "", "make_alt_1hot"], [0, 1, 1, "", "make_strand_transform"], [0, 1, 1, "", "score_snps"], [0, 1, 1, "", "write_pct"], [0, 1, 1, "", "write_snp"], [0, 1, 1, "", "write_snp_len"]], "baskerville.snps.SNPCluster": [[0, 3, 1, "", "add_snp"], [0, 3, 1, "", "delimit"], [0, 3, 1, "", "get_1hots"]], "baskerville.trainer": [[0, 2, 1, "", "Cyclical1LearningRate"], [0, 2, 1, "", "EarlyStoppingMin"], [0, 2, 1, "", "Trainer"], [0, 2, 1, "", "WarmUp"], [0, 1, 1, "", "adaptive_clip_grad"], [0, 1, 1, "", "compute_norm"], [0, 1, 1, "", "parse_loss"], [0, 1, 1, "", "safe_next"], [0, 1, 1, "", "unitwise_norm"]], "baskerville.trainer.Cyclical1LearningRate": [[0, 3, 1, "", "get_config"]], "baskerville.trainer.EarlyStoppingMin": [[0, 3, 1, "", "on_epoch_end"]], "baskerville.trainer.Trainer": [[0, 3, 1, "", "compile"], [0, 3, 1, "", "fit2"], [0, 3, 1, "", "fit_keras"], [0, 3, 1, "", "fit_tape"], [0, 3, 1, "", "make_optimizer"]], "baskerville.trainer.WarmUp": [[0, 3, 1, "", "get_config"]], "baskerville.vcf": [[0, 2, 1, "", "SNP"], [0, 1, 1, "", "cap_allele"], [0, 1, 1, "", "dna_length_1hot"], [0, 1, 1, "", "intersect_seqs_snps"], [0, 1, 1, "", "intersect_snps_seqs"], [0, 1, 1, "", "snp_seq1"], [0, 1, 1, "", "snps2_seq1"], [0, 1, 1, "", "snps_seq1"], [0, 1, 1, "", "vcf_count"], [0, 1, 1, "", "vcf_snps"], [0, 1, 1, "", "vcf_sort"]], "baskerville.vcf.SNP": [[0, 3, 1, "", "flip_alleles"], [0, 3, 1, "", "get_alleles"], [0, 3, 1, "", "indel_size"], [0, 3, 1, "", "longest_alt"], [0, 4, 1, "", "vcf_line"]]}, "objtypes": {"0": "py:module", "1": "py:function", "2": "py:class", "3": "py:method", "4": "py:attribute"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "function", "Python function"], "2": ["py", "class", "Python class"], "3": ["py", "method", "Python method"], "4": ["py", "attribute", "Python attribute"]}, "titleterms": {"baskervil": [0, 1], "packag": 0, "submodul": 0, "bed": 0, "modul": 0, "block": 0, "dataset": 0, "dna": 0, "gene": 0, "layer": 0, "metric": 0, "seqnn": 0, "snp": 0, "trainer": 0, "vcf": 0, "content": [0, 1], "welcom": 1, "": 1, "document": 1, "indic": 1, "tabl": 1}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.viewcode": 1, "sphinx": 60}, 
"alltitles": {"baskerville package": [[0, "baskerville-package"]], "Submodules": [[0, "submodules"]], "baskerville.bed module": [[0, "module-baskerville.bed"]], "baskerville.blocks module": [[0, "module-baskerville.blocks"]], "baskerville.dataset module": [[0, "module-baskerville.dataset"]], "baskerville.dna module": [[0, "module-baskerville.dna"]], "baskerville.gene module": [[0, "module-baskerville.gene"]], "baskerville.layers module": [[0, "module-baskerville.layers"]], "baskerville.metrics module": [[0, "module-baskerville.metrics"]], "baskerville.seqnn module": [[0, "module-baskerville.seqnn"]], "baskerville.snps module": [[0, "module-baskerville.snps"]], "baskerville.trainer module": [[0, "module-baskerville.trainer"]], "baskerville.vcf module": [[0, "module-baskerville.vcf"]], "Module contents": [[0, "module-baskerville"]], "Welcome to baskerville\u2019s documentation!": [[1, "welcome-to-baskerville-s-documentation"]], "Contents:": [[1, null]], "Indices and tables": [[1, "indices-and-tables"]]}, "indexentries": {"centeraverage (class in baskerville.layers)": [[0, "baskerville.layers.CenterAverage"]], "centerslice (class in baskerville.layers)": [[0, "baskerville.layers.CenterSlice"]], "concatdist2d (class in baskerville.layers)": [[0, "baskerville.layers.ConcatDist2D"]], "concatposition (class in baskerville.layers)": [[0, "baskerville.layers.ConcatPosition"]], "cyclical1learningrate (class in baskerville.trainer)": [[0, "baskerville.trainer.Cyclical1LearningRate"]], "earlystoppingmin (class in baskerville.trainer)": [[0, "baskerville.trainer.EarlyStoppingMin"]], "ensemblereversecomplement (class in baskerville.layers)": [[0, "baskerville.layers.EnsembleReverseComplement"]], "ensembleshift (class in baskerville.layers)": [[0, "baskerville.layers.EnsembleShift"]], "factorinverse (class in baskerville.layers)": [[0, "baskerville.layers.FactorInverse"]], "gene (class in baskerville.gene)": [[0, "baskerville.gene.Gene"]], "genomicinterval (class in baskerville.gene)": [[0, "baskerville.gene.GenomicInterval"]], "globalcontext (class in baskerville.layers)": [[0, "baskerville.layers.GlobalContext"]], "lengthaverage (class in baskerville.layers)": [[0, "baskerville.layers.LengthAverage"]], "meansquarederrorudot (class in baskerville.metrics)": [[0, "baskerville.metrics.MeanSquaredErrorUDot"]], "multiheadattention (class in baskerville.layers)": [[0, "baskerville.layers.MultiheadAttention"]], "onetotwo (class in baskerville.layers)": [[0, "baskerville.layers.OneToTwo"]], "pearsonr (class in baskerville.metrics)": [[0, "baskerville.metrics.PearsonR"]], "poissonkl (class in baskerville.metrics)": [[0, "baskerville.metrics.PoissonKL"]], "poissonmultinomial (class in baskerville.metrics)": [[0, "baskerville.metrics.PoissonMultinomial"]], "r2 (class in baskerville.metrics)": [[0, "baskerville.metrics.R2"]], "snp (class in baskerville.vcf)": [[0, "baskerville.vcf.SNP"]], "snpcluster (class in baskerville.snps)": [[0, "baskerville.snps.SNPCluster"]], "scale (class in baskerville.layers)": [[0, "baskerville.layers.Scale"]], "seqauc (class in baskerville.metrics)": [[0, "baskerville.metrics.SeqAUC"]], "seqdataset (class in baskerville.dataset)": [[0, "baskerville.dataset.SeqDataset"]], "seqnn (class in baskerville.seqnn)": [[0, "baskerville.seqnn.SeqNN"]], "softmaxpool1d (class in baskerville.layers)": [[0, "baskerville.layers.SoftmaxPool1D"]], "softplus (class in baskerville.layers)": [[0, "baskerville.layers.Softplus"]], "squeezeexcite (class in baskerville.layers)": [[0, 
"baskerville.layers.SqueezeExcite"]], "stochasticreversecomplement (class in baskerville.layers)": [[0, "baskerville.layers.StochasticReverseComplement"]], "stochasticshift (class in baskerville.layers)": [[0, "baskerville.layers.StochasticShift"]], "switchreverse (class in baskerville.layers)": [[0, "baskerville.layers.SwitchReverse"]], "switchreversetriu (class in baskerville.layers)": [[0, "baskerville.layers.SwitchReverseTriu"]], "symmetrize2d (class in baskerville.layers)": [[0, "baskerville.layers.Symmetrize2D"]], "trainer (class in baskerville.trainer)": [[0, "baskerville.trainer.Trainer"]], "transcriptome (class in baskerville.gene)": [[0, "baskerville.gene.Transcriptome"]], "uppertri (class in baskerville.layers)": [[0, "baskerville.layers.UpperTri"]], "warmup (class in baskerville.trainer)": [[0, "baskerville.trainer.WarmUp"]], "wheezeexcite (class in baskerville.layers)": [[0, "baskerville.layers.WheezeExcite"]], "activate() (in module baskerville.layers)": [[0, "baskerville.layers.activate"]], "adaptive_clip_grad() (in module baskerville.trainer)": [[0, "baskerville.trainer.adaptive_clip_grad"]], "add_exon() (baskerville.gene.gene method)": [[0, "baskerville.gene.Gene.add_exon"]], "add_snp() (baskerville.snps.snpcluster method)": [[0, "baskerville.snps.SNPCluster.add_snp"]], "baskerville": [[0, "module-baskerville"]], "baskerville.bed": [[0, "module-baskerville.bed"]], "baskerville.blocks": [[0, "module-baskerville.blocks"]], "baskerville.dataset": [[0, "module-baskerville.dataset"]], "baskerville.dna": [[0, "module-baskerville.dna"]], "baskerville.gene": [[0, "module-baskerville.gene"]], "baskerville.layers": [[0, "module-baskerville.layers"]], "baskerville.metrics": [[0, "module-baskerville.metrics"]], "baskerville.seqnn": [[0, "module-baskerville.seqnn"]], "baskerville.snps": [[0, "module-baskerville.snps"]], "baskerville.trainer": [[0, "module-baskerville.trainer"]], "baskerville.vcf": [[0, "module-baskerville.vcf"]], "batches_per_epoch() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.batches_per_epoch"]], "bedtool_exon() (baskerville.gene.transcriptome method)": [[0, "baskerville.gene.Transcriptome.bedtool_exon"]], "bedtool_span() (baskerville.gene.transcriptome method)": [[0, "baskerville.gene.Transcriptome.bedtool_span"]], "build() (baskerville.layers.globalcontext method)": [[0, "baskerville.layers.GlobalContext.build"]], "build() (baskerville.layers.scale method)": [[0, "baskerville.layers.Scale.build"]], "build() (baskerville.layers.softmaxpool1d method)": [[0, "baskerville.layers.SoftmaxPool1D.build"]], "build() (baskerville.layers.squeezeexcite method)": [[0, "baskerville.layers.SqueezeExcite.build"]], "build() (baskerville.layers.wheezeexcite method)": [[0, "baskerville.layers.WheezeExcite.build"]], "build_block() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_block"]], "build_embed() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_embed"]], "build_ensemble() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_ensemble"]], "build_model() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_model"]], "build_sad() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_sad"]], "build_slice() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.build_slice"]], "call() (baskerville.layers.centeraverage method)": [[0, "baskerville.layers.CenterAverage.call"]], "call() (baskerville.layers.centerslice method)": [[0, 
"baskerville.layers.CenterSlice.call"]], "call() (baskerville.layers.concatdist2d method)": [[0, "baskerville.layers.ConcatDist2D.call"]], "call() (baskerville.layers.concatposition method)": [[0, "baskerville.layers.ConcatPosition.call"]], "call() (baskerville.layers.ensemblereversecomplement method)": [[0, "baskerville.layers.EnsembleReverseComplement.call"]], "call() (baskerville.layers.ensembleshift method)": [[0, "baskerville.layers.EnsembleShift.call"]], "call() (baskerville.layers.factorinverse method)": [[0, "baskerville.layers.FactorInverse.call"]], "call() (baskerville.layers.globalcontext method)": [[0, "baskerville.layers.GlobalContext.call"]], "call() (baskerville.layers.lengthaverage method)": [[0, "baskerville.layers.LengthAverage.call"]], "call() (baskerville.layers.multiheadattention method)": [[0, "baskerville.layers.MultiheadAttention.call"]], "call() (baskerville.layers.onetotwo method)": [[0, "baskerville.layers.OneToTwo.call"]], "call() (baskerville.layers.scale method)": [[0, "baskerville.layers.Scale.call"]], "call() (baskerville.layers.softmaxpool1d method)": [[0, "baskerville.layers.SoftmaxPool1D.call"]], "call() (baskerville.layers.softplus method)": [[0, "baskerville.layers.Softplus.call"]], "call() (baskerville.layers.squeezeexcite method)": [[0, "baskerville.layers.SqueezeExcite.call"]], "call() (baskerville.layers.stochasticreversecomplement method)": [[0, "baskerville.layers.StochasticReverseComplement.call"]], "call() (baskerville.layers.stochasticshift method)": [[0, "baskerville.layers.StochasticShift.call"]], "call() (baskerville.layers.switchreverse method)": [[0, "baskerville.layers.SwitchReverse.call"]], "call() (baskerville.layers.switchreversetriu method)": [[0, "baskerville.layers.SwitchReverseTriu.call"]], "call() (baskerville.layers.symmetrize2d method)": [[0, "baskerville.layers.Symmetrize2D.call"]], "call() (baskerville.layers.uppertri method)": [[0, "baskerville.layers.UpperTri.call"]], "call() (baskerville.layers.wheezeexcite method)": [[0, "baskerville.layers.WheezeExcite.call"]], "cap_allele() (in module baskerville.vcf)": [[0, "baskerville.vcf.cap_allele"]], "center_average() (in module baskerville.blocks)": [[0, "baskerville.blocks.center_average"]], "center_slice() (in module baskerville.blocks)": [[0, "baskerville.blocks.center_slice"]], "cluster_snps() (in module baskerville.snps)": [[0, "baskerville.snps.cluster_snps"]], "compile() (baskerville.trainer.trainer method)": [[0, "baskerville.trainer.Trainer.compile"]], "compute_norm() (in module baskerville.trainer)": [[0, "baskerville.trainer.compute_norm"]], "compute_stats() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.compute_stats"]], "concat_dist_2d() (in module baskerville.blocks)": [[0, "baskerville.blocks.concat_dist_2d"]], "concat_position() (in module baskerville.blocks)": [[0, "baskerville.blocks.concat_position"]], "conv_block() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_block"]], "conv_block_2d() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_block_2d"]], "conv_dna() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_dna"]], "conv_nac() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_nac"]], "conv_next() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_next"]], "conv_tower() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_tower"]], "conv_tower_nac() (in module baskerville.blocks)": [[0, "baskerville.blocks.conv_tower_nac"]], "conv_tower_v1() (in module 
baskerville.blocks)": [[0, "baskerville.blocks.conv_tower_v1"]], "convnext_tower() (in module baskerville.blocks)": [[0, "baskerville.blocks.convnext_tower"]], "cropping_2d() (in module baskerville.blocks)": [[0, "baskerville.blocks.cropping_2d"]], "delimit() (baskerville.snps.snpcluster method)": [[0, "baskerville.snps.SNPCluster.delimit"]], "dense_block() (in module baskerville.blocks)": [[0, "baskerville.blocks.dense_block"]], "dense_nac() (in module baskerville.blocks)": [[0, "baskerville.blocks.dense_nac"]], "dilated_dense() (in module baskerville.blocks)": [[0, "baskerville.blocks.dilated_dense"]], "dilated_residual() (in module baskerville.blocks)": [[0, "baskerville.blocks.dilated_residual"]], "dilated_residual_2d() (in module baskerville.blocks)": [[0, "baskerville.blocks.dilated_residual_2d"]], "dilated_residual_nac() (in module baskerville.blocks)": [[0, "baskerville.blocks.dilated_residual_nac"]], "distribute() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.distribute"]], "dna_1hot() (in module baskerville.dna)": [[0, "baskerville.dna.dna_1hot"]], "dna_1hot_index() (in module baskerville.dna)": [[0, "baskerville.dna.dna_1hot_index"]], "dna_length_1hot() (in module baskerville.vcf)": [[0, "baskerville.vcf.dna_length_1hot"]], "dna_rc() (in module baskerville.dna)": [[0, "baskerville.dna.dna_rc"]], "downcast() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.downcast"]], "evaluate() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.evaluate"]], "factor_inverse() (in module baskerville.blocks)": [[0, "baskerville.blocks.factor_inverse"]], "file_to_records() (in module baskerville.dataset)": [[0, "baskerville.dataset.file_to_records"]], "final() (in module baskerville.blocks)": [[0, "baskerville.blocks.final"]], "fit2() (baskerville.trainer.trainer method)": [[0, "baskerville.trainer.Trainer.fit2"]], "fit_keras() (baskerville.trainer.trainer method)": [[0, "baskerville.trainer.Trainer.fit_keras"]], "fit_tape() (baskerville.trainer.trainer method)": [[0, "baskerville.trainer.Trainer.fit_tape"]], "flip_alleles() (baskerville.vcf.snp method)": [[0, "baskerville.vcf.SNP.flip_alleles"]], "generate_parser() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.generate_parser"]], "get_1hots() (baskerville.snps.snpcluster method)": [[0, "baskerville.snps.SNPCluster.get_1hots"]], "get_alleles() (baskerville.vcf.snp method)": [[0, "baskerville.vcf.SNP.get_alleles"]], "get_bn_layer() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.get_bn_layer"]], "get_config() (baskerville.layers.centeraverage method)": [[0, "baskerville.layers.CenterAverage.get_config"]], "get_config() (baskerville.layers.centerslice method)": [[0, "baskerville.layers.CenterSlice.get_config"]], "get_config() (baskerville.layers.concatposition method)": [[0, "baskerville.layers.ConcatPosition.get_config"]], "get_config() (baskerville.layers.ensembleshift method)": [[0, "baskerville.layers.EnsembleShift.get_config"]], "get_config() (baskerville.layers.factorinverse method)": [[0, "baskerville.layers.FactorInverse.get_config"]], "get_config() (baskerville.layers.multiheadattention method)": [[0, "baskerville.layers.MultiheadAttention.get_config"]], "get_config() (baskerville.layers.onetotwo method)": [[0, "baskerville.layers.OneToTwo.get_config"]], "get_config() (baskerville.layers.scale method)": [[0, "baskerville.layers.Scale.get_config"]], "get_config() (baskerville.layers.softmaxpool1d method)": [[0, 
"baskerville.layers.SoftmaxPool1D.get_config"]], "get_config() (baskerville.layers.softplus method)": [[0, "baskerville.layers.Softplus.get_config"]], "get_config() (baskerville.layers.squeezeexcite method)": [[0, "baskerville.layers.SqueezeExcite.get_config"]], "get_config() (baskerville.layers.stochasticshift method)": [[0, "baskerville.layers.StochasticShift.get_config"]], "get_config() (baskerville.layers.switchreverse method)": [[0, "baskerville.layers.SwitchReverse.get_config"]], "get_config() (baskerville.layers.switchreversetriu method)": [[0, "baskerville.layers.SwitchReverseTriu.get_config"]], "get_config() (baskerville.layers.uppertri method)": [[0, "baskerville.layers.UpperTri.get_config"]], "get_config() (baskerville.layers.wheezeexcite method)": [[0, "baskerville.layers.WheezeExcite.get_config"]], "get_config() (baskerville.trainer.cyclical1learningrate method)": [[0, "baskerville.trainer.Cyclical1LearningRate.get_config"]], "get_config() (baskerville.trainer.warmup method)": [[0, "baskerville.trainer.WarmUp.get_config"]], "get_conv_layer() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.get_conv_layer"]], "get_conv_weights() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.get_conv_weights"]], "get_dense_layer() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.get_dense_layer"]], "get_exons() (baskerville.gene.gene method)": [[0, "baskerville.gene.Gene.get_exons"]], "global_context() (in module baskerville.blocks)": [[0, "baskerville.blocks.global_context"]], "gradients() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.gradients"]], "gradients_func() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.gradients_func"]], "gradients_func_orig() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.gradients_func_orig"]], "gradients_orig() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.gradients_orig"]], "gtf_kv() (in module baskerville.gene)": [[0, "baskerville.gene.gtf_kv"]], "hot1_augment() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_augment"]], "hot1_delete() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_delete"]], "hot1_dna() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_dna"]], "hot1_get() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_get"]], "hot1_insert() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_insert"]], "hot1_rc() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_rc"]], "hot1_set() (in module baskerville.dna)": [[0, "baskerville.dna.hot1_set"]], "indel_size() (baskerville.vcf.snp method)": [[0, "baskerville.vcf.SNP.indel_size"]], "initialize_output_h5() (in module baskerville.snps)": [[0, "baskerville.snps.initialize_output_h5"]], "interpolate_pr_auc() (baskerville.metrics.seqauc method)": [[0, "baskerville.metrics.SeqAUC.interpolate_pr_auc"]], "intersect_seqs_snps() (in module baskerville.vcf)": [[0, "baskerville.vcf.intersect_seqs_snps"]], "intersect_snps_seqs() (in module baskerville.vcf)": [[0, "baskerville.vcf.intersect_snps_seqs"]], "longest_alt() (baskerville.vcf.snp method)": [[0, "baskerville.vcf.SNP.longest_alt"]], "make_alt_1hot() (in module baskerville.snps)": [[0, "baskerville.snps.make_alt_1hot"]], "make_bed_seqs() (in module baskerville.bed)": [[0, "baskerville.bed.make_bed_seqs"]], "make_dataset() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.make_dataset"]], "make_optimizer() (baskerville.trainer.trainer method)": [[0, 
"baskerville.trainer.Trainer.make_optimizer"]], "make_strand_transform() (in module baskerville.snps)": [[0, "baskerville.snps.make_strand_transform"]], "mean_squared_error_udot() (in module baskerville.metrics)": [[0, "baskerville.metrics.mean_squared_error_udot"]], "midpoint() (baskerville.gene.gene method)": [[0, "baskerville.gene.Gene.midpoint"]], "module": [[0, "module-baskerville"], [0, "module-baskerville.bed"], [0, "module-baskerville.blocks"], [0, "module-baskerville.dataset"], [0, "module-baskerville.dna"], [0, "module-baskerville.gene"], [0, "module-baskerville.layers"], [0, "module-baskerville.metrics"], [0, "module-baskerville.seqnn"], [0, "module-baskerville.snps"], [0, "module-baskerville.trainer"], [0, "module-baskerville.vcf"]], "num_targets() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.num_targets"]], "numpy() (baskerville.dataset.seqdataset method)": [[0, "baskerville.dataset.SeqDataset.numpy"]], "on_epoch_end() (baskerville.trainer.earlystoppingmin method)": [[0, "baskerville.trainer.EarlyStoppingMin.on_epoch_end"]], "one_to_two() (in module baskerville.blocks)": [[0, "baskerville.blocks.one_to_two"]], "output_slice() (baskerville.gene.gene method)": [[0, "baskerville.gene.Gene.output_slice"]], "parse_loss() (in module baskerville.trainer)": [[0, "baskerville.trainer.parse_loss"]], "poisson_kl() (in module baskerville.metrics)": [[0, "baskerville.metrics.poisson_kl"]], "poisson_multinomial() (in module baskerville.metrics)": [[0, "baskerville.metrics.poisson_multinomial"]], "positional_features() (in module baskerville.layers)": [[0, "baskerville.layers.positional_features"]], "positional_features_central_mask() (in module baskerville.layers)": [[0, "baskerville.layers.positional_features_central_mask"]], "predict() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.predict"]], "read_bed_coords() (in module baskerville.bed)": [[0, "baskerville.bed.read_bed_coords"]], "read_gtf() (baskerville.gene.transcriptome method)": [[0, "baskerville.gene.Transcriptome.read_gtf"]], "relative_shift() (in module baskerville.layers)": [[0, "baskerville.layers.relative_shift"]], "res_tower() (in module baskerville.blocks)": [[0, "baskerville.blocks.res_tower"]], "reset_state() (baskerville.metrics.pearsonr method)": [[0, "baskerville.metrics.PearsonR.reset_state"]], "reset_state() (baskerville.metrics.r2 method)": [[0, "baskerville.metrics.R2.reset_state"]], "restore() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.restore"]], "result() (baskerville.metrics.pearsonr method)": [[0, "baskerville.metrics.PearsonR.result"]], "result() (baskerville.metrics.r2 method)": [[0, "baskerville.metrics.R2.result"]], "result() (baskerville.metrics.seqauc method)": [[0, "baskerville.metrics.SeqAUC.result"]], "safe_next() (in module baskerville.trainer)": [[0, "baskerville.trainer.safe_next"]], "save() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.save"]], "score_snps() (in module baskerville.snps)": [[0, "baskerville.snps.score_snps"]], "set_defaults() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.set_defaults"]], "shift_sequence() (in module baskerville.layers)": [[0, "baskerville.layers.shift_sequence"]], "snp_seq1() (in module baskerville.vcf)": [[0, "baskerville.vcf.snp_seq1"]], "snps2_seq1() (in module baskerville.vcf)": [[0, "baskerville.vcf.snps2_seq1"]], "snps_seq1() (in module baskerville.vcf)": [[0, "baskerville.vcf.snps_seq1"]], "span() (baskerville.gene.gene method)": [[0, 
"baskerville.gene.Gene.span"]], "squeeze_excite() (in module baskerville.blocks)": [[0, "baskerville.blocks.squeeze_excite"]], "step() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.step"]], "swin_transformer() (in module baskerville.blocks)": [[0, "baskerville.blocks.swin_transformer"]], "symmetrize_2d() (in module baskerville.blocks)": [[0, "baskerville.blocks.symmetrize_2d"]], "targets_prep_strand() (in module baskerville.dataset)": [[0, "baskerville.dataset.targets_prep_strand"]], "tconv_nac() (in module baskerville.blocks)": [[0, "baskerville.blocks.tconv_nac"]], "track_sequence() (baskerville.seqnn.seqnn method)": [[0, "baskerville.seqnn.SeqNN.track_sequence"]], "transformer() (in module baskerville.blocks)": [[0, "baskerville.blocks.transformer"]], "transformer2() (in module baskerville.blocks)": [[0, "baskerville.blocks.transformer2"]], "transformer_dense() (in module baskerville.blocks)": [[0, "baskerville.blocks.transformer_dense"]], "transformer_split() (in module baskerville.blocks)": [[0, "baskerville.blocks.transformer_split"]], "transformer_tower() (in module baskerville.blocks)": [[0, "baskerville.blocks.transformer_tower"]], "unet_concat() (in module baskerville.blocks)": [[0, "baskerville.blocks.unet_concat"]], "unet_conv() (in module baskerville.blocks)": [[0, "baskerville.blocks.unet_conv"]], "unitwise_norm() (in module baskerville.trainer)": [[0, "baskerville.trainer.unitwise_norm"]], "untransform_preds() (in module baskerville.dataset)": [[0, "baskerville.dataset.untransform_preds"]], "untransform_preds1() (in module baskerville.dataset)": [[0, "baskerville.dataset.untransform_preds1"]], "update_state() (baskerville.metrics.pearsonr method)": [[0, "baskerville.metrics.PearsonR.update_state"]], "update_state() (baskerville.metrics.r2 method)": [[0, "baskerville.metrics.R2.update_state"]], "update_state() (baskerville.metrics.seqauc method)": [[0, "baskerville.metrics.SeqAUC.update_state"]], "upper_tri() (in module baskerville.blocks)": [[0, "baskerville.blocks.upper_tri"]], "vcf_count() (in module baskerville.vcf)": [[0, "baskerville.vcf.vcf_count"]], "vcf_line (baskerville.vcf.snp attribute)": [[0, "baskerville.vcf.SNP.vcf_line"]], "vcf_snps() (in module baskerville.vcf)": [[0, "baskerville.vcf.vcf_snps"]], "vcf_sort() (in module baskerville.vcf)": [[0, "baskerville.vcf.vcf_sort"]], "wheeze_excite() (in module baskerville.blocks)": [[0, "baskerville.blocks.wheeze_excite"]], "write_bed_exon() (baskerville.gene.transcriptome method)": [[0, "baskerville.gene.Transcriptome.write_bed_exon"]], "write_bed_span() (baskerville.gene.transcriptome method)": [[0, "baskerville.gene.Transcriptome.write_bed_span"]], "write_bedgraph() (in module baskerville.bed)": [[0, "baskerville.bed.write_bedgraph"]], "write_pct() (in module baskerville.snps)": [[0, "baskerville.snps.write_pct"]], "write_snp() (in module baskerville.snps)": [[0, "baskerville.snps.write_snp"]], "write_snp_len() (in module baskerville.snps)": [[0, "baskerville.snps.write_snp_len"]]}}) \ No newline at end of file