
Python interface


The interface is available here.

Operators

Examples of use


Create net

Creating a Network Architecture

class Net():
    """Net object."""

    _net = 0
    _nodes = []
    _errCBack = 0
    _userCBack = {}

    def __init__(self, jnNet : str = '', weightPath : str = ''):
        """
        init
        :param jnNet: network architecture in JSON
        :param weightPath: path to weight file
        """
        if (len(jnNet) > 0):
            self._createNetJn(jnNet)

        if (self._net and (len(weightPath) > 0)):
            self.loadAllWeightFromFile(weightPath)
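A net can thus be restored from a previously saved architecture and weight file. A minimal sketch; 'arch.json' and 'w.dat' are hypothetical files produced by getGetArchitecNet and saveAllWeightToFile, described below:

    with open('arch.json') as f:
        jnNet = f.read()                 # JSON saved earlier via getGetArchitecNet()

    net = snNet.Net(jnNet, 'w.dat')      # weights saved earlier via saveAllWeightToFile()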

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15), 'C2') \
   .addNode('C2', snOperator.Convolution(25), 'P1') \
   .addNode('P1', snOperator.Pooling(), 'F1') \
   .addNode('F1', snOperator.FullyConnected(256), 'UCB') \
   .addNode('UCB', snOperator.UserLayer('myLayer'), 'F2') \
   .addNode('F2', snOperator.FullyConnected(10), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

Training net

You can train a network in two ways:

  • by calling the single function 'training';
  • or in the standard way: run a forward pass ('forward'), compute the error yourself, and pass the gradient back ('backward') (see the sketch below).
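A sketch of the second way, assuming the signatures of this interface, forward(isLern, inTns, outTns) and backward(lr, gradTns); a forward call also appears in the ResNet50 example below:

    net.forward(True, inLayer, outLayer)   # forward pass in training mode
    grad = outLayer - targLayer            # your own error gradient
    net.backward(0.001, grad)              # pass the gradient back through the net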

Let's see the first option.

def training(self, lr: float, inTns: numpy.ndarray, outTns: numpy.ndarray,
             trgTns: numpy.ndarray, outAccurate : []) -> bool:
    """
    Training net - a forward<->backward cycle with error calculation
    :param lr: learning rate
    :param inTns: input tensor NCHW(bsz, ch, h, w)
    :param outTns: output tensor NCHW(bsz, ch, h, w)
    :param trgTns: target tensor
    :param outAccurate: accuracy estimate, returned as outAccurate[0]
    :return: True ok
    """

Example:

# training loop
accuratSumm = 0.
for n in range(1000):

    targLayer[...] = 0

    for i in range(bsz):
        ndir = random.randint(0, 10 - 1)
        nimg = random.randint(0, len(imgList[ndir]) - 1)
        inLayer[i][0] = imageio.imread(pathImg + str(ndir) + '/' + imgList[ndir][nimg])

        targLayer[i][0][0][ndir] = 1.

    acc = [0]
    net.training(0.001, inLayer, outLayer, targLayer, acc)

    accuratSumm += acc[0]

    print(n, "accurate", accuratSumm / (n + 1))

The function takes a batch of input data and the target result.
It returns the network output and the accuracy estimate for the batch.

The accuracy ('accurate') is calculated as:

    snFloat* targetData = targetTens->getData();
    snFloat* outData = outTens->getData();
    
    size_t accCnt = 0, osz = outTens->size().size();
    for (size_t i = 0; i < osz; ++i){

        if (abs(outData[i] - targetData[i]) < 0.1)
            ++accCnt; 
    }

    return (accCnt * 1.F) / osz;
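The same default metric in Python terms, a sketch mirroring the C++ above; outLayer and targLayer are the batch tensors from the example:

    # fraction of output values within 0.1 of the target
    acc = float(np.mean(np.abs(outLayer - targLayer) < 0.1))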

Architecture of net

Getting the network structure as JSON.

def getGetArchitecNet(self) -> str:
    """
    Architecture of the net
    :return: architecture in JSON; '' on error
    """

Save and load weight of layer

def setWeightNode(self, nodeName: str, weight: numpy.ndarray) -> bool:
    """
    Set weight of node ('channels first' [bsz,ch,h,w])
    :param nodeName: node name
    :param weight: weight array NCHW(bsz, ch, h, w)
    :return: True ok
    """

def getWeightNode(self, nodeName: str, weight: [numpy.ndarray]) -> bool:
    """
    Get weight of node ('channels first' [bsz,ch,h,w])
    :param nodeName: node name
    :param weight: output weight array NCHW(bsz, ch, h, w), returned as weight[0]
    :return: True ok
    """

def loadAllWeightFromFile(self, weightPath : str) -> bool:
    """
    Load all weights from file
    :param weightPath: weight file path
    :return: True ok
    """

def saveAllWeightToFile(self, weightPath: str) -> bool:
    """
    Save all weights to file
    :param weightPath: weight file path
    :return: True ok
    """

Set and get params of layer

def addNode(self, name : str, nd : snOperator, nextNodes : str):
    """
    Add node
    :param name: node name
    :param nd: operator node
    :param nextNodes: next nodes, separated by spaces
    :return: True ok
    """

def updateNode(self, name : str, nd : snOperator) -> bool:
    """
    Update node params
    :param name: node name
    :param nd: operator node
    :return: True ok
    """

def getOutputNode(self, nodeName: str, output: [numpy.ndarray]) -> bool:
    """
    Get output of node
    :param nodeName: node name
    :param output: output array NCHW(bsz, ch, h, w), returned as output[0]
    :return: True ok
    """

Monitoring gradients and weights

You can specify your own callback function, and insert your 'UserLayer' node after the node of interest.

def addUserCallBack(self, ucbName: str, ucb) -> bool:
    """
    User callback for the 'UserLayer' node and the 'LossFunction' node
    :param ucbName: callback name
    :param ucb: callback function
    :return: True ok

    ucb = function(None,
                   str,                 # user callback name
                   str,                 # node name
                   bool,                # current action: forward(True) or backward(False)
                   inLayer: ndarray,    # input layer - received from the previous node
                   outLayer: [ndarray], # output layer - sent to the next node
                   )
    """

Example:

def myLayer(ucbName: str,            # user callback name
            nodeName: str,           # node name
            isFwdBwd: bool,          # current action: forward(True) or backward(False)
            inLayer: np.ndarray,     # input layer - received from the previous node
            outLayer: [np.ndarray]): # output layer - sent to the next node
    pass

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15), 'C2') \
   .addNode('C2', snOperator.Convolution(25), 'P1') \
   .addNode('P1', snOperator.Pooling(), 'F1') \
   .addNode('F1', snOperator.FullyConnected(256), 'UCB') \
   .addNode('UCB', snOperator.UserLayer('myLayer'), 'F2') \
   .addNode('F2', snOperator.FullyConnected(10), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

# user cback
net.addUserCallBack('myLayer', myLayer)

Input

The input node receives user data and passes it further along the chain.

  
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
....

Output

A separate Output operator is not implemented, since it is not needed.
For the last node, the next node is simply set to "Output".

Example:

net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15), 'C2') \
   .
   .
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

FullyConnected

   class FullyConnected():
    '''Fully connected layer'''

    _params = {
    'units' : '0',                           # Number of out neurons. !Required parameter [0..)
    'active' : active.relu.value,            # Activation function type. Optional parameter
    'optimizer' : optimizer.adam.value,      # Optimizer of weights. Optional parameter
    'dropOut' : '0',                         # Random disconnection of neurons. Optional parameter [0..1.F]
    'batchNorm' : batchNormType.none.value,  # Type of batch norm. Optional parameter
    'gpuDeviceId' : '0',                     # GPU Id. Optional parameter
    'freeze' :'0',                           # Do not change weights. Optional parameter
    'useBias' :'1',                          # +bias. Optional parameter
    'weightInit' : weightInit.he.value,      # Type of initialization of weights. Optional parameter
    'decayMomentDW' : '0.9',                 # Optimizer of weights moment change. Optional parameter [0..1.F]
    'decayMomentWGr' : '0.99',               # Optimizer of weights moment change of prev. Optional parameter
    'lmbRegular' : '0.001',                  # Optimizer of weights l2Norm. Optional parameter [0..1.F]
    'batchNormLr' : '0.001'                  # Learning rate for batch norm coef. Optional parameter [0..)
    }

    def __init__(self,
                 units,
                 act=active.relu,
        .... 

The default parameters are specified.
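For example, overriding one of the defaults through the constructor (a minimal sketch; only the 'units' and 'act' parameters are visible in the excerpt above):

    fc = snOperator.FullyConnected(256, act=snType.active.sigmoid)
    net.addNode('F1', fc, 'F2')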

Convolution

 class Convolution():
    '''Convolution layer'''

    _params = {
    'filters' : '0',                         # Number of output layers. !Required parameter [0..)
    'fWidth': '3',                           # Width of mask. Optional parameter(> 0)
    'fHeight': '3',                          # Height of mask. Optional parameter(> 0)
    'padding': '0',                          # Padding around the edges. Optional parameter
    'stride':'1',                            # Mask movement step. Optional parameter(> 0)
    'dilate': '1',                           # Expansion mask. Optional parameter(> 0)
    'active' : active.relu.value,            # Activation function type. Optional parameter
    'optimizer' : optimizer.adam.value,      # Optimizer of weights. Optional parameter
    'dropOut' : '0',                         # Random disconnection of neurons. Optional parameter [0..1.F]
    'batchNorm' : batchNormType.none.value,  # Type of batch norm. Optional parameter
    'gpuDeviceId' : '0',                     # GPU Id. Optional parameter
    'freeze' :'0',                           # Do not change weights. Optional parameter
    'useBias' :'1',                          # +bias. Optional parameter
    'weightInit' : weightInit.he.value,      # Type of initialization of weights. Optional parameter
    'decayMomentDW' : '0.9',                 # Optimizer of weights moment change. Optional parameter [0..1.F]
    'decayMomentWGr' : '0.99',               # Optimizer of weights moment change of prev. Optional parameter
    'lmbRegular' : '0.001',                  # Optimizer of weights l2Norm. Optional parameter [0..1.F]
    'batchNormLr' : '0.001'                  # Learning rate for batch norm coef. Optional parameter [0..)
    }

    def __init__(self,
                 filters,
                 act=active.relu,
        ....

The default parameters are specified.

Deconvolution

 class Deconvolution():
    '''Deconvolution layer'''

    _params = {
        'filters': '0',                        # Number of output layers. !Required parameter [0..)
        'fWidth': '3',                         # Width of mask. Optional parameter(> 0)
        'fHeight': '3',                        # Height of mask. Optional parameter(> 0)
        'stride': '2',                         # Mask movement step. Optional parameter(> 0)
        'active': active.relu.value,           # Activation function type. Optional parameter
        'optimizer': optimizer.adam.value,     # Optimizer of weights. Optional parameter
        'dropOut': '0',                        # Random disconnection of neurons. Optional parameter [0..1.F]
        'batchNorm': batchNormType.none.value, # Type of batch norm. Optional parameter
        'gpuDeviceId': '0',                    # GPU Id. Optional parameter
        'freeze': '0',                         # Do not change weights. Optional parameter
        'weightInit': weightInit.he.value,     # Type of initialization of weights. Optional parameter
        'decayMomentDW': '0.9',                # Optimizer of weights moment change. Optional parameter [0..1.F]
        'decayMomentWGr': '0.99',              # Optimizer of weights moment change of prev. Optional parameter
        'lmbRegular': '0.001',                 # Optimizer of weights l2Norm. Optional parameter [0..1.F]
        'batchNormLr': '0.001'                 # Learning rate for batch norm coef. Optional parameter [0..)
    }

    def __init__(self,
                 filters,
                 act=active.relu,
        .....

The default parameters are specified.

Pooling

   class Pooling():
    '''Pooling layer'''

    _params = {
        'kernel': '2',              # Square Mask Size. Optional parameter (> 0)
        'stride': '2',              # Mask movement step. Optional parameter(> 0)
        'pool': poolType.max.value, # Operator Type. Optional parameter
        'gpuDeviceId': '0',         # GPU Id. Optional parameter
   }

    def __init__(self,
                 kernel = 2,
        .... 

The default parameters are specified.
If the mask does not fit completely within the image, the image is automatically extended at the edges. For example, with kernel 2 and stride 2, a 7×7 map is extended to 8×8 and pooled to 4×4.

LossFunction

Operator for automatic error calculation.
Depending on the task the network solves, it supports the following error types:

  • "softMaxToCrossEntropy" - for multiclass classification
  • "binaryCrossEntropy" - for binary classification
  • "regressionMSE" - regression of a function with a least-squares estimate
  • "userLoss" - a user-defined operator

 class LossFunction():
    '''Error function calculation layer'''

    _params = {
        'loss': lossType.softMaxToCrossEntropy.value,
        'cbackName': ''    # for user cback
    }
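For example, for a regression task (a minimal sketch):

    net.addNode('LS', snOperator.LossFunction(snType.lossType.regressionMSE), 'Output')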

Switch

Operator for passing data to several nodes at once.
The output path can be changed while the net runs, via the net.updateNode() function.
Data can be received from only one node.

 class Switch():
  
    _params = {
        'nextWay':''       # next nodes, separated by spaces
    }

    def __init__(self, nextWay : str):
        self._params['nextWay'] = nextWay

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'SW') \
   .addNode('SW', snOperator.Switch('FC1 FC2'), 'FC1 FC2') \
   .addNode('FC1', snOperator.FullyConnected(256), 'Sum') \
   .addNode('FC2', snOperator.FullyConnected(256), 'Sum') \
   .addNode('Sum', snOperator.Summator(), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

Lock

Operator for blocking further computation at the current location.
The output path can be changed while the net runs, via the net.updateNode() function.
It is designed to dynamically disconnect parallel branches of the network during operation.

  class Lock():
  
    _params = {
        'state':lockType.unlock.value   # Blocking activity. Optional parameter
    }

    def __init__(self, lock):
        self._params['state'] = lock.value

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'SW') \
   .addNode('SW', snOperator.Switch('FC1 FC2'), 'FC1 FC2') \
   .addNode('FC1', snOperator.FullyConnected(256), 'Sum') \
   .addNode('LC', snOperator.Lock(snType.lockType.unlock), 'Sum') \
   .addNode('FC2', snOperator.FullyConnected(256), 'Sum') \
   .addNode('Sum', snOperator.Summator(), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')
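A branch can then be disconnected and reconnected while the net runs, via net.updateNode() as described above (a minimal sketch; it assumes the lockType enum provides 'lock' alongside 'unlock'):

    # block the branch passing through 'LC'
    net.updateNode('LC', snOperator.Lock(snType.lockType.lock))

    # and unblock it again
    net.updateNode('LC', snOperator.Lock(snType.lockType.unlock))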

Summator

The operator combines the values of two layers.
The combination can be performed in one of the following ways: "summ", "diff", "mean".
The dimensions of the input layers must be the same.

class Summator():
    
    _params = {
        'type': summatorType.summ.value
    }

    def __init__(self, type : summatorType):
        self._params['type'] = type.value

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'SW') \
   .addNode('SW', snOperator.Switch('FC1 FC2'), 'FC1 FC2') \
   .addNode('FC1', snOperator.FullyConnected(256), 'Sum') \
   .addNode('LC', snOperator.Lock(snType.lockType.unlock), 'Sum') \
   .addNode('FC2', snOperator.FullyConnected(256), 'Sum') \
   .addNode('Sum', snOperator.Summator(), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

Crop

Clips a region of interest (ROI) from each image of each channel.

 class Crop():
   
    _params = {
        'roi': '0 0 0 0'     # region of interest
    }

    def __init__(self, rct: rect):
        self._params['roi'] = rct.value()
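For example (a minimal sketch; the snType.rect signature also appears in the UNET-tiny example below):

    net.addNode('Crop1', snOperator.Crop(snType.rect(0, 0, 100, 100)), 'C2')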

Concat

The operator concatenates the channels of several layers.

class Concat():
    
    _params = {
        'sequence': ''         # previous nodes, separated by spaces
    }
	
    def __init__(self, sequence: str):
        self._params['sequence'] = sequence

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1 C2') \
   .addNode('C1', snOperator.Convolution(20), 'R1') \
   .addNode('R1', snOperator.Resize(snType.diap(0, 20), snType.diap(0, 20)), 'Conc') \
   .addNode('C2', snOperator.Convolution(20), 'R2') \
   .addNode('R2', snOperator.Resize(snType.diap(0, 20), snType.diap(20, 40)), 'Conc') \
   .addNode('Conc', snOperator.Concat('R1 R2'), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

Resize

Changes the number of channels passed between nodes.
Works in conjunction with "Concat".

 class Resize():
   
    _params = {
        'fwdDiap': '0 0',     # channel range on the forward pass, separated by a space
        'bwdDiap': '0 0'      # channel range on the backward pass
    }

    def __init__(self, fwdDiap: diap, bwdDiap: diap):
        self._params['fwdDiap'] = fwdDiap.value()
        self._params['bwdDiap'] = bwdDiap.value()

Example:

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1 C2') \
   .addNode('C1', snOperator.Convolution(20), 'R1') \
   .addNode('R1', snOperator.Resize(snType.diap(0, 20), snType.diap(0, 20)), 'Conc') \
   .addNode('C2', snOperator.Convolution(20), 'R2') \
   .addNode('R2', snOperator.Resize(snType.diap(0, 20), snType.diap(20, 40)), 'Conc') \
   .addNode('Conc', snOperator.Concat('R1 R2'), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

Activation

Activation function operator.

 
class Activation():
    '''
    Activation function operator.
    '''

    _params = {
        'active': active.relu.value
    }

    def __init__(self, act=active.relu):
        self._params['active'] = act.value
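For example (a minimal sketch):

    net.addNode('A1', snOperator.Activation(snType.active.sigmoid), 'F2')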

BatchNorm

class BatchNormLayer():
    '''
    Batch norm
    '''

    _params = {
        'bnType': batchNormType.byChannels.value,  # Type of batch norm. Optional parameter
    }
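A minimal usage sketch (it assumes the constructor can be called without arguments, matching the defaults in _params):

    net.addNode('BN1', snOperator.BatchNormLayer(), 'C2')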

UserLayer

Custom layer.
The callback is set by the user with the 'net.addUserCallBack' function.

 class UserLayer():
    
    _params = {
        'cbackName': ''
    }

    def __init__(self, cbackName: str):
        self._params['cbackName'] = cbackName

Example:

def myLayer(ucbName: str,            # user callback name
            nodeName: str,           # node name
            isFwdBwd: bool,          # current action: forward(True) or backward(False)
            inLayer: np.ndarray,     # input layer - received from the previous node
            outLayer: [np.ndarray]): # output layer - sent to the next node
    pass

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15), 'C2') \
   .addNode('C2', snOperator.Convolution(25), 'P1') \
   .addNode('P1', snOperator.Pooling(), 'F1') \
   .addNode('F1', snOperator.FullyConnected(256), 'UCB') \
   .addNode('UCB', snOperator.UserLayer('myLayer'), 'F2') \
   .addNode('F2', snOperator.FullyConnected(10), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

# user cback
net.addUserCallBack('myLayer', myLayer)

MNIST

import os

from libsunnet import *
import numpy as np
import imageio
import random
import ctypes
import datetime


# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15, (3, 3)), 'C2') \
   .addNode('C2', snOperator.Convolution(25, (3, 3)), 'P1') \
   .addNode('P1', snOperator.Pooling(snType.poolType.max), 'F1') \
   .addNode('F1', snOperator.FullyConnected(256), 'F2') \
   .addNode('F2', snOperator.FullyConnected(10), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

# load weights
#if (net.loadAllWeightFromFile('c:/cpp/w.dat')):
#    print('weights loaded')
#else:
#    print('error loading weights')

# load image lists
imgList = []
pathImg = 'c:\\cpp\\other\\sunnet\\example\\mnist\\images\\'
for i in range(10):
   imgList.append(os.listdir(pathImg + str(i)))

bsz = 100
lr = 0.001
accuratSumm = 0.
inLayer = np.zeros((bsz, 1, 28, 28), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 1, 10), ctypes.c_float)
targLayer = np.zeros((bsz, 1, 1, 10), ctypes.c_float)
imgMem = {}

# training loop
for n in range(1000):

    targLayer[...] = 0

    for i in range(bsz):
        ndir = random.randint(0, 10 - 1)
        nimg = random.randint(0, len(imgList[ndir]) - 1)

        nm = pathImg + str(ndir) + '/' + imgList[ndir][nimg]
        if (nm in imgMem):
            inLayer[i][0] = imgMem[nm]
        else:
            inLayer[i][0] = imageio.imread(nm)
            imgMem[nm] = inLayer[i][0].copy()

        targLayer[i][0][0][ndir] = 1.

    acc = [0]  # the default accuracy metric is not used
    net.training(lr, inLayer, outLayer, targLayer, acc)

    # calc accurate
    acc[0] = 0
    for i in range(bsz):
        if (np.argmax(outLayer[i][0][0]) == np.argmax(targLayer[i][0][0])):
            acc[0] += 1

    accuratSumm += acc[0]/bsz

    print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))

# save weights
if (net.saveAllWeightToFile('c:/cpp/w.dat')):
    print('weights saved')
else:
    print('error saving weights')

CIFAR-10

import os

from libsunnet import *
import numpy as np
import imageio
import random
import ctypes
import datetime

# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'C1') \
   .addNode('C1', snOperator.Convolution(15, (3, 3), -1, 1, snType.batchNormType.beforeActive), 'C2') \
   .addNode('C2', snOperator.Convolution(15, (3, 3), 0, 1, snType.batchNormType.beforeActive), 'P1') \
   .addNode('P1', snOperator.Pooling(snType.poolType.max), 'C3') \
   .addNode('C3', snOperator.Convolution(25, (3, 3), -1, 1, snType.batchNormType.beforeActive), 'C4') \
   .addNode('C4', snOperator.Convolution(25, (3, 3), 0, 1, snType.batchNormType.beforeActive), 'P2') \
   .addNode('P2', snOperator.Pooling(snType.poolType.max), 'C5') \
   .addNode('C5', snOperator.Convolution(40, (3, 3), -1, 1, snType.batchNormType.beforeActive), 'C6') \
   .addNode('C6', snOperator.Convolution(40, (3, 3), 0, 1, snType.batchNormType.beforeActive), 'P3') \
   .addNode('P3', snOperator.Pooling(snType.poolType.max), 'F1') \
   .addNode('F1', snOperator.FullyConnected(2048), 'F2') \
   .addNode('F2', snOperator.FullyConnected(128), 'F3') \
   .addNode('F3', snOperator.FullyConnected(10), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.softMaxToCrossEntropy), 'Output')

# load image lists
imgList = []
pathImg = 'c:/cpp/other/sunnet/example/cifar10/images/'
for i in range(10):
   imgList.append(os.listdir(pathImg + str(i)))

bsz = 100
lr = 0.0001
accuratSumm = 0.
inLayer = np.zeros((bsz, 3, 32, 32), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 1, 10), ctypes.c_float)
targLayer = np.zeros((bsz, 1, 1, 10), ctypes.c_float)

# training loop
for n in range(1000):

    targLayer[...] = 0

    for i in range(bsz):
        ndir = random.randint(0, 10 - 1)
        nimg = random.randint(0, len(imgList[ndir]) - 1)
        inLayer[i] = imageio.imread(pathImg + str(ndir) + '/' + imgList[ndir][nimg]).reshape(3,32,32)

        targLayer[i][0][0][ndir] = 1.

    acc = [0]  # the default accuracy metric is not used
    net.training(lr, inLayer, outLayer, targLayer, acc)

    # calc accurate
    acc[0] = 0
    for i in range(bsz):
        if (np.argmax(outLayer[i][0][0]) == np.argmax(targLayer[i][0][0])):
            acc[0] += 1

    accuratSumm += acc[0]/bsz

    print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))

UNET-tiny

import os

from libsunnet import *
import numpy as np
import imageio
import random
import ctypes
import datetime


# create net
net = snNet.Net()
net.addNode("In", snOperator.Input(), "C1") \
   .addNode("C1", snOperator.Convolution(10, (3, 3), -1), "C2") \
   .addNode("C2",snOperator.Convolution(10,(3, 3), 0), "P1 Crop1") \
   .addNode("Crop1", snOperator.Crop(snType.rect(0, 0, 487, 487)), "Rsz1") \
   .addNode("Rsz1", snOperator.Resize(snType.diap(0, 10), snType.diap(0, 10)), "Conc1") \
   .addNode("P1", snOperator.Pooling(), "C3") \
   \
   .addNode("C3", snOperator.Convolution(10,(3, 3), -1), "C4") \
   .addNode("C4", snOperator.Convolution(10,(3, 3), 0), "P2 Crop2") \
   .addNode("Crop2", snOperator.Crop(snType.rect(0, 0, 247, 247)), "Rsz2") \
   .addNode("Rsz2", snOperator.Resize(snType.diap(0, 10), snType.diap(0, 10)), "Conc2") \
   .addNode("P2", snOperator.Pooling(), "C5") \
   \
   .addNode("C5", snOperator.Convolution(10,(3, 3), 0), "C6") \
   .addNode("C6", snOperator.Convolution(10,(3, 3), 0), "DC1") \
   .addNode("DC1", snOperator.Deconvolution(10), "Rsz3") \
   .addNode("Rsz3", snOperator.Resize(snType.diap(0, 10), snType.diap(10, 20)), "Conc2") \
   \
   .addNode("Conc2", snOperator.Concat("Rsz2 Rsz3"), "C7") \
   \
   .addNode("C7", snOperator.Convolution(10,(3, 3), 0), "C8") \
   .addNode("C8", snOperator.Convolution(10,(3, 3), 0), "DC2") \
   .addNode("DC2", snOperator.Deconvolution(10), "Rsz4") \
   .addNode("Rsz4", snOperator.Resize(snType.diap(0, 10), snType.diap(10, 20)), "Conc1") \
   \
   .addNode("Conc1", snOperator.Concat("Rsz1 Rsz4"), "C9") \
   \
   .addNode("C9", snOperator.Convolution(10,(3, 3), 0), "C10")

convOut = snOperator.Convolution(1, (3, 3), 0)
convOut.act = snType.active.sigmoid
net.addNode("C10", convOut, "LS") \
   .addNode('LS', snOperator.LossFunction(snType.lossType.binaryCrossEntropy), 'Output')

# load image lists

pathImg = 'c:/cpp/other/sunnet/example/unet/images/'
imgList = os.listdir(pathImg)

pathLabel= 'c:/cpp/other/sunnet/example/unet/labels/'
labelsList = os.listdir(pathLabel)

bsz = 5
lr = 0.001
accuratSumm = 0.
inLayer = np.zeros((bsz, 1, 512, 512), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 483, 483), ctypes.c_float)
targLayer = np.zeros((bsz, 1, 483, 483), ctypes.c_float)

# training loop
for n in range(1000):

    targLayer[...] = 0

    for i in range(bsz):
        nimg = random.randint(0, len(imgList) - 1)
        inLayer[i] = imageio.imread(pathImg + imgList[nimg])

        targLayer[i] = np.resize(imageio.imread(pathLabel + labelsList[nimg]), (1, 483, 483)) / 255.

    acc = [0]  # the default accuracy metric is not used
    net.training(lr, inLayer, outLayer, targLayer, acc)

    accuratSumm += acc[0]

    print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))

ResNet50

from libsunnet import *
import numpy as np
import ctypes
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input


### Create net
net = snResNet50.createNet()

### Set weight
weightTF = snWeight.getResNet50Weights()

if (not snResNet50.setWeights(net, weightTF)):
    print('Error setWeights')
    exit(-1)

#################################

img_path = 'c:\\cpp\\other\\sunnet\\example\\resnet50\\images\\elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x) # (224,224,3)

x = np.moveaxis(x, -1, 1)

outAr = np.zeros((1, 1, 1, 1000), ctypes.c_float)

import time

for i in range(100):
    ct = time.time()
    net.forward(False, x.copy(), outAr)
    print(time.time() - ct)

    mx = np.argmax(outAr[0])

    # for check: c:\cpp\other\sunnet\example\resnet50\imagenet_class_index.json
    print('Predicted:', mx, 'val', outAr[0][0][0][mx])

AutoEncoder

import os

from libsunnet import *
import numpy as np
import imageio
import random
import ctypes
import datetime


# create net
net = snNet.Net()
net.addNode('In', snOperator.Input(), 'FC1') \
   .addNode('FC1', snOperator.FullyConnected(256), 'FC2') \
   .addNode('FC2', snOperator.FullyConnected(128), 'FC3') \
   .addNode('FC3', snOperator.FullyConnected(32), 'FC4') \
   .addNode('FC4', snOperator.FullyConnected(128), 'FC5') \
   .addNode('FC5', snOperator.FullyConnected(256), 'FC6') \
   .addNode('FC6', snOperator.FullyConnected(784), 'LS') \
   .addNode('LS', snOperator.LossFunction(snType.lossType.binaryCrossEntropy), 'Output')

# load weights
#if (net.loadAllWeightFromFile('c:/cpp/w.dat')):
#    print('weights loaded')
#else:
#    print('error loading weights')

# load image lists
imgList = []
pathImg = 'c:\\cpp\\sunnet\\example\\autoEncoder\\images\\'
for i in range(10):
   imgList.append(os.listdir(pathImg + str(i)))

bsz = 100
lr = 0.001
accuratSumm = 0.
inLayer = np.zeros((bsz, 1, 28, 28), ctypes.c_float)
outLayer = np.zeros((bsz, 1, 1, 28 * 28), ctypes.c_float)
imgMem = {}

# training loop
for n in range(1000):

    for i in range(bsz):
        ndir = random.randint(0, 10 - 1)
        nimg = random.randint(0, len(imgList[ndir]) - 1)

        nm = pathImg + str(ndir) + '/' + imgList[ndir][nimg]
        if (nm in imgMem):
            inLayer[i][0] = imgMem[nm]
        else:
            inLayer[i][0] = imageio.imread(nm)
            imgMem[nm] = inLayer[i][0].copy()

    acc = [0]
    net.training(lr, inLayer, outLayer, inLayer, acc)

    accuratSumm += acc[0]/bsz

    print(datetime.datetime.now().strftime('%H:%M:%S'), n, "accurate", accuratSumm / (n + 1))

# save weights
if (net.saveAllWeightToFile('c:/cpp/w.dat')):
    print('weights saved')
else:
    print('error saving weights')