The deepviz package lets you plot the architecture of keras models.
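
If deepviz is not yet installed, it can be installed from GitHub (a sketch, assuming the andrie/deepviz repository):

# install.packages("devtools")
devtools::install_github("andrie/deepviz")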

Simple examples

Sequential model

Simple sequential model

library(keras)
library(deepviz)

# Sequential model with several different layers

model <- keras_model_sequential() %>%
  layer_dense(10, input_shape = c(64, 64)) %>%
  layer_conv_1d(filters = 16, kernel_size = 8) %>%
  layer_max_pooling_1d() %>%
  layer_flatten() %>%
  layer_dense(25) %>%
  layer_dense(25, activation = "relu") %>%
  layer_dropout(0.25) %>%
  layer_dense(2, activation = "sigmoid")


model
#> Model
#> ___________________________________________________________________________
#> Layer (type)                     Output Shape                  Param #     
#> ===========================================================================
#> dense_1 (Dense)                  (None, 64, 10)                650         
#> ___________________________________________________________________________
#> conv1d_1 (Conv1D)                (None, 57, 16)                1296        
#> ___________________________________________________________________________
#> max_pooling1d_1 (MaxPooling1D)   (None, 28, 16)                0           
#> ___________________________________________________________________________
#> flatten_1 (Flatten)              (None, 448)                   0           
#> ___________________________________________________________________________
#> dense_2 (Dense)                  (None, 25)                    11225       
#> ___________________________________________________________________________
#> dense_3 (Dense)                  (None, 25)                    650         
#> ___________________________________________________________________________
#> dropout_1 (Dropout)              (None, 25)                    0           
#> ___________________________________________________________________________
#> dense_4 (Dense)                  (None, 2)                     52          
#> ===========================================================================
#> Total params: 13,873
#> Trainable params: 13,873
#> Non-trainable params: 0
#> ___________________________________________________________________________

model %>% plot_model()

Network model

# Model with several inputs and several outputs
# Example from https://keras.rstudio.com/articles/functional_api.html

model <- local({
  main_input <- layer_input(shape = c(100), dtype = 'int32', name = 'main_input')

  lstm_out <- main_input %>%
    layer_embedding(input_dim = 10000, output_dim = 512, input_length = 100) %>%
    layer_lstm(units = 32)

  auxiliary_output <- lstm_out %>%
    layer_dense(units = 1, activation = 'sigmoid', name = 'aux_output')

  auxiliary_input <- layer_input(shape = c(5), name = 'aux_input')

  main_output <- layer_concatenate(c(lstm_out, auxiliary_input)) %>%
    layer_dense(units = 64, activation = 'relu') %>%
    layer_dense(units = 64, activation = 'relu') %>%
    layer_dense(units = 64, activation = 'relu') %>%
    layer_dense(units = 1, activation = 'sigmoid', name = 'main_output')

  keras_model(
    inputs = c(main_input, auxiliary_input),
    outputs = c(main_output, auxiliary_output)
  )
})

model
#> Model
#> ___________________________________________________________________________
#> Layer (type)            Output Shape     Param #  Connected to             
#> ===========================================================================
#> main_input (InputLayer) (None, 100)      0                                 
#> ___________________________________________________________________________
#> embedding_1 (Embedding) (None, 100, 512) 5120000  main_input[0][0]         
#> ___________________________________________________________________________
#> lstm_1 (LSTM)           (None, 32)       69760    embedding_1[0][0]        
#> ___________________________________________________________________________
#> aux_input (InputLayer)  (None, 5)        0                                 
#> ___________________________________________________________________________
#> concatenate_1 (Concaten (None, 37)       0        lstm_1[0][0]             
#>                                                   aux_input[0][0]          
#> ___________________________________________________________________________
#> dense_5 (Dense)         (None, 64)       2432     concatenate_1[0][0]      
#> ___________________________________________________________________________
#> dense_6 (Dense)         (None, 64)       4160     dense_5[0][0]            
#> ___________________________________________________________________________
#> dense_7 (Dense)         (None, 64)       4160     dense_6[0][0]            
#> ___________________________________________________________________________
#> main_output (Dense)     (None, 1)        65       dense_7[0][0]            
#> ___________________________________________________________________________
#> aux_output (Dense)      (None, 1)        33       lstm_1[0][0]             
#> ===========================================================================
#> Total params: 5,200,610
#> Trainable params: 5,200,610
#> Non-trainable params: 0
#> ___________________________________________________________________________

model %>% plot_model()
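
Before training, a two-output model like this is compiled with one loss per output; a minimal sketch, with an arbitrary choice of optimizer and loss weights:

model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  loss_weights = c(1.0, 0.2)
)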

Famous architectures

Examples that illustrate the building blocks of some famous architectures.

Depthwise separable convolution

# Creates a network that illustrates depthwise separable convolution:
# a 1x1 convolution followed by parallel spatial convolutions whose
# outputs are concatenated

depthwise_separable <- local({
  input <-
    layer_input(
      shape = c(3, 64, 64),
      dtype = 'float32',
      name = 'input'
    )

  conv_1x1 <- input %>%
    layer_conv_2d(8, kernel_size = c(1, 1), name = "1x1_convolution")

  conv_1 <- conv_1x1 %>%
    layer_conv_2d(8, kernel_size = c(3, 3), name = "3x3_convolution_1")

  conv_2 <- conv_1x1 %>%
    layer_conv_2d(8, kernel_size = c(3, 3), name = "3x3_convolution_2")

  conv_3 <- conv_1x1 %>%
    layer_conv_2d(8, kernel_size = c(3, 3), name = "3x3_convolution_3")

  output <- layer_concatenate(
    c(conv_1, conv_2, conv_3),
    name = "concat"
  )

  keras_model(
    inputs = c(input),
    outputs = c(output)
  )

})

depthwise_separable
#> Model
#> ___________________________________________________________________________
#> Layer (type)            Output Shape     Param #  Connected to             
#> ===========================================================================
#> input (InputLayer)      (None, 3, 64, 64 0                                 
#> ___________________________________________________________________________
#> 1x1_convolution (Conv2D (None, 3, 64, 8) 520      input[0][0]              
#> ___________________________________________________________________________
#> 3x3_convolution_1 (Conv (None, 1, 62, 8) 584      1x1_convolution[0][0]    
#> ___________________________________________________________________________
#> 3x3_convolution_2 (Conv (None, 1, 62, 8) 584      1x1_convolution[0][0]    
#> ___________________________________________________________________________
#> 3x3_convolution_3 (Conv (None, 1, 62, 8) 584      1x1_convolution[0][0]    
#> ___________________________________________________________________________
#> concat (Concatenate)    (None, 1, 62, 24 0        3x3_convolution_1[0][0]  
#>                                                   3x3_convolution_2[0][0]  
#>                                                   3x3_convolution_3[0][0]  
#> ===========================================================================
#> Total params: 2,272
#> Trainable params: 2,272
#> Non-trainable params: 0
#> ___________________________________________________________________________

depthwise_separable %>% plot_model()
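
For comparison, keras also provides depthwise separable convolution as a single layer, layer_separable_conv_2d(). A minimal sketch (layer sizes chosen arbitrarily):

separable <- keras_model_sequential() %>%
  layer_separable_conv_2d(filters = 8, kernel_size = c(3, 3),
                          input_shape = c(64, 64, 3)) %>%
  layer_flatten() %>%
  layer_dense(2, activation = "sigmoid")

separable %>% plot_model()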

Classical Inception v3

# Creates a network that illustrates an Inception v3 style module
# reference: https://arxiv.org/pdf/1610.02357.pdf


inception_v3 <- local({
  input <- layer_input(shape = c(3, 64, 64), dtype = 'float32')

  stream_1 <- input %>%
    layer_conv_2d(filters = 3, kernel_size = c(1, 1))


  stream_2 <- input %>%
    layer_conv_2d(1, kernel_size = c(1, 1)) %>%
    layer_conv_2d(1, kernel_size = c(3, 3), padding = "same")

  stream_3 <- input %>%
    layer_average_pooling_2d(pool_size = c(1, 1)) %>%
    layer_conv_2d(8, kernel_size = c(3, 3), padding = "same")

  stream_4 <- input %>%
    layer_conv_2d(8, kernel_size = c(1, 1)) %>%
    layer_conv_2d(8, kernel_size = c(3, 3), padding = "same") %>%
    layer_conv_2d(8, kernel_size = c(3, 3), padding = "same")

  output <- layer_concatenate(
    c(stream_1, stream_2, stream_3, stream_4),
    name = "concat"
  )

  keras_model(inputs = c(input),
              outputs = c(output))

})

inception_v3
#> Model
#> ___________________________________________________________________________
#> Layer (type)            Output Shape     Param #  Connected to             
#> ===========================================================================
#> input_2 (InputLayer)    (None, 3, 64, 64 0                                 
#> ___________________________________________________________________________
#> conv2d_7 (Conv2D)       (None, 3, 64, 8) 520      input_2[0][0]            
#> ___________________________________________________________________________
#> conv2d_4 (Conv2D)       (None, 3, 64, 1) 65       input_2[0][0]            
#> ___________________________________________________________________________
#> average_pooling2d_1 (Av (None, 3, 64, 64 0        input_2[0][0]            
#> ___________________________________________________________________________
#> conv2d_8 (Conv2D)       (None, 3, 64, 8) 584      conv2d_7[0][0]           
#> ___________________________________________________________________________
#> conv2d_3 (Conv2D)       (None, 3, 64, 3) 195      input_2[0][0]            
#> ___________________________________________________________________________
#> conv2d_5 (Conv2D)       (None, 3, 64, 1) 10       conv2d_4[0][0]           
#> ___________________________________________________________________________
#> conv2d_6 (Conv2D)       (None, 3, 64, 8) 4616     average_pooling2d_1[0][0]
#> ___________________________________________________________________________
#> conv2d_9 (Conv2D)       (None, 3, 64, 8) 584      conv2d_8[0][0]           
#> ___________________________________________________________________________
#> concat (Concatenate)    (None, 3, 64, 20 0        conv2d_3[0][0]           
#>                                                   conv2d_5[0][0]           
#>                                                   conv2d_6[0][0]           
#>                                                   conv2d_9[0][0]           
#> ===========================================================================
#> Total params: 6,574
#> Trainable params: 6,574
#> Non-trainable params: 0
#> ___________________________________________________________________________

inception_v3 %>% plot_model()
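
The full Inception v3 architecture that ships with keras can be plotted the same way; a sketch (weights = NULL builds the architecture without downloading the ImageNet weights, and the resulting plot is very large):

inception_full <- application_inception_v3(weights = NULL)
inception_full %>% plot_model()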