from keras import backend as K
from keras.applications import vgg19
from keras.preprocessing.image import load_img, img_to_array, save_img
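import numpy as np

# `preprocess_image` / `deprocess_image` are assumed to be defined earlier in
# this walkthrough. For reference, a minimal sketch following the standard
# VGG19 preprocessing pipeline (`img_height` / `img_width` are set above):
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_height, img_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)   # add the batch dimension
    img = vgg19.preprocess_input(img)   # RGB -> BGR, subtract ImageNet means
    return img

def deprocess_image(x):
    # Undo the mean-pixel centering applied by vgg19.preprocess_input
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]                   # BGR -> RGB
    x = np.clip(x, 0, 255).astype('uint8')
    return x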
# Load the target and style-reference images as constants (they never change)
target_image = K.constant(preprocess_image(target_image_path))
style_reference_image = K.constant(preprocess_image(style_reference_image_path))
# Placeholder that will contain the generated (combination) image; dtype defaults to float32
combination_image = K.placeholder((1, img_height, img_width, 3))
# Combine the three images into a single batch of shape (3, img_height, img_width, 3),
# which is what the VGG19 network will consume
input_tensor = K.concatenate([target_image,
                              style_reference_image,
                              combination_image], axis=0)
# See the official Keras documentation: /api/applications/vgg/#vgg19-function
model = vgg19.VGG19(input_tensor=input_tensor,
                    weights='imagenet',
                    include_top=False)
print('Model loaded.')
model.summary()
# Map each layer name to its symbolic output tensor
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# Layer whose activations define the content representation
content_layer = 'block5_conv4'
# Layers whose activations define the style representation
style_layers = ['block1_conv1',
                'block2_conv1',
                'block3_conv1',
                'block4_conv1',
                'block5_conv1']
outputs_dict  # inspect the layer-name-to-tensor mapping
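# The losses that consume these activations are assumed to be defined earlier
# in the walkthrough. For reference, a minimal sketch of the standard
# formulation (the loss weights below are illustrative, not prescribed here):
def content_loss(base, combination):
    return K.sum(K.square(combination - base))

def gram_matrix(x):
    # Channel-wise feature correlations: flatten spatial dims, then outer product
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))

def style_loss(style, combination):
    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_height * img_width
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))

content_weight = 0.025
style_weight = 1.0

# Assemble the total loss. Batch index 0 is the target image, 1 the style
# reference, 2 the generated (combination) image, matching input_tensor above.
# (A total-variation regularizer on combination_image is often added as well.)
loss = K.variable(0.)
layer_features = outputs_dict[content_layer]
loss = loss + content_weight * content_loss(layer_features[0, :, :, :],
                                            layer_features[2, :, :, :])
for layer_name in style_layers:
    layer_features = outputs_dict[layer_name]
    sl = style_loss(layer_features[1, :, :, :], layer_features[2, :, :, :])
    loss = loss + (style_weight / len(style_layers)) * sl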
from scipy.optimize import fmin_l_bfgs_b
# from scipy.misc import imsave  # deprecated; keras's save_img is used instead
import time
result_prefix = 'style_transfer_result'
iterations = 10  # more iterations yield a stronger style-transfer effect
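# `fmin_l_bfgs_b` takes the loss and its gradient as two separate callables,
# but computing them separately would run the network twice. A common pattern
# (sketched here, assuming `loss` and `combination_image` are the symbolic
# tensors built above) is an Evaluator that computes both in one backend call
# and caches the gradients for the follow-up `grads` call:
grads = K.gradients(loss, combination_image)[0]
fetch_loss_and_grads = K.function([combination_image], [loss, grads])

class Evaluator(object):
    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        assert self.loss_value is None
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        self.loss_value = outs[0]
        self.grads_values = outs[1].flatten().astype('float64')
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grads_values = np.copy(self.grads_values)
        self.loss_value = None
        self.grads_values = None
        return grads_values

evaluator = Evaluator()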
# Run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss.
# This is our initial state: the target image.
# Note that `scipy.optimize.fmin_l_bfgs_b` can only process flat vectors.
x = preprocess_image(target_image_path)
x = x.flatten()  # flatten to the 1-D vector that fmin_l_bfgs_b expects
for i in range(iterations):
    print('Start of iteration', i)
    start_time = time.time()
    # evaluator caches loss and gradients from a single pass (see Evaluator above)
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # Save the current generated image
    img = x.copy().reshape((img_height, img_width, 3))
    img = deprocess_image(img)
    fname = result_prefix + '_at_iteration_%d.png' % i
    save_img(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
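# Optional: display the final result inline (assumes matplotlib is available,
# e.g. in a notebook)
import matplotlib.pyplot as plt
final_img = deprocess_image(x.copy().reshape((img_height, img_width, 3)))
plt.imshow(final_img)
plt.axis('off')
plt.show()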