Updated install.sh
This commit is contained in:
parent
3b0062a84b
commit
d64e5eb347
@ -1,126 +0,0 @@
|
|||||||
# Example for Cloud Computing Course Master AI / GSD
|
|
||||||
#
|
|
||||||
# uses libCloud: https://libcloud.apache.org/
|
|
||||||
# libCloud API documentation: https://libcloud.readthedocs.io/en/latest/
|
|
||||||
# OpenStack API documentation: https://developer.openstack.org/
|
|
||||||
# this code was initially based on the former tutorial: https://developer.openstack.org/firstapp-libcloud/
|
|
||||||
|
|
||||||
import getpass
|
|
||||||
|
|
||||||
from libcloud.compute.providers import get_driver
|
|
||||||
from libcloud.compute.types import Provider
|
|
||||||
|
|
||||||
# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username,
|
|
||||||
# project etc., as coordinated in the lab sessions)
|
|
||||||
|
|
||||||
group_number = 30
|
|
||||||
|
|
||||||
|
|
||||||
########################################################################################################################
|
|
||||||
#
|
|
||||||
# no changes necessary below this line in this example
|
|
||||||
#
|
|
||||||
########################################################################################################################
|
|
||||||
|
|
||||||
# web service endpoint of the private cloud infrastructure
|
|
||||||
auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
|
||||||
# your username in OpenStack
|
|
||||||
auth_username = 'CloudComp' + str(group_number)
|
|
||||||
# your project in OpenStack
|
|
||||||
project_name = 'CloudComp' + str(group_number)
|
|
||||||
# A network in the project the started instance will be attached to
|
|
||||||
project_network = 'CloudComp' + str(group_number) + '-net'
|
|
||||||
|
|
||||||
# The image to look for and use for the started instance
|
|
||||||
ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
|
|
||||||
# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example?
|
|
||||||
|
|
||||||
# default region
|
|
||||||
region_name = 'RegionOne'
|
|
||||||
# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username
|
|
||||||
# domain_name = "default"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Boot a demo instance named 'testing', list all instances, then destroy it.

    Connects to the HS Fulda private OpenStack cloud via libcloud, selects the
    configured Ubuntu image, flavor id 2 and the project network, creates one
    node, prints all running nodes and finally deletes the created node.
    Uses module-level configuration (auth_url, auth_username, ...).
    """
    # Get the password from the user; hard-coded demo value for the lab.
    # auth_password = getpass.getpass("Enter your OpenStack password:")
    auth_password = "demo"

    # Instantiate a connection to the OpenStack private cloud.
    # ex_force_auth_version='3.x_password' is required in our environment.
    provider = get_driver(Provider.OPENSTACK)

    print("Opening connection to %s as %s..." % (auth_url, auth_username))

    conn = provider(auth_username,
                    auth_password,
                    ex_force_auth_url=auth_url,
                    ex_force_auth_version='3.x_password',
                    ex_tenant_name=project_name,
                    ex_force_service_region=region_name)
                    # ex_domain_name=domain_name)

    print("Getting images and selecting desired one...")
    print("=========================================================================")

    # Get the images offered in the cloud and pick the configured Ubuntu one
    # (last match wins; prints every matching image).
    images = conn.list_images()
    image = ''
    for img in images:
        if img.name == ubuntu_image_name:
            image = img
            print(img)

    print("Getting flavors...")
    print("=========================================================================")

    # List all flavors offered in the cloud (e.g. m1.small, m1.medium, ...).
    flavors = conn.list_sizes()
    for flavor in flavors:
        print(flavor)

    print("Selecting desired flavor...")
    print("=========================================================================")

    # Use the flavor with id 2 (m1.small in this cloud).
    flavor_id = '2'
    flavor = conn.ex_get_size(flavor_id)
    print(flavor)

    print("Selecting desired network...")
    print("=========================================================================")

    # Find the project network the instance should be attached to.
    networks = conn.ex_list_networks()
    network = ''
    for net in networks:
        if net.name == project_network:
            network = net

    print("Create instance 'testing'...")
    print("=========================================================================")

    # Create a new instance named "testing" attached to the selected network.
    # FIX: pass networks as a list ([network]) instead of a set ({network});
    # libcloud's create_node expects a list of networks, and the other example
    # scripts in this file consistently use [network].
    instance_name = 'testing'
    testing_instance = conn.create_node(name=instance_name, image=image, size=flavor, networks=[network])
    print(testing_instance)

    print("Showing all running instances...")
    print("=========================================================================")

    # Show all instances (running nodes) in the cloud context.
    instances = conn.list_nodes()
    for instance in instances:
        print(instance)

    print("Destroying instance...")
    print("=========================================================================")

    # Destroy the instance we have just created.
    conn.destroy_node(testing_instance)


# Entry point when the script is started from the command line.
if __name__ == '__main__':
    main()
|
|
@ -1,276 +0,0 @@
|
|||||||
# import getpass
|
|
||||||
# import os
|
|
||||||
|
|
||||||
from libcloud.compute.providers import get_driver
|
|
||||||
from libcloud.compute.types import Provider
|
|
||||||
|
|
||||||
# reqs:
|
|
||||||
# services: nova, glance, neutron
|
|
||||||
# resources: 2 instances, 2 floating ips (1 keypair, 2 security groups)
|
|
||||||
|
|
||||||
# Please use 1-29 for X in the following variable to specify your group number.
# (will be used for the username, project etc., as coordinated in the lab sessions)
group_number = 30


# Keystone endpoint of the private cloud infrastructure.
auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
# OpenStack username / project derived from the group number above.
auth_username = f'CloudComp{group_number}'
project_name = f'CloudComp{group_number}'
# Project network the started instances will be attached to.
project_network = f'CloudComp{group_number}-net'

# Image to look up and boot for the instances.
ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example?

# Public key used for SSH access; make sure you hold the matching private key.
# id_rsa.pub uses the standard sshd pubkey format:
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'

flavor_name = 'm1.small'


# Default region of the private cloud.
region_name = 'RegionOne'
# Domain to use: "default" for local accounts, "hsfulda" for RZ LDAP (fdaiXXXX usernames).
# domain_name = "default"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Deploy the faafo demo: one app-controller and one app-worker-1 node.

    Ensures the SSH keypair and the 'worker'/'control' security groups exist,
    boots the controller (messaging + API via install.sh userdata), attaches a
    floating IP, then boots a worker configured against the controller's fixed
    IP and attaches a second floating IP. Uses module-level configuration.
    """
    # ---------------------------------------------------------------------
    # credentials
    # ---------------------------------------------------------------------
    # if "OS_PASSWORD" in os.environ:
    #     auth_password = os.environ["OS_PASSWORD"]
    # else:
    #     auth_password = getpass.getpass("Enter your OpenStack password:")
    auth_password = "demo"

    # ---------------------------------------------------------------------
    # create connection
    # ---------------------------------------------------------------------
    openstack_driver = get_driver(Provider.OPENSTACK)
    conn = openstack_driver(auth_username,
                            auth_password,
                            ex_force_auth_url=auth_url,
                            ex_force_auth_version='3.x_password',
                            ex_tenant_name=project_name,
                            ex_force_service_region=region_name)
                            # ex_domain_name=domain_name)

    # ---------------------------------------------------------------------
    # get image, flavor, network for instance creation
    # ---------------------------------------------------------------------
    image = ''
    for candidate_image in conn.list_images():
        if candidate_image.name == ubuntu_image_name:
            image = candidate_image

    flavor = ''
    for candidate_flavor in conn.list_sizes():
        if candidate_flavor.name == flavor_name:
            flavor = conn.ex_get_size(candidate_flavor.id)

    network = ''
    for candidate_network in conn.ex_list_networks():
        if candidate_network.name == project_network:
            network = candidate_network

    # ---------------------------------------------------------------------
    # create keypair dependency
    # ---------------------------------------------------------------------
    print('Checking for existing SSH key pair...')
    keypair_exists = False
    for keypair in conn.list_key_pairs():
        if keypair.name == keypair_name:
            keypair_exists = True

    if keypair_exists:
        print('Keypair ' + keypair_name + ' already exists. Skipping import.')
    else:
        print('adding keypair...')
        conn.import_key_pair_from_file(keypair_name, pub_key_file)

    for keypair in conn.list_key_pairs():
        print(keypair)

    # ---------------------------------------------------------------------
    # create security group dependencies
    # ---------------------------------------------------------------------
    print('Checking for existing worker security group...')
    security_group_name = 'worker'
    security_group_exists = False
    worker_security_group = ''
    for security_group in conn.ex_list_security_groups():
        if security_group.name == security_group_name:
            worker_security_group = security_group
            security_group_exists = True

    if security_group_exists:
        print('Worker Security Group ' + worker_security_group.name + ' already exists. Skipping creation.')
    else:
        worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node')
        conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22)

    print('Checking for existing controller security group...')
    security_group_name = 'control'
    security_group_exists = False
    controller_security_group = ''
    for security_group in conn.ex_list_security_groups():
        if security_group.name == security_group_name:
            controller_security_group = security_group
            security_group_exists = True

    if security_group_exists:
        print('Controller Security Group ' + controller_security_group.name + ' already exists. Skipping creation.')
    else:
        controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node')
        conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22)
        conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80)
        # AMQP only reachable from members of the worker group.
        conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672,
                                           source_security_group=worker_security_group)

    for security_group in conn.ex_list_security_groups():
        print(security_group)

    # ---------------------------------------------------------------------
    # create app-controller
    # ---------------------------------------------------------------------
    # https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh
    # is currently broken, hence the "rabbitctl" lines were added below,
    # see also https://bugs.launchpad.net/faafo/+bug/1679710
    # Thanks to Stefan Friedmann for finding this fix ;)
    userdata = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    -i messaging -i faafo -r api
rabbitmqctl add_user faafo guest
rabbitmqctl set_user_tags faafo administrator
rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*"
'''

    print('Starting new app-controller instance and wait until it is running...')
    instance_controller_1 = conn.create_node(name='app-controller',
                                             image=image,
                                             size=flavor,
                                             networks=[network],
                                             ex_keyname=keypair_name,
                                             ex_userdata=userdata,
                                             ex_security_groups=[controller_security_group])

    conn.wait_until_running(nodes=[instance_controller_1], timeout=120, ssh_interface='private_ips')

    # ---------------------------------------------------------------------
    # assign app-controller floating ip
    # ---------------------------------------------------------------------
    print('Checking for unused Floating IP...')
    unused_floating_ip = None
    for floating_ip in conn.ex_list_floating_ips():
        if not floating_ip.node_id:
            unused_floating_ip = floating_ip
            break

    if not unused_floating_ip:
        pool = conn.ex_list_floating_ip_pools()[0]
        print('Allocating new Floating IP from pool: {}'.format(pool))
        unused_floating_ip = pool.create_floating_ip()

    conn.ex_attach_floating_ip_to_node(instance_controller_1, unused_floating_ip)
    print('Controller Application will be deployed to http://%s' % unused_floating_ip.ip_address)

    # ---------------------------------------------------------------------
    # getting id and ip address of app-controller instance
    # ---------------------------------------------------------------------
    # Refresh node details; floating ips are assigned above, so prefer the
    # public ip if one is present, else fall back to the fixed private ip.
    instance_controller_1 = conn.ex_get_node_details(instance_controller_1.id)
    if instance_controller_1.public_ips:
        ip_controller = instance_controller_1.public_ips[0]
    else:
        ip_controller = instance_controller_1.private_ips[0]

    # ---------------------------------------------------------------------
    # create app-worker-1
    # ---------------------------------------------------------------------
    userdata = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    -i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/'
''' % {'ip_controller': ip_controller}

    print('Starting new app-worker-1 instance and wait until it is running...')
    instance_worker_1 = conn.create_node(name='app-worker-1',
                                         image=image,
                                         size=flavor,
                                         networks=[network],
                                         ex_keyname=keypair_name,
                                         ex_userdata=userdata,
                                         ex_security_groups=[worker_security_group])

    conn.wait_until_running(nodes=[instance_worker_1], timeout=120, ssh_interface='private_ips')

    # ---------------------------------------------------------------------
    # assign app-worker floating ip
    # ---------------------------------------------------------------------
    print('Checking for unused Floating IP...')
    unused_floating_ip = None
    for floating_ip in conn.ex_list_floating_ips():
        if not floating_ip.node_id:
            unused_floating_ip = floating_ip
            break

    if not unused_floating_ip:
        pool = conn.ex_list_floating_ip_pools()[0]
        print('Allocating new Floating IP from pool: {}'.format(pool))
        unused_floating_ip = pool.create_floating_ip()

    conn.ex_attach_floating_ip_to_node(instance_worker_1, unused_floating_ip)
    print('The worker will be available for SSH at %s' % unused_floating_ip.ip_address)

    print('You can use ssh to login to the controller using your private key. After login, you can list available '
          'fractals using "faafo list". To request the generation of new fractals, you can use "faafo create". '
          'You can also see other options to use the faafo example cloud service using "faafo -h".')


if __name__ == '__main__':
    main()
|
|
@ -1,190 +0,0 @@
|
|||||||
# import getpass
|
|
||||||
# import os
|
|
||||||
# import libcloud.security
|
|
||||||
|
|
||||||
import time
|
|
||||||
from libcloud.compute.providers import get_driver
|
|
||||||
from libcloud.compute.types import Provider
|
|
||||||
|
|
||||||
# reqs:
|
|
||||||
# services: nova, glance, neutron
|
|
||||||
# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups)
|
|
||||||
|
|
||||||
# Please use 1-29 for X in the following variable to specify your group number.
# (will be used for the username, project etc., as coordinated in the lab sessions)
group_number = 30


# Keystone endpoint of the private cloud infrastructure.
auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
# OpenStack username / project derived from the group number above.
auth_username = f'CloudComp{group_number}'
project_name = f'CloudComp{group_number}'
# Project network the started instances will be attached to.
project_network = f'CloudComp{group_number}-net'

# Image to look up and boot for the instances.
ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example?

# Public key used for SSH access; make sure you hold the matching private key.
# id_rsa.pub uses the standard sshd pubkey format:
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'

flavor_name = 'm1.small'


# Default region of the private cloud.
region_name = 'RegionOne'
# Domain to use: "default" for local accounts, "hsfulda" for RZ LDAP (fdaiXXXX usernames).
domain_name = "default"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Scale out the faafo demo by booting an additional app-worker-3 node.

    Looks up the fixed IPs of the already-running 'app-services' and
    'app-api-1' instances, ensures the keypair and 'worker' security group
    exist, then boots app-worker-3 configured against those services.
    Uses module-level configuration (auth_url, flavor_name, ...).
    """
    # ---------------------------------------------------------------------
    # credentials
    # ---------------------------------------------------------------------
    # if "OS_PASSWORD" in os.environ:
    #     auth_password = os.environ["OS_PASSWORD"]
    # else:
    #     auth_password = getpass.getpass("Enter your OpenStack password:")
    auth_password = "demo"

    # ---------------------------------------------------------------------
    # create connection
    # ---------------------------------------------------------------------
    # libcloud.security.VERIFY_SSL_CERT = False
    openstack_driver = get_driver(Provider.OPENSTACK)
    conn = openstack_driver(auth_username,
                            auth_password,
                            ex_force_auth_url=auth_url,
                            ex_force_auth_version='3.x_password',
                            ex_tenant_name=project_name,
                            ex_force_service_region=region_name,
                            ex_domain_name=domain_name)

    # ---------------------------------------------------------------------
    # get image, flavor, network for instance creation
    # ---------------------------------------------------------------------
    image = ''
    for candidate_image in conn.list_images():
        if candidate_image.name == ubuntu_image_name:
            image = candidate_image

    flavor = ''
    for candidate_flavor in conn.list_sizes():
        if candidate_flavor.name == flavor_name:
            flavor = conn.ex_get_size(candidate_flavor.id)

    network = ''
    for candidate_network in conn.ex_list_networks():
        if candidate_network.name == project_network:
            network = candidate_network

    # ---------------------------------------------------------------------
    # get fixed ip for service and api instance
    # (better would be a shared IP for the cluster etc.)
    # ---------------------------------------------------------------------
    # NOTE(review): services_ip / api_1_ip stay unbound if the instances are
    # not running — the script then fails below; start them first.
    for instance in conn.list_nodes():
        if instance.name == 'app-services':
            services_ip = instance.private_ips[0]
            print('Found app-services fixed IP to be: ', services_ip)
        if instance.name == 'app-api-1':
            api_1_ip = instance.private_ips[0]
            print('Found app-api-1 fixed IP to be: ', api_1_ip)

    # ---------------------------------------------------------------------
    # create keypair dependency
    # ---------------------------------------------------------------------
    print('Checking for existing SSH key pair...')
    keypair_exists = False
    for keypair in conn.list_key_pairs():
        if keypair.name == keypair_name:
            keypair_exists = True

    if keypair_exists:
        print('Keypair ' + keypair_name + ' already exists. Skipping import.')
    else:
        print('adding keypair...')
        conn.import_key_pair_from_file(keypair_name, pub_key_file)

    for keypair in conn.list_key_pairs():
        print(keypair)

    # ---------------------------------------------------------------------
    # create security group dependency
    # ---------------------------------------------------------------------
    def get_security_group(connection, security_group_name):
        """Return the security group with the given name, or False if absent."""
        print('Checking for existing ' + security_group_name + ' security group...')
        for security_grp in connection.ex_list_security_groups():
            if security_grp.name == security_group_name:
                print('Security Group ' + security_group_name + ' already exists. Skipping creation.')
                return security_grp
        return False

    if not get_security_group(conn, "worker"):
        worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node')
        conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22)
    else:
        worker_security_group = get_security_group(conn, "worker")

    for security_group in conn.ex_list_security_groups():
        print(security_group)

    # ---------------------------------------------------------------------
    # create worker instances
    # ---------------------------------------------------------------------
    userdata_worker = '''#!/usr/bin/env bash
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    -i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip}

    # userdata-api-2 = '''#!/usr/bin/env bash
    # curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
    #     -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
    # ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip}

    print('Starting new app-worker-3 instance and wait until it is running...')
    instance_worker_3 = conn.create_node(name='app-worker-3',
                                         image=image, size=flavor,
                                         networks=[network],
                                         ex_keyname=keypair_name,
                                         ex_userdata=userdata_worker,
                                         ex_security_groups=[worker_security_group])


if __name__ == '__main__':
    main()
|
|
@ -1,345 +0,0 @@
|
|||||||
# import getpass
|
|
||||||
# import os
|
|
||||||
# import libcloud.security
|
|
||||||
|
|
||||||
import time
|
|
||||||
from libcloud.compute.providers import get_driver
|
|
||||||
from libcloud.compute.types import Provider
|
|
||||||
|
|
||||||
# reqs:
|
|
||||||
# services: nova, glance, neutron
|
|
||||||
# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups)
|
|
||||||
|
|
||||||
# Please use 1-29 for X in the following variable to specify your group number.
# (will be used for the username, project etc., as coordinated in the lab sessions)
group_number = 30


# Keystone endpoint of the private cloud infrastructure.
auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
# OpenStack username / project derived from the group number above.
auth_username = f'CloudComp{group_number}'
project_name = f'CloudComp{group_number}'
# Project network the started instances will be attached to.
project_network = f'CloudComp{group_number}-net'

# Image to look up and boot for the instances.
ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
# TODO: Ubuntu >18.04 would require major updates to faafo example again/better option: complete rewrite of example?

# Public key used for SSH access; make sure you hold the matching private key.
# id_rsa.pub uses the standard sshd pubkey format:
# ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw+J...F3w2mleybgT1w== user@HOSTNAME
keypair_name = 'srieger-pub'
pub_key_file = '~/.ssh/id_rsa.pub'

flavor_name = 'm1.small'


# Default region of the private cloud.
region_name = 'RegionOne'
# Domain to use: "default" for local accounts, "hsfulda" for RZ LDAP (fdaiXXXX usernames).
domain_name = "default"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# get credentials
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
# if "OS_PASSWORD" in os.environ:
|
|
||||||
# auth_password = os.environ["OS_PASSWORD"]
|
|
||||||
# else:
|
|
||||||
# auth_password = getpass.getpass("Enter your OpenStack password:")
|
|
||||||
auth_password = "demo"
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create connection
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
# libcloud.security.VERIFY_SSL_CERT = False
|
|
||||||
|
|
||||||
provider = get_driver(Provider.OPENSTACK)
|
|
||||||
conn = provider(auth_username,
|
|
||||||
auth_password,
|
|
||||||
ex_force_auth_url=auth_url,
|
|
||||||
ex_force_auth_version='3.x_password',
|
|
||||||
ex_tenant_name=project_name,
|
|
||||||
ex_force_service_region=region_name,
|
|
||||||
ex_domain_name=domain_name)
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# get image, flavor, network for instance creation
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
images = conn.list_images()
|
|
||||||
image = ''
|
|
||||||
for img in images:
|
|
||||||
if img.name == ubuntu_image_name:
|
|
||||||
image = img
|
|
||||||
|
|
||||||
flavors = conn.list_sizes()
|
|
||||||
flavor = ''
|
|
||||||
for flav in flavors:
|
|
||||||
if flav.name == flavor_name:
|
|
||||||
flavor = conn.ex_get_size(flav.id)
|
|
||||||
|
|
||||||
networks = conn.ex_list_networks()
|
|
||||||
network = ''
|
|
||||||
for net in networks:
|
|
||||||
if net.name == project_network:
|
|
||||||
network = net
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create keypair dependency
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
print('Checking for existing SSH key pair...')
|
|
||||||
keypair_exists = False
|
|
||||||
for keypair in conn.list_key_pairs():
|
|
||||||
if keypair.name == keypair_name:
|
|
||||||
keypair_exists = True
|
|
||||||
|
|
||||||
if keypair_exists:
|
|
||||||
print('Keypair ' + keypair_name + ' already exists. Skipping import.')
|
|
||||||
else:
|
|
||||||
print('adding keypair...')
|
|
||||||
conn.import_key_pair_from_file(keypair_name, pub_key_file)
|
|
||||||
|
|
||||||
for keypair in conn.list_key_pairs():
|
|
||||||
print(keypair)
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# clean up resources from previous demos
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
# destroy running demo instances
|
|
||||||
for instance in conn.list_nodes():
|
|
||||||
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
|
|
||||||
'app-services', 'app-api-1', 'app-api-2']:
|
|
||||||
print('Destroying Instance: %s' % instance.name)
|
|
||||||
conn.destroy_node(instance)
|
|
||||||
|
|
||||||
# wait until all nodes are destroyed to be able to remove depended security groups
|
|
||||||
nodes_still_running = True
|
|
||||||
while nodes_still_running:
|
|
||||||
nodes_still_running = False
|
|
||||||
time.sleep(3)
|
|
||||||
instances = conn.list_nodes()
|
|
||||||
for instance in instances:
|
|
||||||
# if we see any demo instances still running continue to wait for them to stop
|
|
||||||
if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-controller']:
|
|
||||||
nodes_still_running = True
|
|
||||||
print('There are still instances running, waiting for them to be destroyed...')
|
|
||||||
|
|
||||||
# delete security groups
|
|
||||||
for group in conn.ex_list_security_groups():
|
|
||||||
if group.name in ['control', 'worker', 'api', 'services']:
|
|
||||||
print('Deleting security group: %s' % group.name)
|
|
||||||
conn.ex_delete_security_group(group)
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create security group dependency
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
def get_security_group(connection, security_group_name):
    """Return the existing security group named *security_group_name*.

    Iterates over all security groups visible on *connection* and returns
    the matching group object so callers can reuse it instead of creating
    a duplicate. Returns ``False`` when no group of that name exists.
    """
    print('Checking for existing ' + security_group_name + ' security group...')
    for security_grp in connection.ex_list_security_groups():
        if security_grp.name == security_group_name:
            print('Security Group ' + security_group_name + ' already exists. Skipping creation.')
            # BUG FIX: the original returned the unrelated global
            # 'worker_security_group' (a NameError, or the wrong group, for
            # any other requested name); return the group that matched.
            return security_grp
    return False
|
|
||||||
|
|
||||||
if not get_security_group(conn, "api"):
|
|
||||||
api_security_group = conn.ex_create_security_group('api', 'for API services only')
|
|
||||||
conn.ex_create_security_group_rule(api_security_group, 'TCP', 80, 80)
|
|
||||||
conn.ex_create_security_group_rule(api_security_group, 'TCP', 22, 22)
|
|
||||||
else:
|
|
||||||
api_security_group = get_security_group(conn, "api")
|
|
||||||
|
|
||||||
if not get_security_group(conn, "worker"):
|
|
||||||
worker_security_group = conn.ex_create_security_group('worker', 'for services that run on a worker node')
|
|
||||||
conn.ex_create_security_group_rule(worker_security_group, 'TCP', 22, 22)
|
|
||||||
else:
|
|
||||||
worker_security_group = get_security_group(conn, "worker")
|
|
||||||
|
|
||||||
if not get_security_group(conn, "control"):
|
|
||||||
controller_security_group = conn.ex_create_security_group('control', 'for services that run on a control node')
|
|
||||||
conn.ex_create_security_group_rule(controller_security_group, 'TCP', 22, 22)
|
|
||||||
conn.ex_create_security_group_rule(controller_security_group, 'TCP', 80, 80)
|
|
||||||
conn.ex_create_security_group_rule(controller_security_group, 'TCP', 5672, 5672,
|
|
||||||
source_security_group=worker_security_group)
|
|
||||||
|
|
||||||
if not get_security_group(conn, "services"):
|
|
||||||
services_security_group = conn.ex_create_security_group('services', 'for DB and AMQP services only')
|
|
||||||
conn.ex_create_security_group_rule(services_security_group, 'TCP', 22, 22)
|
|
||||||
conn.ex_create_security_group_rule(services_security_group, 'TCP', 3306, 3306,
|
|
||||||
source_security_group=api_security_group)
|
|
||||||
conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672,
|
|
||||||
source_security_group=worker_security_group)
|
|
||||||
conn.ex_create_security_group_rule(services_security_group, 'TCP', 5672, 5672,
|
|
||||||
source_security_group=api_security_group)
|
|
||||||
else:
|
|
||||||
services_security_group = get_security_group(conn, "services")
|
|
||||||
|
|
||||||
for security_group in conn.ex_list_security_groups():
|
|
||||||
print(security_group)
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# get floating ip helper function
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
def get_floating_ip(connection):
    """Return a usable floating IP, reusing an unassigned one when possible.

    Scans the project's existing floating IPs for one that is not attached
    to any node; only when none is free does it allocate a new address from
    the first available pool.
    """
    candidate = next(
        (ip for ip in connection.ex_list_floating_ips() if not ip.node_id),
        None)
    if not candidate:
        # nothing free to recycle -- allocate a fresh IP from the first pool
        first_pool = connection.ex_list_floating_ip_pools()[0]
        candidate = first_pool.create_floating_ip()
    return candidate
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create app-services instance (database & messaging)
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
# https://git.openstack.org/cgit/openstack/faafo/plain/contrib/install.sh
|
|
||||||
# is currently broken, hence the "rabbitctl" lines were added in the example
|
|
||||||
# below, see also https://bugs.launchpad.net/faafo/+bug/1679710
|
|
||||||
#
|
|
||||||
# Thanks to Stefan Friedmann for finding this fix ;)
|
|
||||||
|
|
||||||
userdata_service = '''#!/usr/bin/env bash
|
|
||||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
|
|
||||||
-i database -i messaging
|
|
||||||
rabbitmqctl add_user faafo guest
|
|
||||||
rabbitmqctl set_user_tags faafo administrator
|
|
||||||
rabbitmqctl set_permissions -p / faafo ".*" ".*" ".*"
|
|
||||||
'''
|
|
||||||
|
|
||||||
print('Starting new app-services instance and wait until it is running...')
|
|
||||||
instance_services = conn.create_node(name='app-services',
|
|
||||||
image=image,
|
|
||||||
size=flavor,
|
|
||||||
networks=[network],
|
|
||||||
ex_keyname=keypair_name,
|
|
||||||
ex_userdata=userdata_service,
|
|
||||||
ex_security_groups=[services_security_group])
|
|
||||||
instance_services = conn.wait_until_running(nodes=[instance_services], timeout=120,
|
|
||||||
ssh_interface='private_ips')[0][0]
|
|
||||||
services_ip = instance_services.private_ips[0]
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create app-api instances
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
userdata_api = '''#!/usr/bin/env bash
|
|
||||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
|
|
||||||
-i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \
|
|
||||||
-d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo'
|
|
||||||
''' % {'services_ip': services_ip}
|
|
||||||
|
|
||||||
print('Starting new app-api-1 instance and wait until it is running...')
|
|
||||||
instance_api_1 = conn.create_node(name='app-api-1',
|
|
||||||
image=image,
|
|
||||||
size=flavor,
|
|
||||||
networks=[network],
|
|
||||||
ex_keyname=keypair_name,
|
|
||||||
ex_userdata=userdata_api,
|
|
||||||
ex_security_groups=[api_security_group])
|
|
||||||
|
|
||||||
print('Starting new app-api-2 instance and wait until it is running...')
|
|
||||||
instance_api_2 = conn.create_node(name='app-api-2',
|
|
||||||
image=image,
|
|
||||||
size=flavor,
|
|
||||||
networks=[network],
|
|
||||||
ex_keyname=keypair_name,
|
|
||||||
ex_userdata=userdata_api,
|
|
||||||
ex_security_groups=[api_security_group])
|
|
||||||
|
|
||||||
instance_api_1 = conn.wait_until_running(nodes=[instance_api_1], timeout=120,
|
|
||||||
ssh_interface='private_ips')[0][0]
|
|
||||||
api_1_ip = instance_api_1.private_ips[0]
|
|
||||||
instance_api_2 = conn.wait_until_running(nodes=[instance_api_2], timeout=120,
|
|
||||||
ssh_interface='private_ips')[0][0]
|
|
||||||
# api_2_ip = instance_api_2.private_ips[0]
|
|
||||||
|
|
||||||
for instance in [instance_api_1, instance_api_2]:
|
|
||||||
floating_ip = get_floating_ip(conn)
|
|
||||||
conn.ex_attach_floating_ip_to_node(instance, floating_ip)
|
|
||||||
print('allocated %(ip)s to %(host)s' % {'ip': floating_ip.ip_address, 'host': instance.name})
|
|
||||||
|
|
||||||
###########################################################################
|
|
||||||
#
|
|
||||||
# create worker instances
|
|
||||||
#
|
|
||||||
###########################################################################
|
|
||||||
|
|
||||||
userdata_worker = '''#!/usr/bin/env bash
|
|
||||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
|
|
||||||
-i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
|
|
||||||
''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip}
|
|
||||||
|
|
||||||
# userdata_api-api-2 = '''#!/usr/bin/env bash
|
|
||||||
# curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh | bash -s -- \
|
|
||||||
# -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
|
|
||||||
# ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip}
|
|
||||||
|
|
||||||
print('Starting new app-worker-1 instance and wait until it is running...')
|
|
||||||
instance_worker_1 = conn.create_node(name='app-worker-1',
|
|
||||||
image=image, size=flavor,
|
|
||||||
networks=[network],
|
|
||||||
ex_keyname=keypair_name,
|
|
||||||
ex_userdata=userdata_worker,
|
|
||||||
ex_security_groups=[worker_security_group])
|
|
||||||
|
|
||||||
print('Starting new app-worker-2 instance and wait until it is running...')
|
|
||||||
instance_worker_2 = conn.create_node(name='app-worker-2',
|
|
||||||
image=image, size=flavor,
|
|
||||||
networks=[network],
|
|
||||||
ex_keyname=keypair_name,
|
|
||||||
ex_userdata=userdata_worker,
|
|
||||||
ex_security_groups=[worker_security_group])
|
|
||||||
|
|
||||||
# do not start worker 3 initially, can be started using scale-out-add-worker.py demo
|
|
||||||
|
|
||||||
#print('Starting new app-worker-3 instance and wait until it is running...')
|
|
||||||
#instance_worker_3 = conn.create_node(name='app-worker-3',
|
|
||||||
# image=image, size=flavor,
|
|
||||||
# networks=[network],
|
|
||||||
# ex_keyname=keypair_name,
|
|
||||||
# ex_userdata=userdata_worker,
|
|
||||||
# ex_security_groups=[worker_security_group])
|
|
||||||
|
|
||||||
print(instance_worker_1)
|
|
||||||
print(instance_worker_2)
|
|
||||||
#print(instance_worker_3)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@ -1,123 +0,0 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import getpass
|
|
||||||
import os
|
|
||||||
|
|
||||||
import libcloud.security
|
|
||||||
from libcloud.storage.providers import get_driver
|
|
||||||
from libcloud.storage.types import Provider
|
|
||||||
|
|
||||||
# reqs:
|
|
||||||
# services: nova, glance, neutron
|
|
||||||
# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups)
|
|
||||||
|
|
||||||
# HS-Fulda Private Cloud
|
|
||||||
auth_url = 'https://192.168.72.40:5000'
|
|
||||||
region_name = 'RegionOne'
|
|
||||||
domain_name = "hsfulda"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Swift demo: ensure a 'fractals' container exists, upload a sample
    image, print its local MD5 for an integrity check, then delete it.

    Credentials come from the OS_* environment variables when set,
    otherwise the user is prompted interactively.
    """
    ###########################################################################
    #
    # get credentials
    #
    ###########################################################################

    if "OS_PROJECT_NAME" in os.environ:
        project_name = os.environ["OS_PROJECT_NAME"]
    else:
        project_name = input("Enter your OpenStack project:")

    if "OS_USERNAME" in os.environ:
        auth_username = os.environ["OS_USERNAME"]
    else:
        auth_username = input("Enter your OpenStack username:")

    if "OS_PASSWORD" in os.environ:
        auth_password = os.environ["OS_PASSWORD"]
    else:
        # getpass hides the password while it is typed
        auth_password = getpass.getpass("Enter your OpenStack password:")

    ###########################################################################
    #
    # create connection
    #
    ###########################################################################

    # NOTE(review): disables TLS certificate verification process-wide --
    # acceptable only for a lab environment with self-signed certificates.
    libcloud.security.VERIFY_SSL_CERT = False

    provider = get_driver(Provider.OPENSTACK_SWIFT)
    swift = provider(auth_username,
                     auth_password,
                     ex_force_auth_url=auth_url,
                     ex_force_auth_version='3.x_password',
                     ex_tenant_name=project_name,
                     ex_force_service_region=region_name,
                     ex_domain_name=domain_name)

    ###########################################################################
    #
    # create container
    #
    ###########################################################################

    container_name = 'fractals'
    containers = swift.list_containers()
    # reuse the container if it already exists, otherwise create it below
    container = False
    for con in containers:
        if con.name == container_name:
            container = con

    if not container:
        container = swift.create_container(container_name=container_name)

    print(container)

    print(swift.list_containers())

    ###########################################################################
    #
    # upload a goat
    #
    ###########################################################################

    object_name = 'an amazing goat'
    # NOTE(review): hard-coded local Windows path -- must be adjusted to a
    # file that exists on the machine running this demo.
    file_path = 'C:\\Users\\Sebastian\\goat.jpg'
    objects = container.list_objects()
    # upload only if an object of that name is not already present
    object_data = False
    for obj in objects:
        if obj.name == object_name:
            object_data = obj

    if not object_data:
        # print(os.getcwd())
        container = swift.get_container(container_name=container_name)
        object_data = container.upload_object(file_path=file_path, object_name=object_name)

    objects = container.list_objects()
    print(objects)

    ###########################################################################
    #
    # check goat integrity
    #
    ###########################################################################

    # print the local file's MD5 so it can be compared by eye against the
    # checksum Swift reports for the uploaded object
    import hashlib
    print(hashlib.md5(open(file_path, 'rb').read()).hexdigest())

    ###########################################################################
    #
    # delete goat
    #
    ###########################################################################

    swift.delete_object(object_data)

    objects = container.list_objects()
    print(objects)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@ -1,97 +0,0 @@
|
|||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import getpass
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
|
|
||||||
import libcloud
|
|
||||||
import libcloud.security
|
|
||||||
import requests
|
|
||||||
from libcloud.storage.providers import get_driver
|
|
||||||
from libcloud.storage.types import Provider
|
|
||||||
|
|
||||||
# HS-Fulda Private Cloud
|
|
||||||
auth_url = 'https://192.168.72.40:5000'
|
|
||||||
region_name = 'RegionOne'
|
|
||||||
domain_name = "hsfulda"
|
|
||||||
|
|
||||||
api_ip = '192.168.72.102'
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Back up every fractal from the faafo API into a Swift container.

    Ensures a 'fractals' container exists, fetches the fractal list from
    the API at *api_ip*, then streams each fractal image into the
    container keyed by its UUID.

    Credentials come from the OS_* environment variables when set,
    otherwise the user is prompted interactively.
    """
    ###########################################################################
    #
    # get credentials
    #
    ###########################################################################

    if "OS_PROJECT_NAME" in os.environ:
        project_name = os.environ["OS_PROJECT_NAME"]
    else:
        project_name = input("Enter your OpenStack project:")

    if "OS_USERNAME" in os.environ:
        auth_username = os.environ["OS_USERNAME"]
    else:
        auth_username = input("Enter your OpenStack username:")

    if "OS_PASSWORD" in os.environ:
        auth_password = os.environ["OS_PASSWORD"]
    else:
        # getpass hides the password while it is typed
        auth_password = getpass.getpass("Enter your OpenStack password:")

    ###########################################################################
    #
    # create connection
    #
    ###########################################################################

    # NOTE(review): disables TLS certificate verification process-wide --
    # acceptable only for a lab environment with self-signed certificates.
    libcloud.security.VERIFY_SSL_CERT = False

    provider = get_driver(Provider.OPENSTACK_SWIFT)
    swift = provider(auth_username,
                     auth_password,
                     ex_force_auth_url=auth_url,
                     ex_force_auth_version='3.x_password',
                     ex_tenant_name=project_name,
                     ex_force_service_region=region_name,
                     ex_domain_name=domain_name)

    ###########################################################################
    #
    # create container
    #
    ###########################################################################

    container_name = 'fractals'
    containers = swift.list_containers()
    # reuse the container if it already exists, otherwise create it below
    container = False
    for con in containers:
        if con.name == container_name:
            container = con

    if not container:
        container = swift.create_container(container_name=container_name)

    print(container)

    ###########################################################################
    #
    # backup existing fractals to container
    #
    ###########################################################################

    endpoint = 'http://' + api_ip
    # results_per_page=-1 asks the API for the full, unpaginated list
    params = { 'results_per_page': '-1' }
    response = requests.get('%s/v1/fractal' % endpoint, params=params)
    data = json.loads(response.text)
    for fractal in data['objects']:
        # stream each image straight from the API into Swift (no temp file)
        response = requests.get('%s/fractal/%s' % (endpoint, fractal['uuid']), stream=True)
        container.upload_object_via_stream(response.iter_content(), object_name=fractal['uuid'])

    for object_data in container.list_objects():
        print(object_data)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@ -1,97 +0,0 @@
|
|||||||
# import getpass
|
|
||||||
# import os
|
|
||||||
# import libcloud.security
|
|
||||||
|
|
||||||
import time
|
|
||||||
from libcloud.compute.providers import get_driver
|
|
||||||
from libcloud.compute.types import Provider
|
|
||||||
|
|
||||||
# reqs:
|
|
||||||
# services: nova, glance, neutron
|
|
||||||
# resources: 2 instances (m1.small), 2 floating ips (1 keypair, 2 security groups)
|
|
||||||
|
|
||||||
# Please use 1-29 for X in the following variable to specify your group number. (will be used for the username,
|
|
||||||
# project etc., as coordinated in the lab sessions)
|
|
||||||
|
|
||||||
group_number = 30
|
|
||||||
|
|
||||||
|
|
||||||
# web service endpoint of the private cloud infrastructure
|
|
||||||
auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
|
||||||
# your username in OpenStack
|
|
||||||
auth_username = 'CloudComp' + str(group_number)
|
|
||||||
# your project in OpenStack
|
|
||||||
project_name = 'CloudComp' + str(group_number)
|
|
||||||
|
|
||||||
|
|
||||||
# default region
|
|
||||||
region_name = 'RegionOne'
|
|
||||||
# domain to use, "default" for local accounts, "hsfulda" for RZ LDAP, e.g., using fdaiXXXX as auth_username
|
|
||||||
domain_name = "default"
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Destroy all instances and security groups created by the faafo demos.

    Destroys every known demo instance, polls until they are gone (the
    security groups cannot be removed while attached to running nodes),
    then deletes the demo security groups.
    """
    ###########################################################################
    #
    # get credentials
    #
    ###########################################################################

    # if "OS_PASSWORD" in os.environ:
    #     auth_password = os.environ["OS_PASSWORD"]
    # else:
    #     auth_password = getpass.getpass("Enter your OpenStack password:")
    # NOTE(review): demo password is hard-coded here; the commented block
    # above shows the env-var/interactive alternative used by the other
    # scripts in this repo.
    auth_password = "demo"

    ###########################################################################
    #
    # create connection
    #
    ###########################################################################

    # libcloud.security.VERIFY_SSL_CERT = False

    provider = get_driver(Provider.OPENSTACK)
    conn = provider(auth_username,
                    auth_password,
                    ex_force_auth_url=auth_url,
                    ex_force_auth_version='3.x_password',
                    ex_tenant_name=project_name,
                    ex_force_service_region=region_name,
                    ex_domain_name=domain_name)

    ###########################################################################
    #
    # clean up resources from previous demos
    #
    ###########################################################################

    # destroy running demo instances
    for instance in conn.list_nodes():
        if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
                             'app-services', 'app-api-1', 'app-api-2']:
            print('Destroying Instance: %s' % instance.name)
            conn.destroy_node(instance)

    # wait until all nodes are destroyed to be able to remove depended security groups
    nodes_still_running = True
    while nodes_still_running:
        nodes_still_running = False
        # small delay between polls to avoid hammering the API
        time.sleep(3)
        instances = conn.list_nodes()
        for instance in instances:
            # if we see any demo instances still running continue to wait for them to stop
            if instance.name in ['all-in-one', 'app-worker-1', 'app-worker-2', 'app-worker-3', 'app-controller',
                                 'app-services', 'app-api-1', 'app-api-2']:
                nodes_still_running = True
                print('There are still instances running, waiting for them to be destroyed...')

    # delete security groups (safe now that no demo node references them)
    for group in conn.ex_list_security_groups():
        if group.name in ['control', 'worker', 'api', 'services']:
            print('Deleting security group: %s' % group.name)
            conn.ex_delete_security_group(group)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
main()
|
|
@ -1,52 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import kombu
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
|
||||||
|
|
||||||
from faafo.worker import service as worker
|
|
||||||
from faafo import version
|
|
||||||
|
|
||||||
LOG = log.getLogger('faafo.worker')
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
# If ../faafo/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
                                                os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'faafo', '__init__.py')):
    sys.path.insert(0, possible_topdir)

if __name__ == '__main__':
    # register oslo.log's config options before parsing the config/CLI
    log.register_options(CONF)
    log.set_defaults()

    # parse CLI arguments and config; defaults come from /etc/faafo/faafo.conf
    CONF(project='worker', prog='faafo-worker',
         default_config_files=['/etc/faafo/faafo.conf'],
         version=version.version_info.version_string())

    log.setup(CONF, 'worker',
              version=version.version_info.version_string())

    # connect to the message broker and run the worker until Ctrl-C
    connection = kombu.Connection(CONF.transport_url)
    server = worker.Worker(connection)
    try:
        server.run()
    except KeyboardInterrupt:
        LOG.info("Caught keyboard interrupt. Exiting.")
|
|
@ -1,267 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
import json
|
|
||||||
import random
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
|
||||||
from prettytable import PrettyTable
|
|
||||||
import requests
|
|
||||||
|
|
||||||
from faafo import version
|
|
||||||
|
|
||||||
|
|
||||||
LOG = log.getLogger('faafo.client')
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
def get_random_task():
    """Build one fractal task dict from the parsed 'create' CLI options.

    For each of width/height/iterations (ints) and xa/xb/ya/yb (floats),
    an explicitly supplied value wins; otherwise a value is drawn at
    random from the corresponding --min-*/--max-* range. The task gets a
    fresh UUID.
    """
    random.seed()

    def _pick(option, cast, draw):
        # explicit CLI value wins; otherwise sample from [min_*, max_*]
        explicit = getattr(CONF.command, option)
        if explicit:
            return cast(explicit)
        return draw(cast(getattr(CONF.command, 'min_' + option)),
                    cast(getattr(CONF.command, 'max_' + option)))

    width = _pick('width', int, random.randint)
    height = _pick('height', int, random.randint)
    iterations = _pick('iterations', int, random.randint)
    xa = _pick('xa', float, random.uniform)
    xb = _pick('xb', float, random.uniform)
    ya = _pick('ya', float, random.uniform)
    yb = _pick('yb', float, random.uniform)

    return {
        'uuid': str(uuid.uuid4()),
        'width': width,
        'height': height,
        'iterations': iterations,
        'xa': xa,
        'xb': xb,
        'ya': ya,
        'yb': yb,
    }
|
|
||||||
|
|
||||||
|
|
||||||
def do_get_fractal():
    """Handler for the 'download' subcommand (not implemented yet)."""
    LOG.error("command 'download' not yet implemented")
|
|
||||||
|
|
||||||
|
|
||||||
def do_show_fractal():
    """Handler for the 'show' subcommand: fetch one fractal by UUID from
    the API and print its metadata as a two-column table.

    Logs an error (instead of raising) when the API does not return 200.
    """
    LOG.info("showing fractal %s" % CONF.command.uuid)
    result = requests.get("%s/v1/fractal/%s" %
                          (CONF.endpoint_url, CONF.command.uuid))
    if result.status_code == 200:
        data = json.loads(result.text)
        # left-align both columns for readability
        output = PrettyTable(["Parameter", "Value"])
        output.align["Parameter"] = "l"
        output.align["Value"] = "l"
        output.add_row(["uuid", data['uuid']])
        output.add_row(["duration", "%f seconds" % data['duration']])
        output.add_row(["dimensions", "%d x %d pixels" %
                        (data['width'], data['height'])])
        output.add_row(["iterations", data['iterations']])
        output.add_row(["xa", data['xa']])
        output.add_row(["xb", data['xb']])
        output.add_row(["ya", data['ya']])
        output.add_row(["yb", data['yb']])
        output.add_row(["size", "%d bytes" % data['size']])
        output.add_row(["checksum", data['checksum']])
        output.add_row(["generated_by", data['generated_by']])
        print(output)
    else:
        LOG.error("fractal '%s' not found" % CONF.command.uuid)
|
|
||||||
|
|
||||||
|
|
||||||
def do_list_fractals():
    """Handler for the 'list' subcommand: print a table of all fractals
    known to the API (UUID, dimensions, file size)."""
    LOG.info("listing all fractals")

    fractals = get_fractals()
    output = PrettyTable(["UUID", "Dimensions", "Filesize"])
    for fractal in fractals:
        output.add_row([
            fractal["uuid"],
            "%d x %d pixels" % (fractal["width"], fractal["height"]),
            # size may be None while the worker has not finished -- show 0
            "%d bytes" % (fractal["size"] or 0),
        ])
    print(output)
|
|
||||||
|
|
||||||
|
|
||||||
def get_fractals(page=1):
    """Return the list of all fractal records from the API, walking the
    paginated /v1/fractal endpoint starting at *page*.

    Pages that fail (non-200 response) end the walk; whatever was
    collected up to that point is returned (an empty list if the first
    request fails).
    """
    collected = []
    current = page
    while True:
        result = requests.get("%s/v1/fractal?page=%d" %
                              (CONF.endpoint_url, current))
        if result.status_code != 200:
            break
        data = json.loads(result.text)
        collected += data['objects']
        # total_pages comes back with every response; stop on the last page
        if current >= data['total_pages']:
            break
        current += 1
    return collected
|
|
||||||
|
|
||||||
|
|
||||||
def do_delete_fractal():
    """Handler for the 'delete' subcommand: remove one fractal by UUID.

    The API response is only logged at debug level; no error is raised
    if the fractal does not exist.
    """
    LOG.info("deleting fractal %s" % CONF.command.uuid)
    result = requests.delete("%s/v1/fractal/%s" %
                             (CONF.endpoint_url, CONF.command.uuid))
    LOG.debug("result: %s" %result)
|
|
||||||
|
|
||||||
|
|
||||||
def do_create_fractal():
    """Handler for the 'create' subcommand: post one or more randomly
    generated fractal tasks to the API.

    The task count is --tasks when given, otherwise a random count in
    [--min-tasks, --max-tasks]; each task itself is produced by
    get_random_task().
    """
    random.seed()
    if CONF.command.tasks:
        number = int(CONF.command.tasks)
    else:
        number = random.randint(int(CONF.command.min_tasks),
                                int(CONF.command.max_tasks))
    LOG.info("generating %d task(s)" % number)
    # BUG FIX: was 'xrange', which does not exist on Python 3 (the other
    # scripts in this repo already use the Python-3-only input()).
    for i in range(0, number):
        task = get_random_task()
        LOG.debug("created task %s" % task)
        # NOTE(berendt): only necessary when using requests < 2.4.2
        headers = {'Content-type': 'application/json',
                   'Accept': 'text/plain'}
        requests.post("%s/v1/fractal" % CONF.endpoint_url,
                      json.dumps(task), headers=headers)
|
|
||||||
|
|
||||||
|
|
||||||
def add_command_parsers(subparsers):
    """Register the create/delete/show/get/list sub-commands."""
    create = subparsers.add_parser('create')
    create.set_defaults(func=do_create_fractal)
    # (flag, default, help) triples; each tunable has an exact value plus
    # --min-*/--max-* bounds used when the exact value is not supplied.
    create_options = [
        ("--height", None, "The height of the generate image."),
        ("--min-height", 256, "The minimum height of the generate image."),
        ("--max-height", 1024, "The maximum height of the generate image."),
        ("--width", None, "The width of the generated image."),
        ("--min-width", 256, "The minimum width of the generated image."),
        ("--max-width", 1024, "The maximum width of the generated image."),
        ("--iterations", None, "The number of iterations."),
        ("--min-iterations", 128, "The minimum number of iterations."),
        ("--max-iterations", 512, "The maximum number of iterations."),
        ("--tasks", None, "The number of generated fractals."),
        ("--min-tasks", 1, "The minimum number of generated fractals."),
        ("--max-tasks", 10, "The maximum number of generated fractals."),
        ("--xa", None, "The value for the parameter 'xa'."),
        ("--min-xa", -1.0, "The minimum value for the parameter 'xa'."),
        ("--max-xa", -4.0, "The maximum value for the parameter 'xa'."),
        ("--xb", None, "The value for the parameter 'xb'."),
        ("--min-xb", 1.0, "The minimum value for the parameter 'xb'."),
        ("--max-xb", 4.0, "The maximum value for the parameter 'xb'."),
        ("--ya", None, "The value for the parameter 'ya'."),
        ("--min-ya", -0.5, "The minimum value for the parameter 'ya'."),
        ("--max-ya", -3, "The maximum value for the parameter 'ya'."),
        ("--yb", None, "The value for the parameter 'yb'."),
        ("--min-yb", 0.5, "The minimum value for the parameter 'yb'."),
        ("--max-yb", 3, "The maximum value for the parameter 'yb'."),
    ]
    for flag, default, helptext in create_options:
        create.add_argument(flag, default=default, help=helptext)

    delete = subparsers.add_parser('delete')
    delete.set_defaults(func=do_delete_fractal)
    delete.add_argument("uuid", help="Fractal to delete.")

    show = subparsers.add_parser('show')
    show.set_defaults(func=do_show_fractal)
    show.add_argument("uuid", help="Fractal to show.")

    get = subparsers.add_parser('get')
    get.set_defaults(func=do_get_fractal)
    get.add_argument("uuid", help="Fractal to download.")

    listing = subparsers.add_parser('list')
    listing.set_defaults(func=do_list_fractals)
|
|
||||||
# Top-level sub-command option; dispatches to the parsers registered by
# add_command_parsers() above and exposes the selection as CONF.command.
client_commands = cfg.SubCommandOpt('command', title='Commands',
                                    help='Show available commands.',
                                    handler=add_command_parsers)

CONF.register_cli_opts([client_commands])

# --endpoint-url: base URL of the faafo API service this client talks to.
client_cli_opts = [
    cfg.StrOpt('endpoint-url',
               default='http://localhost',
               help='API connection URL')
]

CONF.register_cli_opts(client_cli_opts)
|
|
||||||
if __name__ == '__main__':
    # Register oslo.log's CLI options before parsing so logging flags
    # (--debug, --log-file, ...) are recognised on the command line.
    log.register_options(CONF)
    log.set_defaults()

    # Parse the command line; this also binds the chosen sub-command's
    # handler function to CONF.command.func.
    CONF(project='client', prog='faafo-client',
         version=version.version_info.version_string())

    log.setup(CONF, 'client',
              version=version.version_info.version_string())

    # Run the selected sub-command (create/delete/show/get/list).
    CONF.command.func()
@ -83,13 +83,13 @@ if [[ -e /etc/os-release ]]; then
|
|||||||
|
|
||||||
if [[ $INSTALL_DATABASE -eq 1 ]]; then
|
if [[ $INSTALL_DATABASE -eq 1 ]]; then
|
||||||
if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then
|
if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then
|
||||||
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server python-mysqldb
|
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server python3-mysqldb
|
||||||
# HSFD changes for Ubuntu 18.04
|
# HSFD changes for Ubuntu 18.04
|
||||||
sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf
|
sudo sed -i -e "/bind-address/d" /etc/mysql/mysql.conf.d/mysqld.cnf
|
||||||
#sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf
|
#sudo sed -i -e "/bind-address/d" /etc/mysql/my.cnf
|
||||||
sudo service mysql restart
|
sudo service mysql restart
|
||||||
elif [[ $ID = 'fedora' ]]; then
|
elif [[ $ID = 'fedora' ]]; then
|
||||||
sudo dnf install -y mariadb-server python-mysql
|
sudo dnf install -y mariadb-server python3-mysql
|
||||||
printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf
|
printf "[mysqld]\nbind-address = 127.0.0.1\n" | sudo tee /etc/my.cnf.d/faafo.conf
|
||||||
sudo systemctl enable mariadb
|
sudo systemctl enable mariadb
|
||||||
sudo systemctl start mariadb
|
sudo systemctl start mariadb
|
||||||
@ -117,7 +117,7 @@ if [[ -e /etc/os-release ]]; then
|
|||||||
|
|
||||||
if [[ $INSTALL_FAAFO -eq 1 ]]; then
|
if [[ $INSTALL_FAAFO -eq 1 ]]; then
|
||||||
if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then
|
if [[ $ID = 'ubuntu' || $ID = 'debian' ]]; then
|
||||||
sudo apt-get install -y python-dev python-pip supervisor git zlib1g-dev libmysqlclient-dev python-mysqldb
|
sudo apt-get install -y python3-dev python3-pip supervisor git zlib1g-dev libmysqlclient-dev python3-mysqldb
|
||||||
# Following is needed because of
|
# Following is needed because of
|
||||||
# https://bugs.launchpad.net/ubuntu/+source/supervisor/+bug/1594740
|
# https://bugs.launchpad.net/ubuntu/+source/supervisor/+bug/1594740
|
||||||
if [ $(lsb_release --short --codename) = xenial ]; then
|
if [ $(lsb_release --short --codename) = xenial ]; then
|
||||||
@ -131,7 +131,7 @@ if [[ -e /etc/os-release ]]; then
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
elif [[ $ID = 'fedora' ]]; then
|
elif [[ $ID = 'fedora' ]]; then
|
||||||
sudo dnf install -y python-devel python-pip supervisor git zlib-devel mariadb-devel gcc which python-mysql
|
sudo dnf install -y python3-devel python3-pip supervisor git zlib-devel mariadb-devel gcc which python3-mysql
|
||||||
sudo systemctl enable supervisord
|
sudo systemctl enable supervisord
|
||||||
sudo systemctl start supervisord
|
sudo systemctl start supervisord
|
||||||
#elif [[ $ID = 'opensuse' || $ID = 'sles' ]]; then
|
#elif [[ $ID = 'opensuse' || $ID = 'sles' ]]; then
|
||||||
|
@ -1,56 +0,0 @@
|
|||||||
#!/usr/bin/env python

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Smoke test for the fractal REST API: exercises the full CRUD cycle
# (create, list, read, update, delete) against a locally running service.

import json
import requests

url = 'http://127.0.0.1/api/fractal'
headers = {'Content-Type': 'application/json'}

# Fixed uuid so the test is repeatable; the final DELETE cleans it up.
uuid = '13bf15a8-9f6c-4d59-956f-7d20f7484687'
# Parameters for a small 100x100 fractal, cheap to generate.
data = {
    'uuid': uuid,
    'width': 100,
    'height': 100,
    'iterations': 10,
    'xa': 1.0,
    'xb': -1.0,
    'ya': 1.0,
    'yb': -1.0,
}
# Create: expects 201 Created.
response = requests.post(url, data=json.dumps(data), headers=headers)
assert response.status_code == 201

# List all fractals: expects 200 OK.
response = requests.get(url, headers=headers)
assert response.status_code == 200
print(response.json())

# Read the single fractal just created.
response = requests.get(url + '/' + uuid, headers=headers)
assert response.status_code == 200
print(response.json())

# Update: simulate a worker reporting its result back.
data = {
    'checksum': 'c6fef4ef13a577066c2281b53c82ce2c7e94e',
    'duration': 10.12
}
response = requests.put(url + '/' + uuid, data=json.dumps(data),
                        headers=headers)
assert response.status_code == 200

# Re-read and confirm the update is visible.
response = requests.get(url + '/' + uuid, headers=headers)
assert response.status_code == 200
print(response.json())

# Delete: expects 204 No Content.
response = requests.delete(url + '/' + uuid, headers=headers)
assert response.status_code == 204
@ -104,7 +104,7 @@ def main():
|
|||||||
keypair_exists = True
|
keypair_exists = True
|
||||||
|
|
||||||
if keypair_exists:
|
if keypair_exists:
|
||||||
print('Keypair ' + keypair_name + ' already exists. Skipping import.')
|
print(('Keypair ' + keypair_name + ' already exists. Skipping import.'))
|
||||||
else:
|
else:
|
||||||
print('adding keypair...')
|
print('adding keypair...')
|
||||||
conn.import_key_pair_from_file(keypair_name, pub_key_file)
|
conn.import_key_pair_from_file(keypair_name, pub_key_file)
|
||||||
@ -128,7 +128,7 @@ def main():
|
|||||||
security_group_exists = True
|
security_group_exists = True
|
||||||
|
|
||||||
if security_group_exists:
|
if security_group_exists:
|
||||||
print('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.')
|
print(('Security Group ' + all_in_one_security_group.name + ' already exists. Skipping creation.'))
|
||||||
else:
|
else:
|
||||||
all_in_one_security_group = conn.ex_create_security_group(security_group_name,
|
all_in_one_security_group = conn.ex_create_security_group(security_group_name,
|
||||||
'network access for all-in-one application.')
|
'network access for all-in-one application.')
|
||||||
@ -159,7 +159,7 @@ def main():
|
|||||||
instance_exists = True
|
instance_exists = True
|
||||||
|
|
||||||
if instance_exists:
|
if instance_exists:
|
||||||
print('Instance ' + testing_instance.name + ' already exists. Skipping creation.')
|
print(('Instance ' + testing_instance.name + ' already exists. Skipping creation.'))
|
||||||
exit()
|
exit()
|
||||||
else:
|
else:
|
||||||
print('Starting new all-in-one instance and wait until it is running...')
|
print('Starting new all-in-one instance and wait until it is running...')
|
||||||
@ -181,12 +181,12 @@ def main():
|
|||||||
private_ip = None
|
private_ip = None
|
||||||
if len(testing_instance.private_ips):
|
if len(testing_instance.private_ips):
|
||||||
private_ip = testing_instance.private_ips[0]
|
private_ip = testing_instance.private_ips[0]
|
||||||
print('Private IP found: {}'.format(private_ip))
|
print(('Private IP found: {}'.format(private_ip)))
|
||||||
|
|
||||||
public_ip = None
|
public_ip = None
|
||||||
if len(testing_instance.public_ips):
|
if len(testing_instance.public_ips):
|
||||||
public_ip = testing_instance.public_ips[0]
|
public_ip = testing_instance.public_ips[0]
|
||||||
print('Public IP found: {}'.format(public_ip))
|
print(('Public IP found: {}'.format(public_ip)))
|
||||||
|
|
||||||
print('Checking for unused Floating IP...')
|
print('Checking for unused Floating IP...')
|
||||||
unused_floating_ip = None
|
unused_floating_ip = None
|
||||||
@ -197,11 +197,11 @@ def main():
|
|||||||
|
|
||||||
if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()):
|
if not unused_floating_ip and len(conn.ex_list_floating_ip_pools()):
|
||||||
pool = conn.ex_list_floating_ip_pools()[0]
|
pool = conn.ex_list_floating_ip_pools()[0]
|
||||||
print('Allocating new Floating IP from pool: {}'.format(pool))
|
print(('Allocating new Floating IP from pool: {}'.format(pool)))
|
||||||
unused_floating_ip = pool.create_floating_ip()
|
unused_floating_ip = pool.create_floating_ip()
|
||||||
|
|
||||||
if public_ip:
|
if public_ip:
|
||||||
print('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.')
|
print(('Instance ' + testing_instance.name + ' already has a public ip. Skipping attachment.'))
|
||||||
elif unused_floating_ip:
|
elif unused_floating_ip:
|
||||||
conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip)
|
conn.ex_attach_floating_ip_to_node(testing_instance, unused_floating_ip)
|
||||||
|
|
||||||
@ -214,7 +214,7 @@ def main():
|
|||||||
actual_ip_address = private_ip
|
actual_ip_address = private_ip
|
||||||
|
|
||||||
print('\n')
|
print('\n')
|
||||||
print('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address))
|
print(('The Fractals app will be deployed to http://{}\n'.format(actual_ip_address)))
|
||||||
|
|
||||||
print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n'
|
print('You can use ssh to login to the instance using your private key. Default user name for official Ubuntu\n'
|
||||||
'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@<floating-ip>" if your private\n'
|
'Cloud Images is: ubuntu, so you can use, e.g.: "ssh -i ~/.ssh/id_rsa ubuntu@<floating-ip>" if your private\n'
|
@ -1,146 +0,0 @@
|
|||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import base64
|
|
||||||
import copy
|
|
||||||
import cStringIO
|
|
||||||
from pkg_resources import resource_filename
|
|
||||||
|
|
||||||
import flask
|
|
||||||
from flask_restless import APIManager
|
|
||||||
from flask_sqlalchemy import SQLAlchemy
|
|
||||||
from flask_bootstrap import Bootstrap
|
|
||||||
from kombu import Connection
|
|
||||||
from kombu.pools import producers
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
|
||||||
from PIL import Image
|
|
||||||
from sqlalchemy.dialects import mysql
|
|
||||||
|
|
||||||
from faafo import queues
|
|
||||||
from faafo import version
|
|
||||||
|
|
||||||
LOG = log.getLogger('faafo.api')
CONF = cfg.CONF

# Configuration options of the API service.
api_opts = [
    cfg.StrOpt('listen-address',
               default='0.0.0.0',
               help='Listen address.'),
    cfg.IntOpt('bind-port',
               # NOTE(review): default is a string for an IntOpt —
               # presumably oslo.config coerces it; confirm.
               default='80',
               help='Bind port.'),
    cfg.StrOpt('database-url',
               default='sqlite:////tmp/sqlite.db',
               help='Database connection URL.')
]

CONF.register_opts(api_opts)

# Logging must be registered before CONF() parses the command line.
log.register_options(CONF)
log.set_defaults()

CONF(project='api', prog='faafo-api',
     default_config_files=['/etc/faafo/faafo.conf'],
     version=version.version_info.version_string())

log.setup(CONF, 'api',
          version=version.version_info.version_string())

# Flask application with templates shipped inside the package.
template_path = resource_filename(__name__, "templates")
app = flask.Flask('faafo.api', template_folder=template_path)
app.config['DEBUG'] = CONF.debug
app.config['SQLALCHEMY_DATABASE_URI'] = CONF.database_url
db = SQLAlchemy(app)
Bootstrap(app)
|
|
||||||
def list_opts():
    """Entry point for oslo-config-generator."""
    # Hand out a deep copy so the generator cannot mutate our option list.
    opts = copy.deepcopy(api_opts)
    return [(None, opts)]
|
|
||||||
class Fractal(db.Model):
    """Database model for a single fractal image and its parameters."""

    uuid = db.Column(db.String(36), primary_key=True)
    # Filled in by the worker once generation finished.
    checksum = db.Column(db.String(256), unique=True)
    url = db.Column(db.String(256), nullable=True)
    duration = db.Column(db.Float)
    size = db.Column(db.Integer, nullable=True)
    # Generation parameters: image dimensions, iteration count and the
    # xa/xb/ya/yb coordinate window of the fractal.
    width = db.Column(db.Integer, nullable=False)
    height = db.Column(db.Integer, nullable=False)
    iterations = db.Column(db.Integer, nullable=False)
    xa = db.Column(db.Float, nullable=False)
    xb = db.Column(db.Float, nullable=False)
    ya = db.Column(db.Float, nullable=False)
    yb = db.Column(db.Float, nullable=False)

    # MySQL's default BLOB is too small for larger images, so use
    # MEDIUMBLOB there; other backends get the generic LargeBinary.
    if CONF.database_url.startswith('mysql'):
        LOG.debug('Using MySQL database backend')
        image = db.Column(mysql.MEDIUMBLOB, nullable=True)
    else:
        image = db.Column(db.LargeBinary, nullable=True)

    generated_by = db.Column(db.String(256), nullable=True)

    def __repr__(self):
        return '<Fractal %s>' % self.uuid
|
|
||||||
# Create the schema (no-op if the tables already exist), wire up the
# Flask-Restless API manager and open the AMQP connection used to hand
# generation tasks to the workers.
db.create_all()
manager = APIManager(app, flask_sqlalchemy_db=db)
connection = Connection(CONF.transport_url)
|
|
||||||
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
@app.route('/index/<int:page>', methods=['GET'])
def index(page=1):
    """Render a paginated overview page of all finished fractals."""
    # Only fractals whose worker has reported checksum and size are shown.
    finished = (Fractal.checksum != None) & (Fractal.size != None)  # noqa
    page_of_fractals = Fractal.query.filter(finished).paginate(
        page, 5, error_out=False)
    return flask.render_template('index.html', fractals=page_of_fractals)
|
|
||||||
@app.route('/fractal/<string:fractalid>', methods=['GET'])
def get_fractal(fractalid):
    """Return the stored fractal image as a PNG, or a JSON 404 error.

    :param fractalid: uuid of the fractal to fetch.
    """
    # Local import keeps the Python 3 fix self-contained: the py2-only
    # cStringIO module does not exist on Python 3.
    import io

    fractal = Fractal.query.filter_by(uuid=fractalid).first()
    if not fractal:
        response = flask.jsonify({'code': 404,
                                  'message': 'Fractal not found'})
        response.status_code = 404
    else:
        # The image is stored base64-encoded; decode it, round-trip it
        # through PIL and stream the resulting PNG bytes back.
        image_data = base64.b64decode(fractal.image)
        image = Image.open(io.BytesIO(image_data))
        output = io.BytesIO()
        image.save(output, "PNG")
        # bug fix: the original seeked the PIL image (frame selection),
        # not the buffer; rewind the output buffer instead.
        output.seek(0)
        response = flask.make_response(output.getvalue())
        response.content_type = "image/png"

    return response
|
|
||||||
def generate_fractal(**kwargs):
    """POST postprocessor: enqueue the newly created fractal as a task."""
    payload = kwargs['result']
    with producers[connection].acquire(block=True) as producer:
        producer.publish(payload,
                         exchange=queues.task_exchange,
                         routing_key='normal',
                         declare=[queues.task_exchange],
                         serializer='json')
|
|
||||||
def main():
    """Expose the Fractal model as a REST resource and run the server."""
    # The raw image column is excluded from JSON responses (served via
    # /fractal/<uuid> instead); every POST additionally enqueues a
    # generation task through generate_fractal().
    manager.create_api(Fractal, methods=['GET', 'POST', 'DELETE', 'PUT'],
                       postprocessors={'POST': [generate_fractal]},
                       exclude_columns=['image'],
                       url_prefix='/v1')
    app.run(host=CONF.listen_address, port=CONF.bind_port)
@ -1,32 +0,0 @@
|
|||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import copy
|
|
||||||
|
|
||||||
import kombu
|
|
||||||
from oslo_config import cfg
|
|
||||||
|
|
||||||
# Single direct exchange for fractal generation tasks; workers consume
# from the 'normal' queue bound with the matching routing key.
task_exchange = kombu.Exchange('tasks', type='direct')
task_queue = kombu.Queue('normal', task_exchange, routing_key='normal')

# --transport-url: AMQP broker shared by API and workers.
queues_opts = [
    cfg.StrOpt('transport-url',
               default='amqp://guest:guest@localhost:5672//',
               help='AMQP connection URL.')
]

cfg.CONF.register_opts(queues_opts)
|
|
||||||
def list_opts():
    """Entry point for oslo-config-generator."""
    # Deep copy so the generator cannot mutate our registered options.
    opts = copy.deepcopy(queues_opts)
    return [(None, opts)]
@ -1,15 +0,0 @@
|
|||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import pbr.version

# Version information is derived by pbr from package metadata / git tags.
version_info = pbr.version.VersionInfo('faafo')
@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All real packaging metadata lives in setup.cfg; pbr reads it from there.
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
Loading…
x
Reference in New Issue
Block a user