Compare commits
10 Commits
259afec126 ... 727b03dcf8

Commits in this compare range:
- 727b03dcf8
- 50aa1a8519
- 667d2dfe3f
- 64670ebd17
- 9a71244f72
- bf8678c36c
- 51102e7f4c
- b84f53b07e
- 9df153cfde
- 376d287b65
@@ -17,7 +17,7 @@ export GROUP_NUMBER=0
 # OpenStack API is version 3. For example, your cloud provider may implement
 # Image API v1.1, Block Storage API v2, and Compute API v2.0. OS_AUTH_URL is
 # only for the Identity API served through keystone.
-export OS_AUTH_URL=https://10.32.4.182:5000/v3
+export OS_AUTH_URL=https://10.32.4.29:5000/v3
 # With the addition of Keystone we have standardized on the term **project**
 # as the entity that owns the resources.
 #export OS_PROJECT_ID=bba62cf6bf0b447491829d207e1b05f9
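The endpoint change can be verified from a shell after sourcing the updated RC file. A minimal sketch, assuming the file above is saved as `openrc` and the repository's `root-ca.crt` is used for TLS verification (both file names are assumptions, they are not visible in this hunk):

```bash
# Load the OpenStack credentials and the new OS_AUTH_URL into the current shell
source openrc

# Let the clients verify the self-signed lab certificate
export OS_CACERT=./root-ca.crt

# Quick smoke test against the new Identity endpoint
openstack token issue
```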
@@ -11,7 +11,7 @@ clouds:
     auth:
       # auth_url: https://private-cloud.example.com:5000/v3
       # auth_url: https://private-cloud2.example.com:5000/v3
-      auth_url: https://10.32.4.182:5000/v3
+      auth_url: https://10.32.4.29:5000/v3
       username: "CloudComp0"
       password: "demo"
       # project_id: bba62cf6bf0b447491829d207e1b05f9
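With the `auth_url` in `clouds.yaml` updated, the same check can be run through the named cloud profile. A minimal sketch, assuming the cloud entry is called `private-cloud` (the entry name is not visible in this hunk) and that the root CA is made known via `OS_CACERT` or the `cacert` key in `clouds.yaml`:

```bash
# List servers through the cloud profile defined in clouds.yaml
export OS_CACERT=./root-ca.crt
openstack --os-cloud private-cloud server list
```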
@@ -9,21 +9,23 @@
# libCloud: https://libcloud.apache.org/
# libCloud API documentation: https://libcloud.readthedocs.io/en/latest/
# OpenStack API documentation: https://developer.openstack.org/
#
# this code was initially based on the former tutorial:
# https://developer.openstack.org/firstapp-libcloud/

# Only needed for the password prompt:
# Only needed when using a password prompt:
# import getpass

from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

# For our new Charmed OpenStack private cloud, we need to specify the path to the
# root CA certificate
# root CA certificate for now, until we have a valid certificate for the OpenStack API.
# This is necessary to avoid SSL certificate verification errors.
import libcloud.security
libcloud.security.CA_CERTS_PATH = ['./root-ca.crt']
# Disable SSL certificate verification (not recommended for production)
# libcloud.security.VERIFY_SSL_CERT = False
#libcloud.security.VERIFY_SSL_CERT = False

# Please use 1-29 for 0 in the following variable to specify your group number.
# (will be used for the username, project etc., as coordinated in the lab sessions)
@@ -31,6 +33,7 @@ libcloud.security.CA_CERTS_PATH = ['./root-ca.crt']
GROUP_NUMBER = 0



###############################################################################################
#
# no changes necessary below this line in this example
@@ -38,9 +41,11 @@ GROUP_NUMBER = 0
###############################################################################################

# web service endpoint of the private cloud infrastructure
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
AUTH_URL = 'https://10.32.4.182:5000'
#AUTH_URL = 'https://private-cloud.informatik.hs-fulda.de:5000'
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
# using the IP address of the OpenStack API endpoint to avoid DNS resolution issues for students
# using Linux or MacOS as our VPN currently does not support IPv6
AUTH_URL = "https://10.32.4.29:5000"
# your username in OpenStack
AUTH_USERNAME = 'CloudComp' + str(GROUP_NUMBER)
# your project in OpenStack
@@ -51,7 +56,7 @@ PROJECT_NETWORK = 'CloudComp' + str(GROUP_NUMBER) + '-net'
# The image to look for and use for the started instance
# ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
#UBUNTU_IMAGE_NAME = "auto-sync/ubuntu-jammy-22.04-amd64-server-20240319-disk1.img"
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-x86_64"
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-server-cloud-image-amd64"

# default region
REGION_NAME = 'RegionOne'
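Because the scripts now pin `libcloud.security.CA_CERTS_PATH` to the lab's root CA instead of disabling verification, it can be worth checking the certificate and the new endpoint once before running the demos. A minimal sketch, assuming `root-ca.crt` sits next to the scripts as in the diff:

```bash
# Succeeds only if the Keystone endpoint presents a certificate
# signed by the lab's root CA; prints the Identity API version document
curl --cacert ./root-ca.crt https://10.32.4.29:5000/v3
```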
@@ -26,6 +26,7 @@ libcloud.security.CA_CERTS_PATH = ['./root-ca.crt']
|
||||
# Please use 1-29 as environment variable GROUP_NUMBER to specify your group number.
|
||||
# (will be used for the username, project etc., as coordinated in the lab sessions)
|
||||
|
||||
# get the GROUP_NUMBER from an env var, was only hard-coded as easy entry in demo1
|
||||
group_number = os.environ.get('GROUP_NUMBER')
|
||||
if group_number is None:
|
||||
sys.exit('Please set the GROUP_NUMBER environment variable to your group number,\n'
|
||||
@@ -37,8 +38,8 @@ if group_number is None:
|
||||
|
||||
# web service endpoint of the private cloud infrastructure
|
||||
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.182:5000'
|
||||
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.29:5000'
|
||||
# your username in OpenStack
|
||||
AUTH_USERNAME = 'CloudComp' + str(group_number)
|
||||
print(f'Using username: {AUTH_USERNAME}\n')
|
||||
@@ -50,7 +51,7 @@ PROJECT_NETWORK = 'CloudComp' + str(group_number) + '-net'
|
||||
# The image to look for and use for the started instance
|
||||
# ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
|
||||
#UBUNTU_IMAGE_NAME = "auto-sync/ubuntu-jammy-22.04-amd64-server-20240319-disk1.img"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-x86_64"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-server-cloud-image-amd64"
|
||||
|
||||
# The public key to be used for SSH connection, please make sure, that you have the
|
||||
# corresponding private key
|
||||
@@ -175,9 +176,8 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install.sh' # noqa: E501 pylint: disable=line-too-long
|
||||
# testing / faafo dev branch:
|
||||
# hsfd_faafo_cloud_init_script = 'https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/branch/dev_faafo/faafo/contrib/install.sh' # noqa: E501 pylint: disable=line-too-long
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install.sh'
|
||||
|
||||
userdata = '#!/usr/bin/env bash\n' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
@@ -35,8 +35,8 @@ if group_number is None:
|
||||
|
||||
# web service endpoint of the private cloud infrastructure
|
||||
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.182:5000'
|
||||
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.29:5000'
|
||||
# your username in OpenStack
|
||||
AUTH_USERNAME = 'CloudComp' + str(group_number)
|
||||
print(f'Using username: {AUTH_USERNAME}\n')
|
||||
@@ -48,7 +48,7 @@ PROJECT_NETWORK = 'CloudComp' + str(group_number) + '-net'
|
||||
# The image to look for and use for the started instance
|
||||
# ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
|
||||
#UBUNTU_IMAGE_NAME = "auto-sync/ubuntu-jammy-22.04-amd64-server-20240319-disk1.img"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-x86_64"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-server-cloud-image-amd64"
|
||||
|
||||
# The public key to be used for SSH connection, please make sure, that you have the
|
||||
# corresponding private key
|
||||
@@ -192,9 +192,11 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install.sh'
|
||||
|
||||
userdata = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-' \
|
||||
'examples/raw/master/faafo/contrib/install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
'-i messaging -i faafo -r api\n'
|
||||
print('\nUsing cloud-init userdata for controller:\n"' + userdata + '"\n')
|
||||
|
||||
@@ -252,8 +254,7 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
###########################################################################
|
||||
|
||||
userdata = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-' \
|
||||
'examples/raw/master/faafo/contrib/install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
f'-i faafo -r worker -e "http://{ip_controller}" -m "amqp://faafo:guest@' \
|
||||
f'{ip_controller}:5672/"\n'
|
||||
print('\nUsing cloud-init userdata for worker:\n"' + userdata + '"\n')
|
@@ -35,8 +35,8 @@ if group_number is None:
|
||||
|
||||
# web service endpoint of the private cloud infrastructure
|
||||
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.182:5000'
|
||||
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.29:5000'
|
||||
# your username in OpenStack
|
||||
AUTH_USERNAME = 'CloudComp' + str(group_number)
|
||||
print(f'Using username: {AUTH_USERNAME}\n')
|
||||
@@ -48,7 +48,7 @@ PROJECT_NETWORK = 'CloudComp' + str(group_number) + '-net'
|
||||
# The image to look for and use for the started instance
|
||||
# ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
|
||||
#UBUNTU_IMAGE_NAME = "auto-sync/ubuntu-jammy-22.04-amd64-server-20240319-disk1.img"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-x86_64"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-server-cloud-image-amd64"
|
||||
|
||||
# The public key to be used for SSH connection, please make sure, that you have the
|
||||
# corresponding private key
|
||||
@@ -250,9 +250,11 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install.sh'
|
||||
|
||||
userdata_service = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-' \
|
||||
'examples/raw/master/faafo/contrib/install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
'-i database -i messaging\n'
|
||||
print('\nUsing cloud-init userdata for service:\n"' + userdata_service + '"\n')
|
||||
|
||||
@@ -275,9 +277,7 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
###########################################################################
|
||||
|
||||
userdata_api = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/' \
|
||||
'cloud-computing-msc-ai-examples/raw/master/faafo/contrib/' \
|
||||
'install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
f'-i faafo -r api -m "amqp://faafo:guest@{services_ip}:5672/" ' \
|
||||
f'-d "mysql+pymysql://faafo:password@{services_ip}:3306/faafo"'
|
||||
print('\nUsing cloud-init userdata for api:\n"' + userdata_api + '"\n')
|
||||
@@ -320,9 +320,7 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
###########################################################################
|
||||
|
||||
userdata_worker = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/' \
|
||||
'cloud-computing-msc-ai-examples/raw/master/faafo/contrib/' \
|
||||
'install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
f'-i faafo -r worker -e "http://{api_1_ip}" '\
|
||||
f'-m "amqp://faafo:guest@{services_ip}:5672/"'
|
||||
print('\nUsing cloud-init userdata for worker:\n"' + userdata_worker + '"\n')
|
@@ -33,8 +33,8 @@ if group_number is None:
|
||||
|
||||
# web service endpoint of the private cloud infrastructure
|
||||
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.182:5000'
|
||||
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.29:5000'
|
||||
# your username in OpenStack
|
||||
AUTH_USERNAME = 'CloudComp' + str(group_number)
|
||||
print(f'Using username: {AUTH_USERNAME}\n')
|
||||
@@ -46,7 +46,7 @@ PROJECT_NETWORK = 'CloudComp' + str(group_number) + '-net'
|
||||
# The image to look for and use for the started instance
|
||||
# ubuntu_image_name = "Ubuntu 18.04 - Bionic Beaver - 64-bit - Cloud Based Image"
|
||||
#UBUNTU_IMAGE_NAME = "auto-sync/ubuntu-jammy-22.04-amd64-server-20240319-disk1.img"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-x86_64"
|
||||
UBUNTU_IMAGE_NAME = "ubuntu-22.04-jammy-server-cloud-image-amd64"
|
||||
|
||||
# The public key to be used for SSH connection, please make sure, that you have the
|
||||
# corresponding private key
|
||||
@@ -189,10 +189,11 @@ def main(): # noqa: C901 pylint: disable=too-many-branches,too-many-statements,
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install.sh'
|
||||
|
||||
userdata_worker = '#!/usr/bin/env bash\n' \
|
||||
'curl -L -s https://gogs.informatik.hs-fulda.de/srieger/' \
|
||||
'cloud-computing-msc-ai-examples/raw/master/faafo/contrib/' \
|
||||
'install.sh | bash -s -- ' \
|
||||
f'curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- ' \
|
||||
f'-i faafo -r worker -e "http://{api_1_ip}" '\
|
||||
f'-m "amqp://faafo:guest@{services_ip}:5672/"'
|
||||
print('\nUsing cloud-init userdata for worker:\n"' + userdata_worker + '"\n')
|
@@ -39,7 +39,7 @@ if group_number is None:
|
||||
|
||||
# web service endpoint of the private cloud infrastructure
|
||||
# auth_url = 'https://private-cloud.informatik.hs-fulda.de:5000'
|
||||
AUTH_URL = 'https://10.32.4.182:5000'
|
||||
AUTH_URL = 'https://10.32.4.29:5000'
|
||||
# auth_url = 'https://private-cloud2.informatik.hs-fulda.de:5000'
|
||||
# your username in OpenStack
|
||||
AUTH_USERNAME = 'CloudComp' + str(group_number)
|
@@ -68,7 +68,7 @@
|
||||
"VERTSYS_PATH=\"$SCRIPT_ROOT_PATH/verteilte-systeme-bsc-ai-examples/VerteilteSysteme-Examples/build/\"\n",
|
||||
"#JARS = \"TCPServer.jar TCPServerMulti.jar UDPServer.jar UDPServerMulti.jar UDPTimeCounterServer.jar TCPTimeCounterServer.jar TCPPerfServer.jar\"\n",
|
||||
"JARS=\"TCPServer.jar TCPPerfServer.jar UDPServer.jar UDPTimeCounterServer.jar TCPTimeCounterServer.jar TCPTimeCounterRESTServer.jar\"\n",
|
||||
"REPO=\"https://gogs.informatik.hs-fulda.de/srieger/verteilte-systeme-bsc-ai-examples.git\"\n",
|
||||
"REPO=\"https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples.git\"\n",
|
||||
"\n",
|
||||
"# Create path to run the script\n",
|
||||
"mkdir $SCRIPT_ROOT_PATH\n",
|
||||
|
@@ -79,7 +79,7 @@
|
||||
"VERTSYS_PATH=\"$SCRIPT_ROOT_PATH/verteilte-systeme-bsc-ai-examples/VerteilteSysteme-Examples/build/\"\n",
|
||||
"#JARS = \"TCPServer.jar TCPServerMulti.jar UDPServer.jar UDPServerMulti.jar UDPTimeCounterServer.jar TCPTimeCounterServer.jar TCPPerfServer.jar\"\n",
|
||||
"JARS=\"TCPServer.jar TCPPerfServer.jar UDPServer.jar UDPTimeCounterServer.jar TCPTimeCounterServer.jar TCPTimeCounterRESTServer.jar\"\n",
|
||||
"REPO=\"https://gogs.informatik.hs-fulda.de/srieger/verteilte-systeme-bsc-ai-examples.git\"\n",
|
||||
"REPO=\"https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples.git\"\n",
|
||||
"\n",
|
||||
"# Create path to run the script\n",
|
||||
"mkdir $SCRIPT_ROOT_PATH\n",
|
||||
|
@@ -131,7 +131,10 @@ print("Lambda Function and S3 Bucket to store the counter are available.\n"
|
||||
"Try to understand how Lambda can be used to cut costs regarding cloud services and what its pros\n"
|
||||
"and cons are.\n")
|
||||
|
||||
# sadly, AWS Academy Labs don't allow API gateways
|
||||
# API gateways require a Pro license from localstack, as a student you can apply for
|
||||
# a hobby/open source dev license, that's the same license that I used for the demo in the
|
||||
# lab/lecture
|
||||
|
||||
# API gateway would allow getting an HTTP endpoint that we could access directly in the browser,
|
||||
# that would call our function, as in the provided demo:
|
||||
#
|
||||
|
@@ -28,6 +28,8 @@ functionName = 'cloudcomp-counter-lambda-demo'
|
||||
# see ARN for AWS Academy LabRole function here:
|
||||
# https://us-east-1.console.aws.amazon.com/iamv2/home?region=us-east-1#/roles/details/LabRole?section=permissions
|
||||
#
|
||||
# e.g.: (309000625112, 919927306708, 488766701848 would in your case be your AWS Account ID, see Lab Details)
|
||||
#
|
||||
# roleArn = 'arn:aws:iam::309000625112:role/service-role/cloudcomp-counter-demo-role-6rs7pah3'
|
||||
# roleArn = 'arn:aws:iam::919927306708:role/cloudcomp-s3-access'
|
||||
# roleArn = 'arn:aws:iam::488766701848:role/LabRole'
|
||||
@@ -68,6 +70,16 @@ for role in response["Roles"]:
|
||||
roleArn = role["Arn"]
|
||||
print(roleArn)
|
||||
|
||||
print("Searching for old API gateway...")
|
||||
print("------------------------------------")
|
||||
for api in apiClient.get_apis()["Items"]:
|
||||
if api["Name"] == functionName + '-api':
|
||||
print("Deleting old API gateway...")
|
||||
print("------------------------------------")
|
||||
response = apiClient.delete_api(
|
||||
ApiId=api["ApiId"],
|
||||
)
|
||||
|
||||
print("Deleting old function...")
|
||||
print("------------------------------------")
|
||||
try:
|
||||
@@ -84,8 +96,8 @@ try:
|
||||
currentBucket = s3Resource.Bucket(globallyUniqueS3GroupBucketName)
|
||||
cleanup_s3_bucket(currentBucket)
|
||||
currentBucket.delete()
|
||||
except ClientError as e:
|
||||
print(e)
|
||||
except s3Client.exceptions.NoSuchBucket:
|
||||
print('Bucket not available. No need to delete it.')
|
||||
|
||||
print("creating S3 bucket (must be globally unique)...")
|
||||
print("------------------------------------")
|
||||
@@ -124,11 +136,25 @@ with open('lambda-deployment-archive.zip', mode='rb') as file:
|
||||
)
|
||||
lambdaFunctionARN = response['FunctionArn']
|
||||
|
||||
print("Lambda Function and S3 Bucket to store the counter are available. Sadly, AWS Academy labs do not allow\n"
|
||||
"creating an API gateway to be able to access the Lambda function directly via HTTP from the browser, as\n"
|
||||
"shown in https://348yxdily0.execute-api.eu-central-1.amazonaws.com/default/cloudcomp-counter-demo.\n"
|
||||
# API gateway to get an HTTP endpoint that we can access directly in the browser,
|
||||
# which will call our function, as in the provided demo:
|
||||
# https://348yxdily0.execute-api.eu-central-1.amazonaws.com/default/cloudcomp-counter-demo
|
||||
|
||||
print("creating API gateway...")
|
||||
print("------------------------------------")
|
||||
|
||||
response = apiClient.create_api(
|
||||
Name=functionName + '-api',
|
||||
ProtocolType='HTTP',
|
||||
Target=lambdaFunctionARN,
|
||||
CredentialsArn=roleArn
|
||||
)
|
||||
|
||||
print("Lambda Function and S3 Bucket to store the counter are created.\n"
|
||||
"\n"
|
||||
"However you can now run invoke-function.py to view an increment the counter. You can also use \n"
|
||||
"You can access the API gateway and increment the counter using the created Lambda function\n"
|
||||
"at: " + response["ApiEndpoint"] + " \n"
|
||||
"You can also run invoke-function.py to view an increment the counter. You can also use \n"
|
||||
"the test button in the Lambda AWS console. In this case you need to send the content\n"
|
||||
"\n"
|
||||
"{\n"
|
||||
@@ -137,19 +163,4 @@ print("Lambda Function and S3 Bucket to store the counter are available. Sadly,
|
||||
"\n"
|
||||
"to increment the counter by 1.\n"
|
||||
"Try to understand how Lambda can be used to cut costs regarding cloud services and what its pros\n"
|
||||
"and cons are.\n")
|
||||
|
||||
# sadly, AWS Academy Labs don't allow API gateways
|
||||
# API gateway would allow getting an HTTP endpoint that we could access directly in the browser,
|
||||
# that would call our function, as in the provided demo:
|
||||
#
|
||||
# https://348yxdily0.execute-api.eu-central-1.amazonaws.com/default/cloudcomp-counter-demo
|
||||
#
|
||||
# print("creating API gateway...")
|
||||
# print("------------------------------------")
|
||||
#
|
||||
# response = apiClient.create_api(
|
||||
# Name=functionName + '-api',
|
||||
# ProtocolType='HTTP',
|
||||
# Target=lambdaFunctionARN
|
||||
# )
|
||||
"and cons are.\n")
|
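Once `create_api` has returned, the printed `ApiEndpoint` can be exercised directly from a shell. A minimal sketch; the endpoint URL and region are placeholders taken from the script output, and the exact JSON body expected by the Lambda function is the one shown in the script's closing print statement (it is not repeated here):

```bash
# Replace with the ApiEndpoint value printed by the script
API_ENDPOINT="https://<api-id>.execute-api.<region>.amazonaws.com"

# Any request to the HTTP API invokes the Lambda target; to increment the
# counter, send the JSON body that the script prints at the end
curl -s -X POST "$API_ENDPOINT" --data '<JSON body from the script output>'
```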
@@ -36,8 +36,19 @@ client = boto3.setup_default_session(region_name=region)
|
||||
s3Client = boto3.client('s3')
|
||||
s3Resource = boto3.resource('s3')
|
||||
lClient = boto3.client('lambda')
|
||||
apiClient = boto3.client("apigatewayv2")
|
||||
|
||||
|
||||
print("Searching for old API gateway...")
|
||||
print("------------------------------------")
|
||||
for api in apiClient.get_apis()["Items"]:
|
||||
if api["Name"] == functionName + '-api':
|
||||
print("Deleting old API gateway...")
|
||||
print("------------------------------------")
|
||||
response = apiClient.delete_api(
|
||||
ApiId=api["ApiId"],
|
||||
)
|
||||
|
||||
print("Deleting old function...")
|
||||
print("------------------------------------")
|
||||
try:
|
||||
@@ -54,5 +65,5 @@ try:
|
||||
currentBucket = s3Resource.Bucket(globallyUniqueS3GroupBucketName)
|
||||
cleanup_s3_bucket(currentBucket)
|
||||
currentBucket.delete()
|
||||
except ClientError as e:
|
||||
print(e)
|
||||
except s3Client.exceptions.NoSuchBucket:
|
||||
print('Bucket not available. No need to delete it.')
|
||||
|
@@ -203,8 +203,11 @@ def main():
|
||||
#
|
||||
# Thanks to Stefan Friedmann for finding this fix ;)
|
||||
|
||||
userdata = '''#!/usr/bin/env bash
|
||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install-aws.sh'
|
||||
|
||||
userdata = f'''#!/usr/bin/env bash
|
||||
curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
-i messaging -i faafo -r api
|
||||
rabbitmqctl add_user faafo guest
|
||||
rabbitmqctl set_user_tags faafo administrator
|
||||
@@ -268,8 +271,8 @@ def main():
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
userdata = '''#!/usr/bin/env bash
|
||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
userdata = f'''#!/usr/bin/env bash
|
||||
curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
-i faafo -r worker -e 'http://%(ip_controller)s' -m 'amqp://faafo:guest@%(ip_controller)s:5672/'
|
||||
''' % {'ip_controller': private_ip_controller}
|
||||
|
||||
|
@@ -286,8 +286,11 @@ def main():
|
||||
#
|
||||
# Thanks to Stefan Friedmann for finding this fix ;)
|
||||
|
||||
userdata_service = '''#!/usr/bin/env bash
|
||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
# new repo on git-ce.rwth-aachen.de:
|
||||
hsfd_faafo_cloud_init_script = 'https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/-/raw/master/faafo/contrib/install-aws.sh'
|
||||
|
||||
userdata_service = f'''#!/usr/bin/env bash
|
||||
curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
-i database -i messaging
|
||||
rabbitmqctl add_user faafo guest
|
||||
rabbitmqctl set_user_tags faafo administrator
|
||||
@@ -312,8 +315,8 @@ def main():
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
userdata_api = '''#!/usr/bin/env bash
|
||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
userdata_api = f'''#!/usr/bin/env bash
|
||||
curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
-i faafo -r api -m 'amqp://faafo:guest@%(services_ip)s:5672/' \
|
||||
-d 'mysql+pymysql://faafo:password@%(services_ip)s:3306/faafo'
|
||||
''' % {'services_ip': services_ip}
|
||||
@@ -350,13 +353,13 @@ def main():
|
||||
#
|
||||
###########################################################################
|
||||
|
||||
userdata_worker = '''#!/usr/bin/env bash
|
||||
curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
userdata_worker = f'''#!/usr/bin/env bash
|
||||
curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
-i faafo -r worker -e 'http://%(api_1_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
|
||||
''' % {'api_1_ip': api_1_ip, 'services_ip': services_ip}
|
||||
|
||||
# userdata_api-api-2 = '''#!/usr/bin/env bash
|
||||
# curl -L -s https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/faafo/contrib/install-aws.sh | bash -s -- \
|
||||
# userdata_api-api-2 = f'''#!/usr/bin/env bash
|
||||
# curl -L -s {hsfd_faafo_cloud_init_script} | bash -s -- \
|
||||
# -i faafo -r worker -e 'http://%(api_2_ip)s' -m 'amqp://faafo:guest@%(services_ip)s:5672/'
|
||||
# ''' % {'api_2_ip': api_2_ip, 'services_ip': services_ip}
|
||||
|
||||
|
@@ -200,9 +200,9 @@ userDataWebServer = ('#!/bin/bash\n'
|
||||
# 'cp tug-of-war-in-the-clouds.tar.gz /var/www/html/\n'
|
||||
# 'tar zxvf tug-of-war-in-the-clouds.tar.gz\n'
|
||||
'cd /var/www/html\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'\n'
|
||||
'# change hostname of db connection\n'
|
||||
'sed -i s/localhost/' + dbEndpointAddress + '/g /var/www/html/config.php\n'
|
||||
|
@@ -266,9 +266,9 @@ userDataWebServer = ('#!/bin/bash\n'
|
||||
# 'cp tug-of-war-in-the-clouds.tar.gz /var/www/html/\n'
|
||||
# 'tar zxvf tug-of-war-in-the-clouds.tar.gz\n'
|
||||
'cd /var/www/html\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'\n'
|
||||
'# change hostname of db connection\n'
|
||||
'sed -i s/localhost/' + privateIpDB + '/g /var/www/html/config.php\n'
|
||||
|
@@ -80,9 +80,9 @@ userDataWebServer = ('#!/bin/bash\n'
|
||||
# 'cp tug-of-war-in-the-clouds.tar.gz /var/www/html/\n'
|
||||
# 'tar zxvf tug-of-war-in-the-clouds.tar.gz\n'
|
||||
'cd /var/www/html\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examplesraw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'\n'
|
||||
'# change hostname of db connection\n'
|
||||
'sed -i s/localhost/' + privateIpDB + '/g /var/www/html/config.php\n'
|
||||
|
@@ -190,9 +190,9 @@ userDataWebServer = ('#!/bin/bash\n'
|
||||
# 'cp tug-of-war-in-the-clouds.tar.gz /var/www/html/\n'
|
||||
# 'tar zxvf tug-of-war-in-the-clouds.tar.gz\n'
|
||||
'cd /var/www/html\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/index.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/cloud.php\n'
|
||||
'wget https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples/raw/master/example-projects/tug-of-war-in-the-clouds/web-content/config.php\n'
|
||||
'\n'
|
||||
'# change hostname of db connection\n'
|
||||
'sed -i s/localhost/' + privateIpDB + '/g /var/www/html/config.php\n'
|
||||
|
@@ -151,8 +151,8 @@ if [[ -e /etc/os-release ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# HSFD changed to local repo
|
||||
git clone https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples
|
||||
# HSFD changed to git-ce.rwth-aachen.de repo
|
||||
git clone https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples.git
|
||||
cd cloud-computing-msc-ai-examples/faafo
|
||||
# following line required by bug 1636150
|
||||
sudo pip3 install --upgrade pbr
|
||||
|
@@ -151,9 +151,8 @@ if [[ -e /etc/os-release ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# HSFD changed to local repo
|
||||
#git clone https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples
|
||||
git clone https://gogs.informatik.hs-fulda.de/srieger/cloud-computing-msc-ai-examples
|
||||
# HSFD changed to git-ce.rwth-aachen.de repo
|
||||
git clone https://git-ce.rwth-aachen.de/sebastian.rieger/cloud-computing-msc-ai-examples.git
|
||||
cd cloud-computing-msc-ai-examples/faafo
|
||||
# following line required by bug 1636150
|
||||
sudo pip install --upgrade pbr
|
||||
|
root-ca.crt (59 lines changed)
@@ -1,21 +1,46 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDazCCAlOgAwIBAgIUEQjSqiZ86fhawQU09G8hn3i9dIwwDQYJKoZIhvcNAQEL
|
||||
MIIDazCCAlOgAwIBAgIUNuL4xjm+DmLpQ8+XdaFLF6WCeXswDQYJKoZIhvcNAQEL
|
||||
BQAwPTE7MDkGA1UEAxMyVmF1bHQgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkg
|
||||
KGNoYXJtLXBraS1sb2NhbCkwHhcNMjQwMzI1MTMxNDU0WhcNMzQwMzIzMTIxNTIz
|
||||
KGNoYXJtLXBraS1sb2NhbCkwHhcNMjUwNDExMTk1MDUzWhcNMzUwNDA5MTg1MTIz
|
||||
WjA9MTswOQYDVQQDEzJWYXVsdCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAo
|
||||
Y2hhcm0tcGtpLWxvY2FsKTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
|
||||
ALcax5l2zU1ELCtF/k6yq3HUv7gFq6f/sl8rX0VRhzZyEy4hcMgRSZDAM14viTVZ
|
||||
8d7+ptY3+GwuLSpEY2UUlX5kJSDb4pUNRXDhxzRatbByG8pr5FQE8pX9W7y4C0TU
|
||||
3PQA4uIjAsPFKayFxXjJjOQN0HX3K6MCQz/BTV81U3fmdFrKma3x/PXyUYndjQH6
|
||||
zlIiQSdYh7FMTbS2FlpvwWbT9zKOpp+2M0odI8Y8fjCSUdSdKDFhVu02zQTq6/h0
|
||||
Q1/sNHz2IP9F83sNW+ro0bvv5CJ2iCyAk/RiFoB+RoSO6HncOtYHxa/guwTy4eHh
|
||||
VQVJXkEI2PutCw6S3lWqLEcCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
|
||||
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFDKQHfpVHJgge6RRC5uDwMByLbV3MB8GA1Ud
|
||||
IwQYMBaAFDKQHfpVHJgge6RRC5uDwMByLbV3MA0GCSqGSIb3DQEBCwUAA4IBAQCa
|
||||
ajIRi/+7Yy7l46yFURLyELMWayRpdx2KCxIuAiSkTlNSVOPCmwZvgnYLPVffXWpt
|
||||
IXJGQk//9+5q18LiZat5MbvUU3ffLc/ZCxIeQiWNuYKziLYNFHmpfMvxNxzAJ6Pi
|
||||
2fj5ZP/cA4Vie3M1iHfdSXmYPvyw76i9/sA2+F7Wy8fzK53S1OaMaeADNGljHTaW
|
||||
ovRxreLKJZybqe/YWlcEiP4dC4VMHLl+H5RmZ5ojrRiy1c3uUssNnIJU+ilkY8TP
|
||||
0VV17+wQBaJbbp4jh8acwvOJbN8Y1EHQWhxkEf3PfjJRv+b1NI/Iai27DfYto7Dm
|
||||
rZvaFnAMCcyFXyJv3WdJ
|
||||
-----END CERTIFICATE-----
|
||||
ANNZ/hPUFFUeiDX76gBYIKnpukB2Ebw0CAXlUnteHjLUCXyzvx+47C5RL4z9kJfv
|
||||
SpCVbMwYAw6/0xLvOCIsuPHMR+HMqVHnRhKRPeA0uuWycXs5v0qUK0tcfufA4I+v
|
||||
NhS5TDngfdScWr/HjhWdKxtZuTNI66nYDjluLfin2sjgwkKuOiE+/amReK7XYAnH
|
||||
zfkCi7BB2QcdEupvq8yqVh9N+KsoXAAdGmyfNIfSYWRbeSa+EQhE0FMDdx8UPO7+
|
||||
X/aFvylZVEHPeXX6/DfeELlxNYzWWWUeVEy3vYH1LRoDFB14mimx2QcuqL+gtIot
|
||||
c7NPkFna+b1Zoa6EYlvXkl0CAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
|
||||
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFCwXMS3Ia0vMYuMPBQkE7w47NhSZMB8GA1Ud
|
||||
IwQYMBaAFCwXMS3Ia0vMYuMPBQkE7w47NhSZMA0GCSqGSIb3DQEBCwUAA4IBAQCg
|
||||
pQbydWUkeIcvCHxJrNh2Y8USW/jVWjARZ7/5bkSDM5hl7+DX24+m6k+Dvm9bf0ta
|
||||
c/6PLEByozjbR8IEqsonzawOb404opQVDujdrY65wUMkyIcBNA04R3a6RuoGndse
|
||||
CAK2Du60CW6XXSvrgRO/7gcQajs5B0NOykrubDb58JdaR3weIinhrmsr+0I9zqtv
|
||||
sLFrWlgXmelVhW3Sa7gFVbFehYdFy/3OCTFsrX9yIJjDL8u+cZdEFI/Vp0SHIWHO
|
||||
l2lqvX0E/VieM0D6FiIO/oVtD4zE+2162DtNg7iSyrYVnTjRK0wXIVMAXchJ2H6L
|
||||
mx+DEn45qORcxFOzj1QB
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
|
||||
MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
|
||||
GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
|
||||
YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
|
||||
MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
|
||||
BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
|
||||
GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
|
||||
ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
|
||||
BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
|
||||
3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
|
||||
YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
|
||||
rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
|
||||
ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
|
||||
oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
|
||||
MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
|
||||
QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
|
||||
b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
|
||||
AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
|
||||
GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
|
||||
Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
|
||||
G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
|
||||
l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
|
||||
smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
|
||||
-----END CERTIFICATE-----
|
||||
|
terraform/K3S/README.MD (new file, 11 lines)
@@ -0,0 +1,11 @@
## K3S using terraform and openstack

Using these scripts, a K3S cluster will be installed in the OpenStack cluster (a short usage sketch follows after this list).
To change values such as the node count, DNS, or PV size, please use the tfvars file located in the same directory.

- A wait time has been implemented because we were not able to detect when the cluster comes up. We could also have checked for the token file, but with K3S the file is created as soon as the process is executed, so we cannot rely on it; instead we added a timer of 120s, though the required delay depends on cluster load and network speed.
- Note: if `num_worker_nodes` is set to `0`, the master becomes a single-node K3S cluster. If it is more than 0, a taint is applied to the master so no pods can go there.
- In the script we have allowed all inbound traffic, which should not be done; sample code to allow only specific ports is given there. It is always a good idea to open only the ports that are needed.
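For orientation, this is the standard Terraform workflow these files are meant to be used with; it is not prescribed by the repository itself. It assumes only one of the two `*.main.tf` variants is active in the working directory (both declare the same variables) and that `terraform.tfvars` from the same directory supplies the values:

```bash
cd terraform/K3S

# Download the providers declared in the script (openstack, tls, kubernetes)
terraform init

# Review the planned resources; terraform.tfvars is picked up automatically
terraform plan

# Create network, security groups, master and worker nodes, and volumes
terraform apply
```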
terraform/K3S/insecure.main.tf (new file, 483 lines)
@@ -0,0 +1,483 @@
|
||||
#######################################################
|
||||
#### THIS IS NOT HOW YOU DEPLOY K3S IN PROD
|
||||
#### THIS DOES NOT USE CERTS FOR INTERNAL COMMUNICATION
|
||||
#### USE THE SECURE SCRIPT FOR ACTUAL DEPLOYMENT
|
||||
####
|
||||
#### By Sagnik Bhattacharya, 2024
|
||||
####
|
||||
#######################################################
|
||||
|
||||
# installing dependencies
|
||||
terraform {
|
||||
required_version = ">= 0.14.0"
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
version = ">= 2.0.0"
|
||||
}
|
||||
tls = {
|
||||
source = "hashicorp/tls"
|
||||
version = ">= 3.1.0"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = "~> 2.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "openstack" {
|
||||
auth_url = var.auth_url
|
||||
region = var.region
|
||||
tenant_name = var.tenant_name
|
||||
user_name = var.user_name
|
||||
password = var.password
|
||||
domain_name = var.domain_name
|
||||
insecure = true # DANGER
|
||||
}
|
||||
|
||||
variable "auth_url" {
|
||||
description = "OpenStack authentication URL"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
description = "OpenStack region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tenant_name" {
|
||||
description = "OpenStack tenant name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "user_name" {
|
||||
description = "OpenStack username"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "password" {
|
||||
description = "OpenStack password"
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "domain_name" {
|
||||
description = "OpenStack domain name"
|
||||
type = string
|
||||
}
|
||||
|
||||
# Broken for some reason, cause unknown
|
||||
# variable "ssh_public_key" {
|
||||
# description = "Path to the SSH public key"
|
||||
# type = string
|
||||
# }
|
||||
|
||||
variable "num_worker_nodes" {
|
||||
description = "Number of worker nodes to create"
|
||||
type = number
|
||||
}
|
||||
|
||||
variable "master_flavor" {
|
||||
description = "Flavor for the master node"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "worker_flavor" {
|
||||
description = "Flavor for the worker nodes"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "os_image" {
|
||||
description = "OS image to use for instances"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "volume_size" {
|
||||
description = "Size of the volumes to create for nodes"
|
||||
type = number
|
||||
}
|
||||
|
||||
variable "dns_servers" {
|
||||
description = "List of DNS servers for the instances"
|
||||
type = list(string)
|
||||
}
|
||||
|
||||
variable "floating_ip_pool" {
|
||||
description = "Name of the floating IP pool for the instances"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "delay_seconds" {
|
||||
description = "The delay in seconds before creating the worker nodes"
|
||||
default = 120
|
||||
## This wait time has been implemented because we were not able to detect when the cluster comes up.
## We could also have checked for the token file, but with K3S the file is created as soon as the
## process is executed, so we cannot rely on it; thus we have added a timer of 120s.
## The required delay depends on cluster load and network speed.
|
||||
}
|
||||
|
||||
# Delay resource for master
|
||||
resource "null_resource" "delay_master" {
|
||||
provisioner "local-exec" {
|
||||
command = "sleep ${var.delay_seconds}"
|
||||
}
|
||||
triggers = {
|
||||
instance_id_master = openstack_compute_instance_v2.k3s_master.id
|
||||
}
|
||||
}
|
||||
|
||||
# Delay resource for workers
|
||||
resource "null_resource" "delay_workers" {
|
||||
provisioner "local-exec" {
|
||||
command = "sleep ${var.delay_seconds}"
|
||||
}
|
||||
triggers = {
|
||||
instance_id_workers = join(",", openstack_compute_instance_v2.k3s_workers.*.id)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# Define the network
|
||||
resource "openstack_networking_network_v2" "network" {
|
||||
name = "k3s-network"
|
||||
admin_state_up = "true"
|
||||
}
|
||||
|
||||
# Define the subnet
|
||||
resource "openstack_networking_subnet_v2" "subnet" {
|
||||
name = "k3s-subnet"
|
||||
network_id = openstack_networking_network_v2.network.id
|
||||
cidr = "192.168.1.0/24"
|
||||
ip_version = 4
|
||||
dns_nameservers = var.dns_servers
|
||||
}
|
||||
|
||||
# Define the router
|
||||
|
||||
data "openstack_networking_network_v2" "floating_ip" {
|
||||
name = var.floating_ip_pool
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_v2" "router" {
|
||||
name = "k3s-router"
|
||||
admin_state_up = "true"
|
||||
external_network_id = data.openstack_networking_network_v2.floating_ip.id
|
||||
}
|
||||
|
||||
# Connect the router to the subnet
|
||||
resource "openstack_networking_router_interface_v2" "router_interface" {
|
||||
router_id = openstack_networking_router_v2.router.id
|
||||
subnet_id = openstack_networking_subnet_v2.subnet.id
|
||||
}
|
||||
|
||||
# Adding FIP to master ## DEPRECATED
|
||||
resource "openstack_networking_floatingip_v2" "fip" {
|
||||
pool = var.floating_ip_pool
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "fip_assoc" {
|
||||
floating_ip = openstack_networking_floatingip_v2.fip.address
|
||||
instance_id = openstack_compute_instance_v2.k3s_master.id
|
||||
}
|
||||
|
||||
|
||||
# Creating SSH keys
|
||||
resource "tls_private_key" "ssh" {
|
||||
algorithm = "ECDSA"
|
||||
ecdsa_curve = "P256"
|
||||
}
|
||||
|
||||
# Saving key in local
|
||||
resource "local_file" "private_key" {
|
||||
content = tls_private_key.ssh.private_key_pem
|
||||
filename = "${path.module}/id_rsa"
|
||||
}
|
||||
|
||||
# Define the keypair for SSH
|
||||
resource "openstack_compute_keypair_v2" "default" {
|
||||
name = "k3s-key"
|
||||
# public_key = file(var.ssh_public_key)
|
||||
public_key = tls_private_key.ssh.public_key_openssh
|
||||
}
|
||||
|
||||
# Create a new security group
|
||||
resource "openstack_networking_secgroup_v2" "secgroup" {
|
||||
name = "k3s-secgroup"
|
||||
description = "Security group for k3s"
|
||||
}
|
||||
|
||||
# # Allow SSH traffic
|
||||
# resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_ssh" {
|
||||
# direction = "ingress"
|
||||
# ethertype = "IPv4"
|
||||
# protocol = "tcp"
|
||||
# port_range_min = 22
|
||||
# port_range_max = 22
|
||||
# remote_ip_prefix = "0.0.0.0/0"
|
||||
# security_group_id = openstack_networking_secgroup_v2.secgroup.id
|
||||
# }
|
||||
|
||||
########### DONT DO THIS ITS VERY BAD ########################
|
||||
# Allow all inbound traffic
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_all_inbound" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.secgroup.id
|
||||
}
|
||||
#############################################################
|
||||
|
||||
|
||||
# Allow all outbound traffic
|
||||
resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_all_outbound" {
|
||||
direction = "egress"
|
||||
ethertype = "IPv4"
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.secgroup.id
|
||||
}
|
||||
|
||||
# Define the master node
|
||||
resource "openstack_compute_instance_v2" "k3s_master" {
|
||||
name = "kube-master"
|
||||
image_name = var.os_image
|
||||
flavor_name = var.master_flavor
|
||||
key_pair = openstack_compute_keypair_v2.default.name
|
||||
security_groups = ["default",openstack_networking_secgroup_v2.secgroup.name]
|
||||
network {
|
||||
uuid = openstack_networking_network_v2.network.id
|
||||
}
|
||||
|
||||
# This thing does all the magic, a glorified bash script XD
|
||||
user_data = <<-EOT
|
||||
#!/bin/bash
|
||||
apt-get update
|
||||
apt-get install -y curl
|
||||
echo "Before snap"
|
||||
snap install helm --classic
|
||||
|
||||
# Install KubeCTL
|
||||
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
||||
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
||||
kubectl version --client
|
||||
echo "before K3S"
|
||||
|
||||
# Install K3s with taint if there are worker nodes
|
||||
if [ ${var.num_worker_nodes} -gt 0 ]; then
|
||||
curl -sfL https://get.k3s.io | sh -s - --node-taint key=value:NoExecute --disable traefik --disable-agent --tls-san 127.0.0.1
|
||||
else
|
||||
# Install K3s without taint, allowing the master to schedule pods
|
||||
curl -sfL https://get.k3s.io | sh -s - --disable traefik --disable-agent --tls-san 127.0.0.1
|
||||
fi
|
||||
|
||||
# Wait and save the token into a file
|
||||
while [ ! -f /var/lib/rancher/k3s/server/node-token ]; do
|
||||
sleep 5
|
||||
done
|
||||
mkdir -p /var/lib/rancher/k3s/server/
|
||||
echo $(cat /var/lib/rancher/k3s/server/node-token) > /var/lib/rancher/k3s/server/token
|
||||
chmod 777 /var/lib/rancher/k3s/server/token
|
||||
ls -ltr /var/lib/rancher/k3s/server/token
|
||||
|
||||
# Mount the volume at /mnt
|
||||
mkdir /mnt/data
|
||||
mkfs.ext4 /dev/vdb
|
||||
echo '/dev/vdb /mnt/data ext4 defaults 0 0' >> /etc/fstab
|
||||
mount -a
|
||||
|
||||
# Adding kubeconfig
|
||||
chmod 644 /etc/rancher/k3s/k3s.yaml
|
||||
echo "export KUBECONFIG=/etc/rancher/k3s/k3s.yaml" >> /etc/profile
|
||||
|
||||
EOT
|
||||
|
||||
metadata = {
|
||||
instance_role = "master"
|
||||
}
|
||||
}
|
||||
|
||||
# Define the volume for the master node
|
||||
resource "openstack_blockstorage_volume_v3" "k3s_master_volume" {
|
||||
name = "k3s-master-volume"
|
||||
size = var.volume_size
|
||||
}
|
||||
|
||||
# Attach the volume to the master node
|
||||
resource "openstack_compute_volume_attach_v2" "k3s_master_volume_attach" {
|
||||
instance_id = openstack_compute_instance_v2.k3s_master.id
|
||||
volume_id = openstack_blockstorage_volume_v3.k3s_master_volume.id
|
||||
}
|
||||
|
||||
resource "openstack_compute_instance_v2" "k3s_workers" {
|
||||
count = var.num_worker_nodes
|
||||
name = "kubeworker-${count.index}"
|
||||
image_name = var.os_image
|
||||
flavor_name = var.worker_flavor
|
||||
key_pair = openstack_compute_keypair_v2.default.name
|
||||
security_groups = ["default", openstack_networking_secgroup_v2.secgroup.name]
|
||||
depends_on = [
|
||||
openstack_compute_instance_v2.k3s_master,
|
||||
null_resource.delay_master
|
||||
]
|
||||
|
||||
network {
|
||||
uuid = openstack_networking_network_v2.network.id
|
||||
}
|
||||
|
||||
# This script installs necessary software and prepares the mount point
|
||||
user_data = <<-EOT
|
||||
#!/bin/bash
|
||||
echo "hello"
|
||||
apt-get update
|
||||
apt-get install -y curl
|
||||
|
||||
# Create a mount point for the attached volume
|
||||
mkdir /mnt/data
|
||||
mkfs.ext4 /dev/vdb
|
||||
echo '/dev/vdb /mnt/data ext4 defaults 0 0' >> /etc/fstab
|
||||
mount -a
|
||||
|
||||
# Save the private key
|
||||
echo '${tls_private_key.ssh.private_key_pem}' > /home/ubuntu/.ssh/id_rsa
|
||||
chmod 600 /home/ubuntu/.ssh/id_rsa
|
||||
while [ -z "$TOKEN" ]; do
|
||||
TOKEN=$(ssh -o StrictHostKeyChecking=no -i /home/ubuntu/.ssh/id_rsa ubuntu@${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4} 'sudo cat /var/lib/rancher/k3s/server/token')
|
||||
sleep 5
|
||||
done
|
||||
curl -sfL https://get.k3s.io | K3S_URL=https://${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4}:6443 K3S_TOKEN=$TOKEN sh -
|
||||
EOT
|
||||
|
||||
# provisioner "remote-exec" {
|
||||
# inline = [
|
||||
# "TOKEN=$(ssh -o StrictHostKeyChecking=no -l ubuntu ${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4} 'cat /var/lib/rancher/k3s/server/token')",
|
||||
# "curl -sfL https://get.k3s.io | K3S_URL=http://${openstack_compute_instance_v2.k3s_master.network.0.fixed_ip_v4}:6443 K3S_TOKEN=$TOKEN sh -"
|
||||
# ]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
user = "ubuntu"
|
||||
private_key = tls_private_key.ssh.private_key_pem
|
||||
host = self.access_ip_v4
|
||||
}
|
||||
|
||||
metadata = {
|
||||
instance_role = "worker"
|
||||
}
|
||||
}
|
||||
|
||||
# Define the volumes for the worker nodes
|
||||
resource "openstack_blockstorage_volume_v3" "k3s_worker_volumes" {
|
||||
count = var.num_worker_nodes
|
||||
name = "k3s-worker-volume-${count.index}"
|
||||
size = var.volume_size
|
||||
}
|
||||
|
||||
# Attach the volumes to the worker nodes
|
||||
resource "openstack_compute_volume_attach_v2" "k3s_worker_volume_attach" {
|
||||
count = var.num_worker_nodes
|
||||
instance_id = element(openstack_compute_instance_v2.k3s_workers.*.id, count.index)
|
||||
volume_id = element(openstack_blockstorage_volume_v3.k3s_worker_volumes.*.id, count.index)
|
||||
|
||||
# Ensure attachment only happens after instance and volume creation
|
||||
depends_on = [
|
||||
openstack_compute_instance_v2.k3s_workers,
|
||||
openstack_blockstorage_volume_v3.k3s_worker_volumes
|
||||
]
|
||||
}
|
||||
|
||||
## Works till here
|
||||
|
||||
# data "kubernetes_namespace" "existing" {
|
||||
# metadata {
|
||||
# name = "kube-system"
|
||||
# }
|
||||
# }
|
||||
|
||||
# resource "kubernetes_namespace" "default" {
|
||||
# count = data.kubernetes_namespace.existing.id != null ? 0 : 1
|
||||
# depends_on = [null_resource.delay_workers]
|
||||
# metadata {
|
||||
# name = "kube-system"
|
||||
# }
|
||||
# }
|
||||
|
||||
|
||||
# resource "kubernetes_deployment" "traefik" {
|
||||
# metadata {
|
||||
# name = "traefik"
|
||||
# namespace = "kube-system"
|
||||
# labels = {
|
||||
# app = "traefik"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# replicas = 1
|
||||
# selector {
|
||||
# match_labels = {
|
||||
# app = "traefik"
|
||||
# }
|
||||
# }
|
||||
|
||||
# template {
|
||||
# metadata {
|
||||
# labels = {
|
||||
# app = "traefik"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# container {
|
||||
# name = "traefik"
|
||||
# image = "traefik:v2.4"
|
||||
# args = ["--providers.kubernetescrd", "--entrypoints.web.Address=:80", "--entrypoints.websecure.Address=:443"]
|
||||
|
||||
# port {
|
||||
# name = "web"
|
||||
# container_port = 80
|
||||
# }
|
||||
|
||||
# port {
|
||||
# name = "websecure"
|
||||
# container_port = 443
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
||||
# resource "kubernetes_service" "traefik" {
|
||||
# metadata {
|
||||
# name = "traefik"
|
||||
# namespace = "kube-system"
|
||||
# labels = {
|
||||
# app = "traefik"
|
||||
# }
|
||||
# }
|
||||
|
||||
# spec {
|
||||
# selector = {
|
||||
# app = "traefik"
|
||||
# }
|
||||
|
||||
# type = "LoadBalancer"
|
||||
|
||||
# port {
|
||||
# name = "web"
|
||||
# port = 80
|
||||
# target_port = 80
|
||||
# }
|
||||
|
||||
# port {
|
||||
# name = "websecure"
|
||||
# port = 443
|
||||
# target_port = 443
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
||||
# output "traefik_lb_ip" {
|
||||
# value = flatten([for s in kubernetes_service.traefik.status : [for i in s.load_balancer.ingress : i.ip]])
|
||||
# }
|
terraform/K3S/runner/nginx.yaml (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
# Namespace
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
|
||||
---
|
||||
# Persistent Volume
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: nginx-pv
|
||||
spec:
|
||||
capacity:
|
||||
storage: 5Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: manual
|
||||
hostPath:
|
||||
path: "/mnt/data/nginx-pv" # Adjust this path according to the Kubernetes node filesystem
|
||||
|
||||
---
|
||||
# Persistent Volume Claim
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: nginx-pvc
|
||||
namespace: nginx-deployment
|
||||
spec:
|
||||
storageClassName: manual
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 5Gi
|
||||
|
||||
---
|
||||
# Deployment for Nginx Pods
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
namespace: nginx-deployment
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- mountPath: /usr/share/nginx/html
|
||||
name: nginx-storage
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "1"
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "0.5"
|
||||
volumes:
|
||||
- name: nginx-storage
|
||||
persistentVolumeClaim:
|
||||
claimName: nginx-pvc
|
||||
|
||||
---
|
||||
# Service of type LoadBalancer to expose Nginx externally
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx-service
|
||||
namespace: nginx-deployment
|
||||
spec:
|
||||
selector:
|
||||
app: nginx
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
targetPort: 80
|
||||
type: LoadBalancer
|
||||
|
||||
---
|
||||
# Ingress to route traffic to Nginx service (no specific host)
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginx-ingress
|
||||
namespace: nginx-deployment
|
||||
annotations:
|
||||
traefik.ingress.kubernetes.io/router.entrypoints: web
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
# host: something.hs-fulda.de # Replace with your domain
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: nginx-service
|
||||
port:
|
||||
number: 80
|
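Once the K3S cluster is reachable (e.g. with `KUBECONFIG` pointing at the generated `/etc/rancher/k3s/k3s.yaml`), the manifest can be applied as usual. A minimal sketch; the path matches the repository layout and the namespace is created by the manifest itself:

```bash
# Deploy namespace, PV/PVC, nginx deployment, service and ingress in one go
kubectl apply -f terraform/K3S/runner/nginx.yaml

# Check that the pods, the LoadBalancer service and the ingress are ready
kubectl get pods,svc,ingress -n nginx-deployment
```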
terraform/K3S/secure.main.tf (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
#######################################################
|
||||
#### Incomplete
|
||||
####
|
||||
#### By Sagnik Bhattacharya, 2024
|
||||
####
|
||||
#######################################################
|
||||
|
||||
terraform {
|
||||
required_providers {
|
||||
openstack = {
|
||||
source = "terraform-provider-openstack/openstack"
|
||||
version = "~> 1.0"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = "~> 2.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "openstack" {
|
||||
auth_url = var.auth_url
|
||||
region = var.region
|
||||
tenant_name = var.tenant_name
|
||||
user_name = var.user_name
|
||||
password = var.password
|
||||
domain_name = var.domain_name
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = var.kubernetes_host
|
||||
client_certificate = file(var.client_certificate)
|
||||
client_key = file(var.client_key)
|
||||
cluster_ca_certificate = file(var.cluster_ca_certificate)
|
||||
}
|
||||
|
||||
# Define variables without default values
|
||||
variable "auth_url" {
|
||||
description = "OpenStack authentication URL"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "region" {
|
||||
description = "OpenStack region"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "tenant_name" {
|
||||
description = "OpenStack tenant name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "user_name" {
|
||||
description = "OpenStack username"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "password" {
|
||||
description = "OpenStack password"
|
||||
type = string
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
variable "domain_name" {
|
||||
description = "OpenStack domain name"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ssh_public_key" {
|
||||
description = "Path to the SSH public key"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "kubernetes_host" {
|
||||
description = "Kubernetes API server URL"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "client_certificate" {
|
||||
description = "Path to the client certificate for Kubernetes"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "client_key" {
|
||||
description = "Path to the client key for Kubernetes"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "cluster_ca_certificate" {
|
||||
description = "Path to the cluster CA certificate for Kubernetes"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "num_worker_nodes" {
|
||||
description = "Number of worker nodes to create"
|
||||
type = number
|
||||
}
|
||||
|
||||
variable "master_flavor" {
|
||||
description = "Flavor for the master node"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "worker_flavor" {
|
||||
description = "Flavor for the worker nodes"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "os_image" {
|
||||
description = "OS image to use for instances"
|
||||
type = string
|
||||
}
|
terraform/K3S/terraform.tfvars (new file, 29 lines)
@@ -0,0 +1,29 @@
## These are for connecting with OpenStack and sharing the keypair

auth_url = "https://10.32.4.29:5000/v3"
region = "RegionOne"
tenant_name = "CloudComp10" # Also known as project
user_name = "CloudComp10"
password = "demo"
domain_name = "default"
# ssh_public_key = "~/.ssh/id_ecdsa.pub"

# These are needed for the internal SSL certificate.
# They must be used for a production environment, but are removed here for simplicity.

# client_certificate = "~/.ssh/client.crt"
# client_key = "~/.ssh/client.key"
# cluster_ca_certificate = "~/.ssh/ca.crt"

# Instance configuration
# If num_worker_nodes is 0, the master also runs workloads; otherwise the
# master is used for control only.

num_worker_nodes = 3
master_flavor = "m1.small"
worker_flavor = "m1.medium"
os_image = "ubuntu-22.04-jammy-server-cloud-image-amd64"
volume_size = 15
dns_servers = ["10.33.16.100"]
floating_ip_pool = "ext_net"
delay_seconds = 120
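To try the single-node behaviour described in the README and in the comment above without editing this file, the value can be overridden on the command line (standard Terraform behaviour, not specific to these scripts):

```bash
# With zero workers no NoExecute taint is applied, so the master schedules pods itself
terraform apply -var="num_worker_nodes=0"
```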
@@ -12,13 +12,13 @@ variable "group_number" {
 
 # Define OpenStack credentials, project config etc.
 locals {
-  auth_url = "https://10.32.4.182:5000/v3"
+  auth_url = "https://10.32.4.29:5000/v3"
   user_name = "CloudComp32"
   user_password = "demo"
   tenant_name = "CloudComp${var.group_number}"
   #network_name = "CloudComp${var.group_number}-net"
   router_name = "CloudComp${var.group_number}-router"
-  image_name = "ubuntu-22.04-jammy-x86_64"
+  image_name = "ubuntu-22.04-jammy-server-cloud-image-amd64"
   flavor_name = "m1.small"
   region_name = "RegionOne"
   floating_net = "ext_net"
@@ -12,13 +12,13 @@ variable "group_number" {
 
 # Define OpenStack credentials, project config etc.
 locals {
-  auth_url = "https://10.32.4.182:5000/v3"
+  auth_url = "https://10.32.4.29:5000/v3"
   user_name = "CloudComp${var.group_number}"
   user_password = "<password of your group here, private-cloud is only reachable via vpn>"
   tenant_name = "CloudComp${var.group_number}"
   #network_name = "CloudComp${var.group_number}-net"
   router_name = "CloudComp${var.group_number}-router"
-  image_name = "ubuntu-22.04-jammy-x86_64"
+  image_name = "ubuntu-22.04-jammy-server-cloud-image-amd64"
   flavor_name = "m1.small"
   region_name = "RegionOne"
   dns_nameservers = [ "10.33.16.100" ]